How to use the lt_strategy method in pandera

Best Python code snippets using pandera

test_strategies.py

Source: test_strategies.py (GitHub)


...
    assert (
        data.draw(strategies.ge_strategy(data_type, min_value=value)) >= value
    )
    assert (
        data.draw(strategies.lt_strategy(data_type, max_value=value)) < value
    )
    assert (
        data.draw(strategies.le_strategy(data_type, max_value=value)) <= value
    )

def value_ranges(data_type: pa.DataType):
    """Strategy to generate value range based on PandasDtype"""
    kwargs = dict(
        allow_nan=False,
        allow_infinity=False,
        exclude_min=False,
        exclude_max=False,
    )
    return (
        st.tuples(
...
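Putting the pattern above to work: a minimal sketch of calling lt_strategy directly, and of reaching it indirectly through Check.lt. It assumes pandera is installed with its hypothesis extra; pa.Int64() as the data type and the "price" column are illustrative choices, not from the source.

import pandera as pa
from hypothesis import given
from pandera import strategies

# lt_strategy returns a hypothesis SearchStrategy whose drawn values are
# strictly less than max_value, as the test above asserts.
lt_100 = strategies.lt_strategy(pa.Int64(), max_value=100)

@given(lt_100)
def test_generated_values_are_below_limit(value):
    assert value < 100

# In practice lt_strategy is usually exercised indirectly: a Check.lt on a
# schema column makes the synthesized example data respect the same bound.
schema = pa.DataFrameSchema({"price": pa.Column(int, checks=pa.Check.lt(100))})
df = schema.example(size=5)
assert (df["price"] < 100).all()

Drawing from the strategy (or calling schema.example) can produce any value below the bound, including large negative numbers, so assert only the property you actually care about.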


test_cli.py

Source: test_cli.py (GitHub)


# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import Mock

import pytest
from tests_lite.helpers.runif import RunIf

from lightning_lite.cli import main as cli_main
from lightning_lite.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_13

if not (_IS_WINDOWS and _TORCH_GREATER_EQUAL_1_13):
    import torch.distributed.run


def skip_windows_pt_1_13():
    # https://github.com/pytorch/pytorch/issues/85427
    return pytest.mark.skipif(
        condition=(_IS_WINDOWS and _TORCH_GREATER_EQUAL_1_13),
        reason="Torchelastic import bug in 1.13 affecting Windows",
    )


@skip_windows_pt_1_13()
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_cli_env_vars_defaults(monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py"]):
        cli_main()
    assert os.environ["LT_CLI_USED"] == "1"
    assert os.environ["LT_ACCELERATOR"] == "cpu"
    assert "LT_STRATEGY" not in os.environ
    assert os.environ["LT_DEVICES"] == "1"
    assert os.environ["LT_NUM_NODES"] == "1"
    assert os.environ["LT_PRECISION"] == "32"


@skip_windows_pt_1_13()
@pytest.mark.parametrize("accelerator", ["cpu", "gpu", "cuda", pytest.param("mps", marks=RunIf(mps=True))])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
@mock.patch("lightning_lite.accelerators.cuda.num_cuda_devices", return_value=2)
def test_cli_env_vars_accelerator(_, accelerator, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--accelerator", accelerator]):
        cli_main()
    assert os.environ["LT_ACCELERATOR"] == accelerator


@skip_windows_pt_1_13()
@pytest.mark.parametrize("strategy", ["dp", "ddp", "deepspeed"])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
@mock.patch("lightning_lite.accelerators.cuda.num_cuda_devices", return_value=2)
def test_cli_env_vars_strategy(_, strategy, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--strategy", strategy]):
        cli_main()
    assert os.environ["LT_STRATEGY"] == strategy


@skip_windows_pt_1_13()
@pytest.mark.parametrize("devices", ["1", "2", "0,", "1,0", "-1"])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
@mock.patch("lightning_lite.accelerators.cuda.num_cuda_devices", return_value=2)
def test_cli_env_vars_devices_cuda(_, devices, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--accelerator", "cuda", "--devices", devices]):
        cli_main()
    assert os.environ["LT_DEVICES"] == devices


@RunIf(mps=True)
@skip_windows_pt_1_13()
@pytest.mark.parametrize("accelerator", ["mps", "gpu"])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_cli_env_vars_devices_mps(accelerator, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--accelerator", accelerator]):
        cli_main()
    assert os.environ["LT_DEVICES"] == "1"


@skip_windows_pt_1_13()
@pytest.mark.parametrize("num_nodes", ["1", "2", "3"])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_cli_env_vars_num_nodes(num_nodes, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--num-nodes", num_nodes]):
        cli_main()
    assert os.environ["LT_NUM_NODES"] == num_nodes


@skip_windows_pt_1_13()
@pytest.mark.parametrize("precision", ["64", "32", "16", "bf16"])
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_cli_env_vars_precision(precision, monkeypatch):
    monkeypatch.setattr(torch.distributed, "run", Mock())
    with mock.patch("sys.argv", ["cli.py", "script.py", "--precision", precision]):
        cli_main()
    assert os.environ["LT_PRECISION"] == precision


@skip_windows_pt_1_13()
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_cli_torchrun_defaults(monkeypatch):
    torchrun_mock = Mock()
    monkeypatch.setattr(torch.distributed, "run", torchrun_mock)
    with mock.patch("sys.argv", ["cli.py", "script.py"]):
        cli_main()
    torchrun_mock.main.assert_called_with(
        [
            "--nproc_per_node=1",
            "--nnodes=1",
            "--node_rank=0",
            "--master_addr=127.0.0.1",
            "--master_port=29400",
            "script.py",
        ]
    )


@skip_windows_pt_1_13()
@pytest.mark.parametrize(
    "devices,expected",
    [
        ("1", 1),
        ("2", 2),
        ("0,", 1),
        ("1,0,2", 3),
        ("-1", 5),
    ],
)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
@mock.patch("lightning_lite.accelerators.cuda.num_cuda_devices", return_value=5)
def test_cli_torchrun_num_processes_launched(_, devices, expected, monkeypatch):
    torchrun_mock = Mock()
    monkeypatch.setattr(torch.distributed, "run", torchrun_mock)
    with mock.patch("sys.argv", ["cli.py", "script.py", "--accelerator", "cuda", "--devices", devices]):
        cli_main()
    torchrun_mock.main.assert_called_with(
        [
            f"--nproc_per_node={expected}",
            "--nnodes=1",
            "--node_rank=0",
            "--master_addr=127.0.0.1",
            "--master_port=29400",
            "script.py",
        ]
...


cli.py

Source: cli.py (GitHub)


# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from argparse import ArgumentParser, Namespace
from typing import List, Tuple

from lightning_lite.accelerators import CPUAccelerator, CUDAAccelerator, MPSAccelerator
from lightning_lite.utilities.device_parser import _parse_gpu_ids
from lightning_lite.utilities.imports import _IS_WINDOWS, _TORCH_GREATER_EQUAL_1_13

_log = logging.getLogger(__name__)

_SUPPORTED_ACCELERATORS = ("cpu", "gpu", "cuda", "mps", "tpu")
_SUPPORTED_STRATEGIES = (None, "ddp", "dp", "deepspeed")
_SUPPORTED_PRECISION = ("64", "32", "16", "bf16")


def _parse_args() -> Tuple[Namespace, List[str]]:
    parser = ArgumentParser(description="Launch your script with the Lightning Lite CLI.")
    parser.add_argument("script", type=str, help="Path to the Python script with Lightning Lite inside.")
    parser.add_argument(
        "--accelerator",
        type=str,
        default="cpu",
        choices=_SUPPORTED_ACCELERATORS,
        help="The hardware accelerator to run on.",
    )
    parser.add_argument(
        "--strategy",
        type=str,
        default=None,
        choices=_SUPPORTED_STRATEGIES,
        help="Strategy for how to run across multiple devices.",
    )
    parser.add_argument(
        "--devices",
        type=str,
        default="1",
        help=(
            "Number of devices to run on (``int``), which devices to run on (``list`` or ``str``), or ``'auto'``."
            " The value applies per node."
        ),
    )
    parser.add_argument(
        "--num-nodes",
        "--num_nodes",
        type=int,
        default=1,
        help="Number of machines (nodes) for distributed execution.",
    )
    parser.add_argument(
        "--node-rank",
        "--node_rank",
        type=int,
        default=0,
        help=(
            "The index of the machine (node) this command gets started on. Must be a number in the range"
            " 0, ..., num_nodes - 1."
        ),
    )
    parser.add_argument(
        "--main-address",
        "--main_address",
        type=str,
        default="127.0.0.1",
        help="The hostname or IP address of the main machine (usually the one with node_rank = 0).",
    )
    parser.add_argument(
        "--main-port",
        "--main_port",
        type=int,
        default=29400,
        help="The main port to connect to the main machine.",
    )
    parser.add_argument(
        "--precision",
        type=str,
        default="32",
        choices=_SUPPORTED_PRECISION,
        help=(
            "Double precision (``64``), full precision (``32``), half precision (``16``) or bfloat16 precision"
            " (``'bf16'``)"
        ),
    )
    args, script_args = parser.parse_known_args()
    return args, script_args


def _set_env_variables(args: Namespace) -> None:
    """Set the environment variables for the new processes.

    The Lite connector will parse the arguments set here.
    """
    os.environ["LT_CLI_USED"] = "1"
    os.environ["LT_ACCELERATOR"] = str(args.accelerator)
    if args.strategy is not None:
        os.environ["LT_STRATEGY"] = str(args.strategy)
    os.environ["LT_DEVICES"] = str(args.devices)
    os.environ["LT_NUM_NODES"] = str(args.num_nodes)
    os.environ["LT_PRECISION"] = str(args.precision)


def _get_num_processes(accelerator: str, devices: str) -> int:
    """Parse the `devices` argument to determine how many processes need to be launched on the current machine."""
    if accelerator == "gpu":
        parsed_devices = _parse_gpu_ids(devices, include_cuda=True, include_mps=True)
    elif accelerator == "cuda":
        parsed_devices = CUDAAccelerator.parse_devices(devices)
    elif accelerator == "mps":
        parsed_devices = MPSAccelerator.parse_devices(devices)
    elif accelerator == "tpu":
        raise ValueError("Launching processes for TPU through the CLI is not supported.")
    else:
        return CPUAccelerator.parse_devices(devices)
    return len(parsed_devices) if parsed_devices is not None else 0


def _torchrun_launch(args: Namespace, script_args: List[str]) -> None:
    """This will invoke `torchrun` programmatically to launch the given script in new processes."""
    if _IS_WINDOWS and _TORCH_GREATER_EQUAL_1_13:
        # TODO: remove once import issue is resolved: https://github.com/pytorch/pytorch/issues/85427
        _log.error(
            "On the Windows platform, this launcher is currently only supported on torch < 1.13 due to a bug"
            " upstream: https://github.com/pytorch/pytorch/issues/85427"
        )
        exit(1)
    import torch.distributed.run as torchrun

    if args.strategy == "dp":
        num_processes = 1
    else:
        num_processes = _get_num_processes(args.accelerator, args.devices)
    torchrun_args = [
        f"--nproc_per_node={num_processes}",
        f"--nnodes={args.num_nodes}",
        f"--node_rank={args.node_rank}",
        f"--master_addr={args.main_address}",
        f"--master_port={args.main_port}",
        args.script,
    ]
    torchrun_args.extend(script_args)
    # set a good default number of threads for OMP to avoid warnings being emitted to the user
    os.environ.setdefault("OMP_NUM_THREADS", str(max(1, (os.cpu_count() or 1) // num_processes)))
    torchrun.main(torchrun_args)


def main() -> None:
    args, script_args = _parse_args()
    _set_env_variables(args)
    _torchrun_launch(args, script_args)


if __name__ == "__main__":
...
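Worth noting: the LT_STRATEGY set here is the Lightning Lite CLI's environment variable, unrelated to pandera's lt_strategy apart from the name. Below is a minimal sketch, patterned on the tests above, of exercising that mapping without spawning any processes; "train.py" is a placeholder script name, not from the source.

import os
from unittest import mock
from unittest.mock import Mock

import torch.distributed.run  # ensure the submodule is importable before patching
from lightning_lite.cli import main as cli_main

# Replace torchrun with a Mock so no worker processes are actually launched.
with mock.patch.object(torch.distributed, "run", Mock()):
    with mock.patch("sys.argv", ["cli.py", "train.py", "--strategy", "ddp"]):
        cli_main()

assert os.environ["LT_STRATEGY"] == "ddp"  # exported by _set_env_variables

Without the mock, cli_main would hand the arguments straight to torchrun.main and launch train.py in new processes.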


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pandera automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
