Best Python code snippet using lisa_python
test_platform.py
Source: test_platform.py
...
        return constants.PLATFORM_MOCK

    @classmethod
    def supported_features(cls) -> List[Type[Feature]]:
        return []

    def set_test_config(
        self,
        return_prepared: bool = True,
        deploy_success: bool = True,
        deployed_status: EnvironmentStatus = EnvironmentStatus.Deployed,
        wait_more_resource_error: bool = False,
    ) -> None:
        self.initialize()
        self._mock_runbook.return_prepared = return_prepared
        self._mock_runbook.deploy_success = deploy_success
        self._mock_runbook.deployed_status = deployed_status
        self._mock_runbook.wait_more_resource_error = wait_more_resource_error

    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        self._mock_runbook: MockPlatformSchema = self.runbook.get_extended_runbook(
            MockPlatformSchema, constants.PLATFORM_MOCK
        )

    def _prepare_environment(self, environment: Environment, log: Logger) -> bool:
        self.test_data.prepared_envs.append(environment.name)
        requirements = environment.runbook.nodes_requirement
        if self._mock_runbook.return_prepared and requirements:
            min_capabilities: List[schema.NodeSpace] = []
            for node_space in requirements:
                min_capabilities.append(node_space.generate_min_capability(node_space))
            environment.runbook.nodes_requirement = min_capabilities
        return self._mock_runbook.return_prepared

    def _deploy_environment(self, environment: Environment, log: Logger) -> None:
        if self._mock_runbook.wait_more_resource_error:
            raise ResourceAwaitableException("any", "wait more resource")
        if not self._mock_runbook.deploy_success:
            raise LisaException("mock deploy failed")
        if self._mock_runbook.return_prepared and environment.runbook.nodes_requirement:
            requirements = environment.runbook.nodes_requirement
            for node_space in requirements:
                environment.create_node_from_requirement(node_requirement=node_space)
        for node in environment.nodes.list():
            node._is_initialized = True
        self.test_data.deployed_envs.append(environment.name)
        if self._mock_runbook.deployed_status not in [
            EnvironmentStatus.Deployed,
            EnvironmentStatus.Connected,
        ]:
            raise LisaException(
                f"expected status is {self._mock_runbook.deployed_status}, "
                f"deployment should be failed"
            )

    def _delete_environment(self, environment: Environment, log: Logger) -> None:
        self.test_data.deleted_envs.append(environment.name)
        self.delete_called = True


def generate_platform(
    keep_environment: Optional[Union[str, bool]] = False,
    admin_password: str = "do not use for real",
    admin_key_file: str = "",
) -> MockPlatform:
    runbook_data = {
        constants.TYPE: constants.PLATFORM_MOCK,
        "keep_environment": keep_environment,
        "admin_password": admin_password,
        "admin_private_key_file": admin_key_file,
    }
    runbook = schema.load_by_type(schema.Platform, runbook_data)
    platform = load_platform([runbook])
    platform.initialize()
    try:
        assert isinstance(platform, MockPlatform), f"actual: {type(platform)}"
    except AssertionError:
        # as UT imported from tests package, instead of from lisa.tests package
        # ignore by assign type from current package
        platform = MockPlatform(runbook)
    return platform


def generate_environments() -> Environments:
    envs_runbook = generate_env_runbook(local=True, requirement=True)
    envs = load_environments(envs_runbook)
    return envs


class PlatformTestCase(TestCase):
    def setUp(self) -> None:
        lisa.environment._global_environment_id = 0

    def test_prepared_env_not_success_with_exception(self) -> None:
        platform = generate_platform()
        platform.set_test_config(return_prepared=False)
        envs = generate_environments()
        self.assertEqual(2, len(envs))
        with self.assertRaises(LisaException) as cm:
            [platform.prepare_environment(env) for env in envs.values()]
        self.assertEqual(
            "no capability found for environment: Environment("
            "name='customized_0', topology='subnet', nodes_raw=[{'type': 'local', "
            "'capability': {'core_count': {'min': 4}}}], nodes_requirement=None, "
            "_original_nodes_requirement=None)",
            str(cm.exception),
        )

    def test_prepared_env_success(self) -> None:
        platform = generate_platform()
        platform.set_test_config(return_prepared=True)
        envs = generate_environments()
        self.assertEqual(2, len(envs))
        prepared_environments = [
            platform.prepare_environment(env) for env in envs.values()
        ]
        self.assertEqual(2, len(prepared_environments))

    def test_prepared_env_sorted_predefined_first(self) -> None:
        platform = generate_platform()
        platform.set_test_config()
        envs = generate_environments()
        # verify init as expected
        self.assertListEqual(["customized_0", "customized_1"], [x for x in envs])
        self.assertListEqual([True, True], [x.is_predefined for x in envs.values()])
        # verify stable sort
        envs["customized_1"].is_predefined = False
        prepared_environments = [
            platform.prepare_environment(env) for env in envs.values()
        ]
        prepared_environments.sort(key=lambda x: (not x.is_predefined, x.cost))
        self.assertListEqual(
            ["customized_0", "customized_1"], [x.name for x in prepared_environments]
        )
        self.assertListEqual(
            [True, False], [x.is_predefined for x in prepared_environments]
        )
        # verify reverse sort
        envs["customized_0"].is_predefined = False
        envs["customized_1"].is_predefined = True
        prepared_environments = [
            platform.prepare_environment(env) for env in envs.values()
        ]
        prepared_environments.sort(key=lambda x: (not x.is_predefined, x.cost))
        self.assertListEqual(
            ["customized_1", "customized_0"],
            [x.name for x in prepared_environments],
        )
        self.assertListEqual(
            [True, False], [x.is_predefined for x in prepared_environments]
        )

    def test_prepared_env_sorted_by_cost(self) -> None:
        platform = generate_platform()
        envs = generate_environments()
        platform.set_test_config()
        self.assertListEqual(["customized_0", "customized_1"], [x for x in envs])
        self.assertListEqual([0, 0], [x.cost for x in envs.values()])
        envs["customized_0"].cost = 1
        envs["customized_1"].cost = 2
        prepared_environments = [
            platform.prepare_environment(env) for env in envs.values()
        ]
        prepared_environments.sort(key=lambda x: (not x.is_predefined, x.cost))
        self.assertListEqual(
            ["customized_0", "customized_1"], [x.name for x in prepared_environments]
        )
        self.assertListEqual([1, 2], [x.cost for x in prepared_environments])
        envs["customized_0"].cost = 2
        envs["customized_1"].cost = 1
        prepared_environments = [
            platform.prepare_environment(env) for env in envs.values()
        ]
        prepared_environments.sort(key=lambda x: (not x.is_predefined, x.cost))
        self.assertListEqual(
            ["customized_1", "customized_0"], [x.name for x in prepared_environments]
        )
        self.assertListEqual([1, 2], [x.cost for x in prepared_environments])

    def test_prepared_env_deploy_failed(self) -> None:
        platform = generate_platform()
        envs = generate_environments()
        platform.set_test_config(deploy_success=False)
        for env in envs.values():
            with self.assertRaises(LisaException) as cm:
                platform.deploy_environment(env)
            self.assertEqual("mock deploy failed", str(cm.exception))

    def test_prepared_env_deleted_not_ready(self) -> None:
        platform = generate_platform()
        envs = generate_environments()
        platform.set_test_config()
        for env in envs.values():
            platform.deploy_environment(env)
            self.assertEqual(EnvironmentStatus.Deployed, env.status)
            platform.delete_environment(env)
...
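The mock platform's behavior is driven entirely by set_test_config, so new scenarios follow the same pattern as the tests above. Below is a minimal sketch, not part of the original file, of a test for the wait_more_resource_error flag; it reuses generate_platform and generate_environments defined above and assumes that the public deploy_environment call propagates the ResourceAwaitableException raised inside _deploy_environment.

# Hypothetical additional test case, reusing the helpers from test_platform.py above.
class WaitMoreResourceTestCase(TestCase):
    def setUp(self) -> None:
        lisa.environment._global_environment_id = 0

    def test_deploy_waits_for_more_resource(self) -> None:
        platform = generate_platform()
        # configure the mock so _deploy_environment raises ResourceAwaitableException
        platform.set_test_config(wait_more_resource_error=True)
        envs = generate_environments()
        for env in envs.values():
            # assumption: deploy_environment re-raises the awaitable-resource error
            with self.assertRaises(ResourceAwaitableException):
                platform.deploy_environment(env)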
conftest.py
Source: conftest.py
...
# We only load bert-base-uncased, so we fix the random seed to always get the same randomly generated heads on top
@pytest.fixture(scope="class")
def test_transformer_sequence_classification():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="sequence_classification",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformer",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_sequence_classification_roberta():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL_ROBERTA,
        model_class="sequence_classification",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformer",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_embedding():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="base",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_token_classification():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="token_classification",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_question_answering():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="question_answering",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_explainability():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="question_answering",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_explainability_roberta():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL_ROBERTA,
        model_class="question_answering",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_transformer_generation():
    torch.manual_seed(987654321)
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        model_class="generation",
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="transformers",
    )
    return Transformer()


@pytest.fixture(scope="class")
def test_adapter():
    set_test_config(
        model_name=TRANSFORMER_MODEL,
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        cache=TRANSFORMERS_TESTING_CACHE,
        preloaded_adapters=False,
        model_type="adapter",
    )
    return AdapterTransformer()


@pytest.fixture(scope="class")
def test_sentence_transformer():
    set_test_config(
        model_name=SENTENCE_MODEL,
        disable_gpu=True,
        batch_size=1,
        max_input_size=50,
        model_type="sentence-transformer",
    )
    return SentenceTransformer()


@pytest.fixture(scope="class")
def test_onnx_sequence_classification():
    onnx_path = "./onnx_models/german-bert/model.onnx"
    if os.path.isfile(onnx_path):
        set_test_config(
            model_name=ONNX_MODEL,
            disable_gpu=True,
            batch_size=1,
            max_input_size=50,
            onnx_path=onnx_path,
            model_type="onnx",
        )
        return Onnx()
    else:
        return None


@pytest.fixture(scope="class")
def test_onnx_token_classification():
    onnx_path = "./onnx_models/NER-bert/model.onnx"
    if os.path.isfile(onnx_path):
        set_test_config(
            model_name=ONNX_MODEL,
            disable_gpu=True,
            batch_size=1,
            max_input_size=50,
            onnx_path=onnx_path,
            model_type="onnx",
        )
        return Onnx()
    else:
        return None


@pytest.fixture(scope="class")
def test_onnx_embedding():
    onnx_path = "./onnx_models/bert-base-cased/model.onnx"
    if os.path.isfile(onnx_path):
        set_test_config(
            model_name=ONNX_MODEL,
            disable_gpu=True,
            batch_size=1,
            max_input_size=50,
            onnx_path=onnx_path,
            model_type="onnx",
        )
        return Onnx()
    else:
        return None


@pytest.fixture(scope="class")
def test_onnx_question_answering():
    onnx_path = "./onnx_models/squad2-bert/model.onnx"
    if os.path.isfile(onnx_path):
        set_test_config(
            model_name=ONNX_MODEL,
            disable_gpu=True,
            batch_size=1,
            max_input_size=50,
            onnx_path=onnx_path,
            model_type="onnx",
        )
        return Onnx()
    else:
        return None


@pytest.fixture(scope="class")
def test_onnx_generation():
    onnx_path = "./onnx_models/t5_encoder_decoder/t5-small-encoder.onnx"
    decoder_init_path = "./onnx_models/t5_encoder_decoder/t5-small-init-decoder.onnx"
    if os.path.isfile(onnx_path):
        set_test_config(
            model_name=ONNX_MODEL,
            disable_gpu=True,
            batch_size=1,
            max_input_size=50,
            onnx_path=onnx_path,
            decoder_path=decoder_init_path,
            model_type="onnx",
        )
        return Onnx()
    else:
        return None


@pytest.fixture()
def prediction_request():
    request = PredictionRequest.parse_obj({...
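Because these fixtures live in conftest.py, pytest resolves them by name in any test module of the package. Below is a minimal sketch of a consuming test class; it is not from the original repository, and it only asserts that the fixture yields an object, since the Transformer API itself is not shown in this snippet.

# Hypothetical consuming test module; pytest injects the class-scoped fixture from conftest.py.
class TestSequenceClassificationFixture:
    def test_fixture_returns_model(self, test_transformer_sequence_classification):
        # scope="class" means the Transformer is built once and shared by every
        # test method in this class
        model = test_transformer_sequence_classification
        assert model is not None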
create_test_config.py
Source: create_test_config.py
...
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import sys


def set_test_config(project, instance):
    config = configparser.ConfigParser()
    url = (
        f"spanner+spanner:///projects/{project}/instances/{instance}/"
        "databases/compliance-test"
    )
    config.add_section("db")
    config["db"]["default"] = url
    with open("test.cfg", "w") as configfile:
        config.write(configfile)


def main(argv):
    project = argv[0]
    instance = argv[1]
    set_test_config(project, instance)


if __name__ == "__main__":
...
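The script writes a single [db] section to test.cfg in the working directory. A quick read-back sketch is shown below; the project and instance names are placeholders, not values from the original repository.

# Sanity check of the generated file, assuming the script was invoked as
#   python create_test_config.py my-project my-instance   (placeholder arguments)
import configparser

config = configparser.ConfigParser()
config.read("test.cfg")
print(config["db"]["default"])
# expected output for the placeholders above:
# spanner+spanner:///projects/my-project/instances/my-instance/databases/compliance-test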
