How to use the record_xml_attribute method in Pytest

Best Python code snippets using pytest
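record_xml_attribute is a built-in pytest fixture that adds extra attributes to the current test's <testcase> element in the JUnit XML report written with --junitxml. Unlike record_property, it does not add a child element; it sets an attribute directly on <testcase>, which is handy for feeding metadata (owners, scores, device tags, overridden test names) into CI dashboards. In recent pytest releases the attributes only appear when junit_family is set to xunit1; the newer xunit2 family (the current default) drops them with a warning. Before the real-world snippets below, here is a minimal sketch; the file name, attribute names, and values are illustrative, not taken from any of the snippets.

# test_example.py -- minimal illustration of the record_xml_attribute fixture.
# pytest.ini is assumed to contain:
#   [pytest]
#   junit_family = xunit1
# because the xunit2 report family ignores attributes added this way.

def test_report_attributes(record_xml_attribute):
    # Each call adds one attribute to this test's <testcase> element
    # in the file produced by `pytest --junitxml=report.xml`.
    record_xml_attribute("test_id", "TC-1042")
    record_xml_attribute("classname", "smoke.reporting")
    assert 2 + 2 == 4

Running pytest --junitxml=report.xml then produces a testcase entry along the lines of <testcase classname="smoke.reporting" name="test_report_attributes" test_id="TC-1042" ...>.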

kf_is_ready_test.py

Source: kf_is_ready_test.py (GitHub)

import logging
import os
import yaml
import pytest
from kubeflow.testing import util
from kubeflow.kfctl.testing.util import deploy_utils
from kubeflow.kfctl.testing.util import aws_util as kfctl_aws_util

def set_logging():
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)

def get_platform_app_name(app_path):
  with open(os.path.join(app_path, "tmp.yaml")) as f:
    kfdef = yaml.safe_load(f)
  app_name = kfdef["metadata"]["name"]
  platform = ""
  apiVersion = kfdef["apiVersion"].strip().split("/")
  if len(apiVersion) != 2:
    raise RuntimeError("Invalid apiVersion: " + kfdef["apiVersion"].strip())
  if apiVersion[-1] == "v1alpha1":
    platform = kfdef["spec"]["platform"]
  elif apiVersion[-1] in ["v1beta1", "v1"]:
    for plugin in kfdef["spec"].get("plugins", []):
      if plugin.get("kind", "") == "KfGcpPlugin":
        platform = "gcp"
      elif plugin.get("kind", "") == "KfAwsPlugin":
        platform = "aws"
      elif plugin.get("kind", "") == "KfExistingArriktoPlugin":
        platform = "existing_arrikto"
      else:
        # Indicate agnostic Kubeflow Platform
        platform = "agnostic"
  else:
    raise RuntimeError("Unknown version: " + apiVersion[-1])
  return platform, app_name

def check_deployments_ready(record_xml_attribute, namespace, name, deployments, cluster_name):
  """Test that Kubeflow deployments are successfully deployed.
  Args:
    namespace: The namespace Kubeflow is deployed to.
  """
  set_logging()
  util.set_pytest_junit(record_xml_attribute, name)
  kfctl_aws_util.aws_auth_load_kubeconfig(cluster_name)
  api_client = deploy_utils.create_k8s_client()
  for deployment_name in deployments:
    logging.info("Verifying that deployment %s started...", deployment_name)
    util.wait_for_deployment(api_client, namespace, deployment_name, 10)

def test_admission_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "admission-webhook-deployment"
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_admission_is_ready", deployment_names,
                          cluster_name)

def test_katib_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "katib-controller",
    "katib-mysql",
    "katib-db-manager",
    "katib-ui",
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_katib_is_ready", deployment_names,
                          cluster_name)

def test_metadata_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "metadata-grpc-deployment",
    "metadata-db",
    "metadata-envoy-deployment",
    "metadata-writer",
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_metadata_is_ready", deployment_names,
                          cluster_name)

def test_pipeline_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "argo-ui",
    "cache-deployer-deployment",
    "cache-server",
    "kubeflow-pipelines-profile-controller",
    "minio",
    "ml-pipeline",
    "ml-pipeline-persistenceagent",
    "ml-pipeline-scheduledworkflow",
    "ml-pipeline-ui",
    "ml-pipeline-viewer-crd",
    "ml-pipeline-visualizationserver",
    "mysql",
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_pipeline_is_ready", deployment_names,
                          cluster_name)

def test_notebook_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "jupyter-web-app-deployment",
    "notebook-controller-deployment",
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_notebook_is_ready", deployment_names,
                          cluster_name)

def test_centraldashboard_is_ready(record_xml_attribute, namespace, cluster_name):
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_centraldashboard_is_ready", ["centraldashboard"],
                          cluster_name)

def test_profiles_is_ready(record_xml_attribute, namespace, cluster_name):
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_profile_is_ready", ["profiles-deployment"],
                          cluster_name)

def test_seldon_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "seldon-controller-manager"
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_seldon_is_ready", deployment_names,
                          cluster_name)

def test_spark_is_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "spark-operatorsparkoperator"
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_spark_is_ready", deployment_names,
                          cluster_name)

def test_training_operators_are_ready(record_xml_attribute, namespace, cluster_name):
  deployment_names = [
    "mpi-operator",
    "mxnet-operator",
    "pytorch-operator",
    "tf-job-operator",
  ]
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_training_is_ready", deployment_names,
                          cluster_name)

def test_workflow_controller_is_ready(record_xml_attribute, namespace, cluster_name):
  check_deployments_ready(record_xml_attribute, namespace,
                          "test_workflow_controller_is_ready", ["workflow-controller"],
                          cluster_name)

def test_kf_is_ready(record_xml_attribute, namespace, use_basic_auth,
                     app_path, cluster_name):
  """Test that Kubeflow was successfully deployed.
  Args:
    namespace: The namespace Kubeflow is deployed to.
  """
  set_logging()
  util.set_pytest_junit(record_xml_attribute, "test_kf_is_ready")
  kfctl_aws_util.aws_auth_load_kubeconfig(cluster_name)
  api_client = deploy_utils.create_k8s_client()
  # Verify that components are actually deployed.
  deployment_names = []
  stateful_set_names = []
  daemon_set_names = []
  platform, _ = get_platform_app_name(app_path)
  # TODO(PatrickXYS): not sure why istio-galley can't found
  ingress_related_deployments = [
    "cluster-local-gateway",
    "istio-citadel",
    "istio-ingressgateway",
    "istio-pilot",
    "istio-policy",
    "istio-sidecar-injector",
    "istio-telemetry",
    "prometheus",
  ]
  ingress_related_stateful_sets = []
  knative_namespace = "knative-serving"
  knative_related_deployments = [
    "activator",
    "autoscaler",
    "controller",
    "networking-istio",
    "webhook",
  ]
  if platform == "gcp":
    deployment_names.extend(["cloud-endpoints-controller"])
    stateful_set_names.extend(["kfserving-controller-manager"])
    if use_basic_auth:
      deployment_names.extend(["basic-auth-login"])
      ingress_related_stateful_sets.extend(["backend-updater"])
    else:
      ingress_related_deployments.extend(["iap-enabler"])
      ingress_related_stateful_sets.extend(["backend-updater"])
  elif platform == "existing_arrikto":
    deployment_names.extend(["dex"])
    ingress_related_deployments.extend(["authservice"])
    knative_related_deployments = []
  elif platform == "aws":
    # TODO(PatrickXYS): Extend List with AWS Deployment
    deployment_names.extend(["alb-ingress-controller"])
    daemon_set_names.extend(["nvidia-device-plugin-daemonset"])
  # TODO(jlewi): Might want to parallelize this.
  for deployment_name in deployment_names:
    logging.info("Verifying that deployment %s started...", deployment_name)
    util.wait_for_deployment(api_client, namespace, deployment_name, 10)
  ingress_namespace = "istio-system"
  for deployment_name in ingress_related_deployments:
    logging.info("Verifying that deployment %s started...", deployment_name)
    util.wait_for_deployment(api_client, ingress_namespace, deployment_name, 10)
  all_stateful_sets = [(namespace, name) for name in stateful_set_names]
  all_stateful_sets.extend([(ingress_namespace, name) for name in ingress_related_stateful_sets])
  for ss_namespace, name in all_stateful_sets:
    logging.info("Verifying that stateful set %s.%s started...", ss_namespace, name)
    try:
      util.wait_for_statefulset(api_client, ss_namespace, name)
    except:
      # Collect debug information by running describe
      util.run(["kubectl", "-n", ss_namespace, "describe", "statefulsets", name])
      raise
  all_daemon_sets = [(namespace, name) for name in daemon_set_names]
  for ds_namespace, name in all_daemon_sets:
    logging.info("Verifying that daemonset set %s.%s started...", ds_namespace, name)
    try:
      util.wait_for_daemonset(api_client, ds_namespace, name)
    except:
      # Collect debug information by running describe
      util.run(["kubectl", "-n", ds_namespace, "describe", "daemonset", name])
      raise
  ingress_names = ["istio-ingress"]
  # Check if Ingress is Ready and Healthy
  if platform in ["aws"]:
    for ingress_name in ingress_names:
      logging.info("Verifying that ingress %s started...", ingress_name)
      util.wait_for_ingress(api_client, ingress_namespace, ingress_name, 10)
  for deployment_name in knative_related_deployments:
    logging.info("Verifying that deployment %s started...", deployment_name)
    util.wait_for_deployment(api_client, knative_namespace, deployment_name, 10)
  # Check if Dex is Ready and Healthy
  dex_deployment_names = ["dex"]
  dex_namespace = "auth"
  for dex_deployment_name in dex_deployment_names:
    logging.info("Verifying that deployment %s started...", dex_deployment_name)
    util.wait_for_deployment(api_client, dex_namespace, dex_deployment_name, 10)
  # Check if Cert-Manager is Ready and Healthy
  cert_manager_deployment_names = [
    "cert-manager",
    "cert-manager-cainjector",
    "cert-manager-webhook",
  ]
  cert_manager_namespace = "cert-manager"
  for cert_manager_deployment_name in cert_manager_deployment_names:
    logging.info("Verifying that deployment %s started...", cert_manager_deployment_name)
    util.wait_for_deployment(api_client, cert_manager_namespace, cert_manager_deployment_name, 10)

if __name__ == "__main__":
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)
  ...
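In kf_is_ready_test.py, every test requests record_xml_attribute and hands it to util.set_pytest_junit from the kubeflow.testing package, which uses the fixture to relabel the JUnit testcase so that the shared check_deployments_ready helper reports under each caller's own test name. The helper below is a hypothetical stand-in that shows the pattern, assuming set_pytest_junit simply overrides the name and classname attributes; it is not the library's actual source.

# Hypothetical stand-in for kubeflow.testing's util.set_pytest_junit (sketch only).
def set_pytest_junit(record_xml_attribute, test_name, classname="KubeflowReadiness"):
    """Relabel the current test in the JUnit XML report."""
    record_xml_attribute("name", test_name)
    record_xml_attribute("classname", classname)

Overriding the name this way keeps each entry in the report tied to the calling test (test_katib_is_ready, test_pipeline_is_ready, and so on) even though they all funnel through the same helper.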


test.py

Source: test.py (GitHub)


...
    time, result = timing(flood_tool.get_easting_northing_from_lat_long, *args)
    matches = [r == approx(np.array(o),
                           abs=tol) for r, o in zip(np.array(result).T, output)]
    record_property('multiple_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data[name]))

@fixture(scope="module")
def timed_tool(data, flood_tool):
    t0 = timer()
    postcode_file = os.sep.join([BASE_PATH] + data["postcode file"])
    risk_file = os.sep.join([BASE_PATH] + data["flood probability file"])
    value_file = os.sep.join([BASE_PATH] + data["property value file"])
    out = flood_tool.Tool(postcode_file,
                          risk_file,
                          value_file)
    t1 = timer()
    return t1 - t0, out

@fixture(scope="module")
def tool(timed_tool):
    return timed_tool[1]

@mark.timeout(timeouts["tool"])
def test_tool(timed_tool, record_property, record_xml_attribute):
    record_property('tool initialization', (timed_tool[0], [True]))
    record_xml_attribute('points', 100)

@mark.timeout(timeouts["get_lat_long"])
def test_get_lat_long(data, testdb, tool,
                      record_property, record_xml_attribute):
    rel = data['get_lat_long']['tolerance']
    output = np.array([[51.379129999999996, 1.3067440000000001]])
    time, result = timing(tool.get_lat_long, ['CT7 9ET'])
    matches = [r == approx(o, rel=rel) for r, o in zip(result, output)]
    record_property('single_postcode_lookup',
                    (time, matches))
    input_headings = data['get_lat_long']['input headings']
    output_headings = data['get_lat_long']['output headings']
    idx1 = data['get_lat_long']['idx1']
    idx2 = data['get_lat_long']['idx2']
    args = list(testdb.iloc[idx1:idx2][input_headings].to_numpy().ravel())
    output = testdb.iloc[idx1:idx2][output_headings].to_numpy()
    time, result = timing(tool.get_lat_long, args)
    matches = [r == approx(np.array(o),
                           rel=rel) for r, o in zip(result, output)]
    record_property('multiple_postcode_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data['get_lat_long']))

@mark.timeout(timeouts["get_easting_northing_flood_probability"])
def test_get_easting_northing_flood_probability(data, testdb, tool, record_property, record_xml_attribute):
    name = 'get_easting_northing_flood_probability'
    output = ['Zero']
    inputs = [[298169], [519487]]
    time, result = timing(getattr(tool, name),
                          *inputs)
    matches = [r == o for r, o in zip(result, output)]
    record_property('single_lookup', (time, matches))
    input_headings = data[name]['input headings']
    output_headings = data[name]['output headings']
    idx1 = data[name]['idx1']
    idx2 = data[name]['idx2']
    args = list(testdb.iloc[idx1:idx2][input_headings].to_numpy().T)
    output = list(testdb.iloc[idx1:idx2][output_headings].to_numpy().ravel())
    time, result = timing(getattr(tool, name), *args)
    matches = [r == o for r, o in zip(result, output)]
    record_property('multiple_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data[name]))

@mark.timeout(timeouts["get_sorted_flood_probability"])
def test_get_sorted_flood_probability(data, testdb, tool, record_property, record_xml_attribute):
    name = 'get_sorted_flood_probability'
    idx1 = data[name]['idx1']
    idx2 = data[name]['idx2']
    idx3 = data[name]['idx3']
    idx4 = data[name]['idx4']
    args = testdb.iloc[idx1:idx2]['Postcode'].to_numpy().ravel()
    output = testdb.iloc[idx3:idx4][['Postcode', 'Probability Band']]
    output.drop_duplicates(subset='Postcode', inplace=True)
    output.set_index('Postcode', inplace=True)
    time, result = timing(getattr(tool, name), args)
    assert result.index.name == 'Postcode'
    matches = list((result.index == output.index) &
                   (result['Probability Band'].to_numpy().ravel()
                    == output['Probability Band'].to_numpy().ravel()))
    record_property('multiple_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data[name]))

@mark.timeout(timeouts["get_flood_cost"])
def test_get_flood_cost(data, testdb, tool,
                        record_property, record_xml_attribute):
    name = 'get_flood_cost'
    rel = data[name]['tolerance']
    output = np.array([4646599.42])
    time, result = timing(getattr(tool, name), ['TN8 6AB'])
    matches = [r == approx(o) for r, o in zip(result, output)]
    record_property('single_postcode_lookup',
                    (time, matches))
    input_headings = data[name]['input headings']
    output_headings = data[name]['output headings']
    idx1 = data[name]['idx1']
    idx2 = data[name]['idx2']
    args = list(testdb.iloc[idx1:idx2][input_headings].to_numpy().T)
    output = list(testdb.iloc[idx1:idx2][output_headings].to_numpy().ravel())
    time, result = timing(getattr(tool, name), *args)
    matches = [r == approx(o, rel) for r, o in zip(result, output)]
    record_property('multiple_postcode_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data[name]))

@mark.timeout(timeouts["get_annual_flood_risk"])
def test_get_annual_flood_risk(data, testdb, tool,
                               record_property, record_xml_attribute):
    name = 'get_annual_flood_risk'
    rel = data[name]['tolerance']
    output = np.array([193.506606])
    time, result = timing(getattr(tool, name), ['DA1 5NU'], ['Very Low'])
    matches = [r == approx(o) for r, o in zip(result, output)]
    record_property('single_postcode_lookup',
                    (time, matches))
    input_headings = data[name]['input headings']
    output_headings = data[name]['output headings']
    idx1 = data[name]['idx1']
    idx2 = data[name]['idx2']
    args = list(testdb.iloc[idx1:idx2][input_headings].to_numpy().T)
    output = testdb.iloc[idx1:idx2][output_headings].to_numpy().ravel()
    time, result = timing(getattr(tool, name), *args)
    matches = [r == approx(o, rel) for r, o in zip(result, output)]
    record_property('multiple_postcode_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   data[name]))

@mark.timeout(timeouts["get_sorted_annual_flood_risk"])
def test_get_sorted_annual_flood_risk(data, testdb, tool,
                                      record_property, record_xml_attribute):
    name = 'get_sorted_annual_flood_risk'
    rel = data[name]['tolerance']
    idx1 = data[name]['idx1']
    idx2 = data[name]['idx2']
    idx3 = data[name]['idx3']
    idx4 = data[name]['idx4']
    args = testdb.iloc[idx1:idx2][['Postcode']].to_numpy().ravel()
    output = testdb.iloc[idx3:idx4][['Postcode', 'Flood Risk']]
    output.drop_duplicates(subset='Postcode', inplace=True)
    output.set_index('Postcode', inplace=True)
    time, result = timing(getattr(tool, name), args)
    assert result.index.name == 'Postcode'
    matches = list((result.index == output.index) &
                   (result['Flood Risk'].to_numpy().ravel()
                    == approx(output['Flood Risk'].to_numpy().ravel(), rel)))
    record_property('multiple_postcode_lookup', (time, matches))
    record_xml_attribute('points', calculate_score(time, matches,
                                                   ...
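This grading suite records a 'points' attribute on every testcase (via calculate_score) alongside timing data captured with record_property, so the scores can be read straight back out of the JUnit XML after the run. A small sketch of that post-processing step, assuming the report was produced with --junitxml=report.xml and junit_family = xunit1:

# read_points.py -- sum the 'points' attributes recorded on each <testcase>.
import xml.etree.ElementTree as ET

def collect_points(report_path="report.xml"):
    root = ET.parse(report_path).getroot()
    # xunit1 reports nest <testcase> elements under one or more <testsuite> elements.
    return sum(float(tc.get("points", 0)) for tc in root.iter("testcase"))

if __name__ == "__main__":
    print(collect_points())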


test_wtwn_all_levels.py

Source: test_wtwn_all_levels.py (GitHub)


import pytest
import json
from pytest_lib import config, add_device_tag, add_mso_tag
from common.base import Base
from pytest_dependency import depends
import time
from synth_test_lib.synthassert import synthassert

@pytest.mark.synthetic_tests_wtwn
@pytest.mark.parametrize('device_domain', config["device_domains"])
class TestWtwn:
    '''
    TITLE: WTWN Multi-Level Validation
    DESCRIPTION:
    Validates that the PCD Discovery endpoints are correctly configured and that all WTWN feeds are working.
    '''
    base = Base()
    feedItems = {}
    thirdLevelFeedItems = {}

    @staticmethod
    def get_device_config(request, record_xml_attribute, device_domain):
        device_config = config["device_domain_config"][device_domain]
        add_device_tag(
            request=request,
            record_xml_attribute=record_xml_attribute,
            device=device_config['deviceType']
        )
        add_mso_tag(
            request=request,
            record_xml_attribute=record_xml_attribute,
            mso=device_config['mso']
        )
        return device_config

    @pytest.mark.dependency()
    def test_wtwn_root_level(self, device_domain, request, record_xml_attribute, generate_mind_url,
                             generate_json_headers):
        device_domain_config = self.get_device_config(request, record_xml_attribute, device_domain)
        rootfeedname = device_domain_config.get("rootFeedName")
        rootcaption = device_domain_config.get("rootCaption")
        response = self.base.request_api(
            url=generate_mind_url,
            payload=json.dumps(
                self.base.wtwn_payload(device=device_domain_config.get("deviceType"), feedname=rootfeedname,
                                       device_domain_config=device_domain_config)),
            headers=generate_json_headers,
            urlparams={"type": "feedItemFind", "bodyId": device_domain_config.get("bodyId")}
        )
        try:
            json_data = response.json()
            synthassert(
                "caption" in json_data,
                message="Caption attribute missing in response",
                response=response)
            synthassert(
                json_data["caption"] == rootcaption,
                message="Caption attribute value different than:" + str(rootcaption),
                response=response)
            self.base.assert_feeditemfind_response(response)
            synthassert(
                "items" in json_data,
                message="Items attribute not present in response",
                response=response)
            for item in json_data["items"]:
                self.base.assert_feedname_in_response(response, item)
                feedname = item["kernel"]["expandedFeedAction"]["feedName"]
                if feedname == "/liveTvApps":
                    continue
                TestWtwn.feedItems.setdefault(rootcaption, []).append(feedname)
        except ValueError:
            synthassert(False, message="Decoding JSON from the response failed", response=response)

    @pytest.mark.dependency()
    @pytest.mark.flaky(reruns=3)
    def test_wtwn_second_level(self, request, record_xml_attribute, device_domain, generate_mind_url,
                               generate_json_headers):
        depends(request, ["TestWtwn::test_wtwn_root_level[{}]".format(device_domain)])
        device_domain_config = self.get_device_config(request, record_xml_attribute, device_domain)
        rootcaption = device_domain_config.get("rootCaption")
        for feedItem in TestWtwn.feedItems[rootcaption]:
            response = self.base.request_api(
                url=generate_mind_url,
                payload=json.dumps(
                    self.base.wtwn_payload(device=device_domain_config.get("deviceType"), feedname=feedItem,
                                           device_domain_config=device_domain_config)),
                headers=generate_json_headers,
                urlparams={"type": "feedItemFind", "bodyId": device_domain_config.get("bodyId")}
            )
            try:
                json_data = response.json()
                self.base.assert_feeditemfind_response(response)
                synthassert(
                    "items" in json_data,
                    message="Items attribute not present in response",
                    response=response)
                for item in json_data["items"]:
                    self.base.assert_feedname_in_response(response, item)
                    feedname = item["kernel"]["expandedFeedAction"]["feedName"]
                    if feedname == "/liveTvApps":
                        continue
                    TestWtwn.thirdLevelFeedItems.setdefault(rootcaption, []).append(feedname)
            except ValueError:
                synthassert(False, message="Decoding JSON from the response failed", response=response)

    @pytest.mark.dependency()
    @pytest.mark.flaky(reruns=3)
    def test_wtwn_third_level(self, request, record_xml_attribute, device_domain, generate_mind_url,
                              generate_json_headers):
        depends(request, ["TestWtwn::test_wtwn_second_level[{}]".format(device_domain)])
        device_domain_config = self.get_device_config(request, record_xml_attribute, device_domain)
        rootcaption = device_domain_config.get("rootCaption")
        for thirdLevelFeedItem in TestWtwn.thirdLevelFeedItems[rootcaption]:
            time.sleep(1)
            response = self.base.request_api(
                url=generate_mind_url,
                payload=json.dumps(
                    self.base.wtwn_payload(device=device_domain_config.get("deviceType"), feedname=thirdLevelFeedItem,
                                           device_domain_config=device_domain_config)),
                headers=generate_json_headers,
                urlparams={"type": "feedItemFind", "bodyId": device_domain_config.get("bodyId")}
            )
            try:
                self.base.assert_feeditemfind_response(response)
            except ValueError:
                ...
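Here record_xml_attribute is not called directly in the tests; it is forwarded into helper functions (add_device_tag, add_mso_tag) so that every testcase in the report is tagged with the device type and MSO it ran against. A hypothetical sketch of such a tagging helper (the project's real pytest_lib implementation may differ):

# Hypothetical tagging helper in the spirit of add_device_tag (illustrative only).
def add_device_tag(request, record_xml_attribute, device):
    # Attribute on the JUnit <testcase> element...
    record_xml_attribute("device", device)
    # ...and a matching user property on the live test item for other reporters.
    request.node.user_properties.append(("device", device))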


Test_Common.py

Source: Test_Common.py (GitHub)


...
    def teardown_class(self):
        self.wd.quit()

    @fixture()
    def settings(self, record_xml_attribute):
        record_xml_attribute('usermode', self.home_page.cook[1])
        self.home_page.load_and_valid_page(self.home_page.Parameters)

    def test_contact(self, settings):
        assert self.home_page.find_and_click(self.home_page.Contact['Contact'])
        assert self.home_page.link_check(self.home_page.Contact['Contact_page'])

    def test_about(self, settings):
        assert self.home_page.find_and_click(self.home_page.About['About'])
        assert self.home_page.link_check(self.home_page.About['About_page'])

class TestHeadHamburger:
    def setup_class(self):
        self.wd = HomePage.Home.browser()
        self.home_page = HomePage.Home(self.wd)
        self.wd.set_window_size(self.home_page.screensize[0]//2, self.home_page.screensize[1])
        self.home_page.smoke_load_and_valid_page(self.home_page.Parameters)
        if self.home_page.cook[0]:
            self.home_page.cookies_activate()

    def teardown_class(self):
        self.wd.quit()

    @fixture()
    def settings(self, record_xml_attribute):
        record_xml_attribute('user_mode', self.home_page.cook[1])
        self.home_page.load_and_valid_page(self.home_page.Parameters)
        self.home_page.find_and_click(self.home_page.Hamburger['Toggle'])

    def test_opened(self, settings):
        assert self.home_page.find(self.home_page.Hamburger['Body']).get_attribute('className') == self.home_page\
            .Hamburger['Body_class']['is']

    def test_exit(self, settings):
        assert self.home_page.find_and_click(self.home_page.Hamburger['Toggle'])
        assert self.home_page.find(self.home_page.Hamburger['Body']).get_attribute('className') == self.home_page\
            .Hamburger['Body_class']['no']

    def test_tools_menu(self, settings):
        assert self.home_page.find_and_click(self.home_page.Hamburger['Tools'])
        assert self.home_page.find_visible(self.home_page.Hamburger['Tools_ddm_list'])
        assert self.home_page.find(self.home_page.Hamburger['Tools']).get_attribute('className') == self.home_page\
            .Hamburger['Tools_class']['dropped']
        ...
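In Test_Common.py the call sits inside a settings fixture, so every test that requests the fixture gets a usermode/user_mode attribute on its testcase without repeating the call. A generic sketch of that pattern (names and values are illustrative):

import pytest

@pytest.fixture()
def settings(record_xml_attribute):
    # Every test that uses this fixture gets the attribute on its <testcase>.
    record_xml_attribute("user_mode", "cookies_accepted")
    yield  # the test body runs here; put per-test teardown after the yield

def test_homepage_loads(settings):
    assert True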


Pytest Tutorial

Looking for an in-depth pytest tutorial? LambdaTest's detailed pytest tutorial covers everything from setting up the pytest framework to automation testing. Delve deeper into pytest by exploring advanced use cases like parallel testing, pytest fixtures, parameterization, executing multiple test cases from a single file, and more.

Chapters

  1. What is pytest
  2. Pytest installation: Want to start pytest from scratch? See how to install and configure pytest for Python automation testing.
  3. Run first test with pytest framework: Follow this step-by-step tutorial to write and run your first pytest script.
  4. Parallel testing with pytest: A hands-on guide to parallel testing with pytest to improve the scalability of your test automation.
  5. Generate pytest reports: Reports make it easier to understand the results of pytest-based test runs. Learn how to generate pytest reports.
  6. Pytest Parameterized tests: Create and run your pytest scripts while avoiding code duplication and increasing test coverage with parameterization (see the short sketch after this list).
  7. Pytest Fixtures: Check out how to implement pytest fixtures for your end-to-end testing needs.
  8. Execute Multiple Test Cases: Explore different scenarios for running multiple test cases in pytest from a single file.
  9. Stop Test Suite after N Test Failures: See how to stop your test suite after N test failures in pytest using the @pytest.mark.incremental marker and the maxfail command-line option.
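A minimal sketch pulling a few of these chapters together: a fixture, a parametrized test, and a run command that stops early and writes a JUnit report. The file name, fixture, and parameter values are illustrative.

# test_addition.py
import pytest

@pytest.fixture
def calculator():
    # Trivial stand-in for whatever object your real tests exercise.
    return lambda a, b: a + b

@pytest.mark.parametrize("a, b, expected", [(1, 2, 3), (2, 5, 7), (10, -4, 6)])
def test_addition(calculator, a, b, expected):
    assert calculator(a, b) == expected

Run it with: pytest test_addition.py --maxfail=2 --junitxml=report.xml (abort after two failures and write a JUnit report).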

YouTube

Skim through the pytest tutorial playlist below to get started with automation testing using the pytest framework.

https://www.youtube.com/playlist?list=PLZMWkkQEwOPlcGgDmHl8KkXKeLF83XlrP

