Best Python code snippet using avocado_python
check.py
Source:check.py  
...197        default=[],198    )199    arg = parser.parse_args()200    return arg201def create_suite_job_api(args):  # pylint: disable=W0621202    suites = []203    def get_ref(method_short_name):204        return [f"{__file__}:JobAPIFeaturesTest.test_{method_short_name}"]205    # ========================================================================206    # Test if the archive file was created207    # ========================================================================208    config_check_archive_file_exists = {209        "resolver.references": get_ref("check_archive_file_exists"),210        "run.dict_variants.variant_id_keys": ["namespace", "value"],211        "run.dict_variants": [212            {"namespace": "run.results.archive", "value": True, "assert": True},213        ],214    }215    suites.append(216        TestSuite.from_config(217            config_check_archive_file_exists, f"job-api-{len(suites) + 1}"218        )219    )220    # ========================================================================221    # Test if the category directory was created222    # ========================================================================223    config_check_category_directory_exists = {224        "resolver.references": get_ref("check_category_directory_exists"),225        "run.dict_variants.variant_id_keys": ["namespace", "value"],226        "run.dict_variants": [227            {"namespace": "run.job_category", "value": "foo", "assert": True},228        ],229    }230    suites.append(231        TestSuite.from_config(232            config_check_category_directory_exists, f"job-api-{len(suites) + 1}"233        )234    )235    # ========================================================================236    # Test if a directory was created237    # ========================================================================238    config_check_directory_exists = {239        "resolver.references": get_ref("check_directory_exists"),240        "run.dict_variants.variant_id_keys": ["namespace", "value"],241        "run.dict_variants": [242            {243                "namespace": "sysinfo.collect.enabled",244                "value": True,245                "directory": "sysinfo",246                "assert": True,247            },248            {249                "namespace": "sysinfo.collect.enabled",250                "value": False,251                "directory": "sysinfo",252                "assert": False,253            },254        ],255    }256    suites.append(257        TestSuite.from_config(258            config_check_directory_exists, f"job-api-{len(suites) + 1}"259        )260    )261    # ========================================================================262    # Test the content of a file263    # ========================================================================264    config_check_file_content = {265        "resolver.references": get_ref("check_file_content"),266        "run.dict_variants.variant_id_keys": ["namespace", "value", "file"],267        "run.dict_variants": [268            # finding the correct 'content' here is trick because any269            # simple string is added to the variant file name and is270            # found in the log file.271            # Using DEBUG| makes the variant name have DEBUG_, working272            # fine here.273            {274                "namespace": "job.output.loglevel",275                "value": "INFO",276                "file": "job.log",277                "content": r"DEBUG\| Test metadata:$",278                "assert": False,279                "regex": True,280            },281            {282                "namespace": "job.run.result.tap.include_logs",283                "value": True,284                "file": "results.tap",285                "reference": ["examples/tests/passtest.py:PassTest.test"],286                "content": "PASS 1-examples/tests/passtest.py:PassTest.test",287                "assert": True,288            },289            {290                "namespace": "job.run.result.tap.include_logs",291                "value": False,292                "file": "results.tap",293                "content": "Command '/bin/true' finished with 0",294                "assert": False,295            },296            {297                "namespace": "job.run.result.xunit.job_name",298                "value": "foo",299                "file": "results.xml",300                "content": 'name="foo"',301                "assert": True,302            },303            {304                "namespace": "job.run.result.xunit.max_test_log_chars",305                "value": 1,306                "file": "results.xml",307                "content": "--[ CUT DUE TO XML PER TEST LIMIT ]--",308                "assert": True,309                "reference": ["examples/tests/failtest.py:FailTest.test"],310                "exit_code": 1,311            },312            {313                "namespace": "run.failfast",314                "value": True,315                "file": "results.json",316                "content": '"skip": 1',317                "assert": True,318                "reference": ["/bin/false", "/bin/true"],319                "exit_code": 9,320                "extra_job_config": {"nrunner.max_parallel_tasks": 1},321            },322            {323                "namespace": "run.ignore_missing_references",324                "value": "on",325                "file": "results.json",326                "content": '"pass": 1',327                "assert": True,328                "reference": ["/bin/true", "foo"],329            },330            {331                "namespace": "run.unique_job_id",332                "value": "abcdefghi",333                "file": "job.log",334                "content": "Job ID: abcdefghi",335                "assert": True,336            },337            {338                "namespace": "job.run.timeout",339                "value": 1,340                "reference": ["examples/tests/sleeptenmin.py"],341                "file": "job.log",342                "content": "RuntimeError: Test interrupted by SIGTERM",343                "assert": True,344                "exit_code": 8,345            },346        ],347    }348    suites.append(349        TestSuite.from_config(config_check_file_content, f"job-api-{len(suites) + 1}")350    )351    # ========================================================================352    # Test if the result file was created353    # ========================================================================354    config_check_file_exists = {355        "resolver.references": get_ref("check_file_exists"),356        "run.dict_variants.variant_id_keys": ["namespace", "value"],357        "run.dict_variants": [358            {359                "namespace": "job.run.result.json.enabled",360                "value": True,361                "file": "results.json",362                "assert": True,363            },364            {365                "namespace": "job.run.result.json.enabled",366                "value": False,367                "file": "results.json",368                "assert": False,369            },370            {371                "namespace": "job.run.result.tap.enabled",372                "value": True,373                "file": "results.tap",374                "assert": True,375            },376            {377                "namespace": "job.run.result.tap.enabled",378                "value": False,379                "file": "results.tap",380                "assert": False,381            },382            {383                "namespace": "job.run.result.xunit.enabled",384                "value": True,385                "file": "results.xml",386                "assert": True,387            },388            {389                "namespace": "job.run.result.xunit.enabled",390                "value": False,391                "file": "results.xml",392                "assert": False,393            },394            {395                "namespace": "run.dry_run.enabled",396                "value": True,397                "file": "job.log",398                "assert": False,399            },400            {401                "namespace": "run.dry_run.no_cleanup",402                "value": True,403                "file": "job.log",404                "assert": True,405            },406            {407                "namespace": "plugins.disable",408                "value": ["result.xunit"],409                "file": "result.xml",410                "assert": False,411            },412            # this test needs a huge improvement413            {414                "namespace": "run.journal.enabled",415                "value": True,416                "file": ".journal.sqlite",417                "assert": True,418            },419        ],420    }421    if (422        python_module_available("avocado-framework-plugin-result-html")423        and "html" not in args.disable_plugin_checks424    ):425        config_check_file_exists["run.dict_variants"].append(426            {427                "namespace": "job.run.result.html.enabled",428                "value": True,429                "file": "results.html",430                "assert": True,431            }432        )433        config_check_file_exists["run.dict_variants"].append(434            {435                "namespace": "job.run.result.html.enabled",436                "value": False,437                "file": "results.html",438                "assert": False,439            }440        )441    suites.append(442        TestSuite.from_config(config_check_file_exists, f"job-api-{len(suites) + 1}")443    )444    # ========================================================================445    # Test if a file was created446    # ========================================================================447    config_check_output_file = {448        "resolver.references": get_ref("check_output_file"),449        "run.dict_variants.variant_id_keys": ["namespace", "file"],450        "run.dict_variants": [451            {452                "namespace": "job.run.result.json.output",453                "file": "custom.json",454                "assert": True,455            },456            # https://github.com/avocado-framework/avocado/issues/4034457            {458                "namespace": "job.run.result.tap.output",459                "file": "custom.tap",460                "assert": True,461            },462            {463                "namespace": "job.run.result.xunit.output",464                "file": "custom.xml",465                "assert": True,466            },467        ],468    }469    if (470        python_module_available("avocado-framework-plugin-result-html")471        and "html" not in args.disable_plugin_checks472    ):473        config_check_output_file["run.dict_variants"].append(474            {475                "namespace": "job.run.result.html.output",476                "file": "custom.html",477                "assert": True,478            }479        )480    suites.append(481        TestSuite.from_config(config_check_output_file, f"job-api-{len(suites) + 1}")482    )483    # ========================================================================484    # Test if the temporary directory was created485    # ========================================================================486    config_check_tmp_directory_exists = {487        "resolver.references": get_ref("check_tmp_directory_exists"),488        "run.dict_variants.variant_id_keys": ["namespace", "value"],489        "run.dict_variants": [490            {"namespace": "run.keep_tmp", "value": True, "assert": True},491        ],492    }493    suites.append(494        TestSuite.from_config(495            config_check_tmp_directory_exists, f"job-api-{len(suites) + 1}"496        )497    )498    return suites499def create_suites(args):  # pylint: disable=W0621500    suites = []501    # ========================================================================502    # Run nrunner interface checks for all available runners503    # ========================================================================504    config_nrunner_interface = {505        "resolver.references": ["selftests/functional/test_nrunner_interface.py"],506        "run.dict_variants.variant_id_keys": ["runner"],507        "run.dict_variants": [508            {509                "runner": "avocado-runner",510                "runnable-run-no-args-exit-code": 2,511                "runnable-run-uri-only-exit-code": 2,512                "task-run-id-only-exit-code": 2,513            },514            {515                "runner": "avocado-runner-dry-run",516                "runnable-run-no-args-exit-code": 0,517                "runnable-run-uri-only-exit-code": 0,518                "task-run-id-only-exit-code": 0,519            },520            {521                "runner": "avocado-runner-noop",522                "runnable-run-no-args-exit-code": 0,523                "runnable-run-uri-only-exit-code": 0,524                "task-run-id-only-exit-code": 0,525            },526            {527                "runner": "avocado-runner-exec-test",528                "runnable-run-no-args-exit-code": 0,529                "runnable-run-uri-only-exit-code": 0,530                "task-run-id-only-exit-code": 0,531            },532            {533                "runner": "avocado-runner-python-unittest",534                "runnable-run-no-args-exit-code": 0,535                "runnable-run-uri-only-exit-code": 0,536                "task-run-id-only-exit-code": 0,537            },538            {539                "runner": "avocado-runner-avocado-instrumented",540                "runnable-run-no-args-exit-code": 0,541                "runnable-run-uri-only-exit-code": 0,542                "task-run-id-only-exit-code": 0,543            },544            {545                "runner": "avocado-runner-tap",546                "runnable-run-no-args-exit-code": 0,547                "runnable-run-uri-only-exit-code": 0,548                "task-run-id-only-exit-code": 0,549            },550            {551                "runner": "avocado-runner-podman-image",552                "runnable-run-no-args-exit-code": 0,553                "runnable-run-uri-only-exit-code": 0,554                "task-run-id-only-exit-code": 0,555            },556        ],557    }558    if (559        python_module_available("avocado-framework-plugin-golang")560        and "golang" not in args.disable_plugin_checks561    ):562        config_nrunner_interface["run.dict_variants"].append(563            {564                "runner": "avocado-runner-golang",565                "runnable-run-no-args-exit-code": 0,566                "runnable-run-uri-only-exit-code": 0,567                "task-run-id-only-exit-code": 0,568            }569        )570    if (571        python_module_available("avocado-framework-plugin-robot")572        and "robot" not in args.disable_plugin_checks573    ):574        config_nrunner_interface["run.dict_variants"].append(575            {576                "runner": "avocado-runner-robot",577                "runnable-run-no-args-exit-code": 0,578                "runnable-run-uri-only-exit-code": 0,579                "task-run-id-only-exit-code": 0,580            }581        )582    if (583        python_module_available("avocado-framework-plugin-ansible")584        and "ansible" not in args.disable_plugin_checks585    ):586        config_nrunner_interface["run.dict_variants"].append(587            {588                "runner": "avocado-runner-ansible-module",589                "runnable-run-no-args-exit-code": 0,590                "runnable-run-uri-only-exit-code": 0,591                "task-run-id-only-exit-code": 0,592            }593        )594    if args.dict_tests["nrunner-interface"]:595        suites.append(596            TestSuite.from_config(config_nrunner_interface, "nrunner-interface")597        )598    # ========================================================================599    # Run functional requirement tests600    # ========================================================================601    config_nrunner_requirement = {602        "resolver.references": ["selftests/functional/serial/test_requirements.py"],603        "nrunner.max_parallel_tasks": 1,604        "run.dict_variants": [605            {"spawner": "process"},606            {"spawner": "podman"},607        ],608    }609    if args.dict_tests["nrunner-requirement"]:610        suites.append(611            TestSuite.from_config(config_nrunner_requirement, "nrunner-requirement")612        )613    # ========================================================================614    # Run all static checks, unit and functional tests615    # ========================================================================616    config_check = {617        "run.ignore_missing_references": True,618    }619    if args.dict_tests["unit"]:620        config_check_unit = copy.copy(config_check)621        config_check_unit["resolver.references"] = ["selftests/unit/"]622        suites.append(TestSuite.from_config(config_check_unit, "unit"))623    if args.dict_tests["jobs"]:624        config_check_jobs = copy.copy(config_check)625        config_check_jobs["resolver.references"] = ["selftests/jobs/"]626        suites.append(TestSuite.from_config(config_check_jobs, "jobs"))627    if args.dict_tests["functional"]:628        functional_path = os.path.join("selftests", "functional")629        references = glob.glob(os.path.join(functional_path, "test*.py"))630        references.extend(631            [632                os.path.join(functional_path, "utils"),633                os.path.join(functional_path, "plugin"),634            ]635        )636        config_check_functional_parallel = copy.copy(config_check)637        config_check_functional_parallel["resolver.references"] = references638        suites.append(639            TestSuite.from_config(640                config_check_functional_parallel, "functional-parallel"641            )642        )643        config_check_functional_serial = copy.copy(config_check)644        config_check_functional_serial["resolver.references"] = [645            "selftests/functional/serial/"646        ]647        config_check_functional_serial["nrunner.max_parallel_tasks"] = 1648        suites.append(649            TestSuite.from_config(config_check_functional_serial, "functional-serial")650        )651    if args.dict_tests["static-checks"]:652        config_check_static = copy.copy(config_check)653        config_check_static["resolver.references"] = glob.glob("selftests/*.sh")654        suites.append(TestSuite.from_config(config_check_static, "static-checks"))655    if args.dict_tests["optional-plugins"]:656        config_check_optional = copy.copy(config_check)657        config_check_optional["resolver.references"] = []658        for optional_plugin in glob.glob("optional_plugins/*"):659            plugin_name = os.path.basename(optional_plugin)660            if plugin_name not in args.disable_plugin_checks:661                pattern = f"{optional_plugin}/tests/*"662                config_check_optional["resolver.references"] += glob.glob(pattern)663        suites.append(TestSuite.from_config(config_check_optional, "optional-plugins"))664    return suites665def main(args):  # pylint: disable=W0621666    args.dict_tests = {667        "static-checks": False,668        "job-api": False,669        "nrunner-interface": False,670        "nrunner-requirement": False,671        "unit": False,672        "jobs": False,673        "functional": False,674        "optional-plugins": False,675    }676    # Make a list of strings instead of a list with a single string677    if len(args.disable_plugin_checks) > 0:678        args.disable_plugin_checks = args.disable_plugin_checks[0].split(",")679    if len(args.select) > 0:680        args.select = args.select[0].split(",")681    if len(args.skip) > 0:682        args.skip = args.skip[0].split(",")683    # Print features covered in this test684    if args.list_features:685        suites = create_suite_job_api(args)686        suites += create_suites(args)687        features = []688        for suite in suites:689            for variants in suite.config["run.dict_variants"]:690                if variants.get("namespace"):691                    features.append(variants["namespace"])692        unique_features = sorted(set(features))693        print(f"Features covered ({len(unique_features)}):")694        print("\n".join(unique_features))695        exit(0)696    # Will only run the test you select, --select must be followed by list of tests697    elif args.select:698        for elem in args.select:699            if elem not in args.dict_tests.keys():700                print(elem, "is not in the list of valid tests.")701                exit(0)702            else:703                args.dict_tests[elem] = True704    # Will run all the tests except these you skip, --skip must be followed by list of tests705    elif args.skip:706        # Make all the values True, so later we set to False the tests we don't want to run707        args.dict_tests = {x: True for x in args.dict_tests}708        for elem in args.skip:709            if elem not in args.dict_tests.keys():710                print(elem, "is not in the list of valid tests.")711                exit(0)712            else:713                args.dict_tests[elem] = False714    # If no option was selected, run all tests!715    elif not (args.skip or args.select):716        print("No test were selected to run, running all of them.")717        args.dict_tests = {x: True for x in args.dict_tests}718    else:719        print("Something went wrong, please report a bug!")720        exit(1)721    suites = []722    if args.dict_tests["job-api"]:723        suites += create_suite_job_api(args)724    suites += create_suites(args)725    # ========================================================================726    # Job execution727    # ========================================================================728    config = {729        "run.job_category": "avocado-selftests",730        "job.output.testlogs.statuses": ["FAIL", "ERROR", "INTERRUPT"],731    }732    # Workaround for travis problem on arm64 - https://github.com/avocado-framework/avocado/issues/4768733    if platform.machine() == "aarch64":734        max_parallel = int(multiprocessing.cpu_count() / 2)735        for suite in suites:736            if suite.name == "functional-parallel":737                suite.config["nrunner.max_parallel_tasks"] = max_parallel...Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 minutes of automation test minutes FREE!!
