How to use test_plant_organic method in avocado

Best Python code snippet using avocado_python
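The `test_plant_organic` method shown in the snippets below is an avocado-instrumented test: a method whose name starts with `test` on a class derived from `avocado.Test`. Besides the `avocado run` command line exercised throughout test_basic.py, the method can also be driven programmatically. The following is a minimal sketch using avocado's Job API, assuming `avocado-framework` is installed and you are inside an avocado source tree where `examples/tests/logging_streams.py` exists; note that the `resolver.references` config key is the one used by recent avocado releases and may differ in older versions.

import sys

from avocado.core.job import Job

# The reference format is <file>:<Class>.<method>, handled by the
# avocado-instrumented resolver.
config = {
    "resolver.references": [
        "examples/tests/logging_streams.py:Plant.test_plant_organic",
    ],
}

# Job.from_config() builds a job the same way the command line does;
# run() returns the job exit code (0 on success).
with Job.from_config(job_config=config) as job:
    sys.exit(job.run())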

test_basic.py

Source: test_basic.py (GitHub)


import glob
import json
import os
import re
import tempfile
import time
import unittest
import xml.dom.minidom
import zipfile

from avocado.core import exit_codes
from avocado.utils import path as utils_path
from avocado.utils import process, script
from selftests.utils import (
    AVOCADO,
    BASEDIR,
    TestCaseTmpDir,
    python_module_available,
    skipOnLevelsInferiorThan,
    skipUnlessPathExists,
    temp_dir_prefix,
)

try:
    import xmlschema

    SCHEMA_CAPABLE = True
except ImportError:
    SCHEMA_CAPABLE = False

UNSUPPORTED_STATUS_TEST_CONTENTS = """
from avocado import Test
class FakeStatusTest(Test):
    def run_avocado(self):
        super(FakeStatusTest, self).run_avocado()
        # Please do NOT ever use this, it's for unittesting only.
        self._Test__status = 'not supported'
    def test(self):
        pass
"""

INVALID_PYTHON_TEST = """
from avocado import Test
class MyTest(Test):
    non_existing_variable_causing_crash
    def test_my_name(self):
        pass
"""

VALID_PYTHON_TEST_WITH_TAGS = '''
from avocado import Test
class MyTest(Test):
    def test(self):
        """
        :avocado: tags=BIG_TAG_NAME
        """
        pass
'''

DIE_WITHOUT_REPORTING_STATUS = """
from avocado import Test
import os
import signal
class MyTest(Test):
    def test(self):
        os.kill(os.getpid(), signal.SIGKILL)
"""

RAISE_CUSTOM_PATH_EXCEPTION_CONTENT = """import os
import sys
from avocado import Test
class SharedLibTest(Test):
    def test(self):
        sys.path.append(os.path.join(os.path.dirname(__file__), "shared_lib"))
        from mylib import CancelExc
        raise CancelExc("This should not crash on unpickling in runner")
"""

TEST_OTHER_LOGGERS_CONTENT = """
import logging
from avocado import Test
class My(Test):
    def test(self):
        logging.getLogger("some.other.logger").info("SHOULD NOT BE ON debug.log")
"""


def probe_binary(binary):
    try:
        return utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return None


TRUE_CMD = probe_binary("true")
CC_BINARY = probe_binary("cc")

# On macOS, the default GNU core-utils installation (brew)
# installs the gnu utility versions with a g prefix. It still has the
# BSD versions of the core utilities installed on their expected paths
# but their behavior and flags are in most cases different.
GNU_ECHO_BINARY = probe_binary("echo")
if GNU_ECHO_BINARY is not None:
    if probe_binary("man") is not None:
        echo_cmd = f"man {os.path.basename(GNU_ECHO_BINARY)}"
        echo_manpage = process.run(echo_cmd, env={"LANG": "C"}, encoding="ascii").stdout
        if b"-e" not in echo_manpage:
            GNU_ECHO_BINARY = probe_binary("gecho")
READ_BINARY = probe_binary("read")
SLEEP_BINARY = probe_binary("sleep")


class RunnerOperationTest(TestCaseTmpDir):
    def test_show_version(self):
        result = process.run(f"{AVOCADO} -v", ignore_status=True)
        self.assertEqual(result.exit_status, 0)
        self.assertTrue(
            re.match(r"^Avocado \d+\.\d+$", result.stdout_text),
            (
                f"Version string does not match "
                f"'Avocado \\d\\.\\d:'\n"
                f"{result.stdout_text}"
            ),
        )

    def test_alternate_config_datadir(self):
        """
        Uses the "--config" flag to check custom configuration is applied
        Even on the more complex data_dir module, which adds extra checks
        to what is set on the plain settings module.
        """
        base_dir = os.path.join(self.tmpdir.name, "datadir_base")
        os.mkdir(base_dir)
        mapping = {
            "base_dir": base_dir,
            "test_dir": os.path.join(base_dir, "test"),
            "data_dir": os.path.join(base_dir, "data"),
            "logs_dir": os.path.join(base_dir, "logs"),
        }
        config = "[datadir.paths]\n"
        for key, value in mapping.items():
            if not os.path.isdir(value):
                os.mkdir(value)
            config += f"{key} = {value}\n"
        fd, config_file = tempfile.mkstemp(dir=self.tmpdir.name)
        os.write(fd, config.encode())
        os.close(fd)
        cmd = f"{AVOCADO} --config {config_file} config --datadir"
        result = process.run(cmd)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
        )
        self.assertIn(" base " + mapping["base_dir"], result.stdout_text)
        self.assertIn(" data " + mapping["data_dir"], result.stdout_text)
        self.assertIn(" logs " + mapping["logs_dir"], result.stdout_text)

    def test_runner_phases(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/phases.py"
        )
        result = process.run(cmd_line)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
        )

    def test_runner_all_ok(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py examples/tests/passtest.py"
        )
        process.run(cmd_line)
        # Also check whether jobdata contains correct parameter paths
        variants = open(
            os.path.join(self.tmpdir.name, "latest", "jobdata", "variants-1.json"),
            encoding="utf-8",
        ).read()
        expected = '[{"paths": ["/run/*"], "variant_id": null, "variant": [["/", []]]}]'
        self.assertEqual(variants, expected)

    def test_runner_failfast_fail(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py examples/tests/failtest.py "
            f"examples/tests/passtest.py --failfast "
            f"--nrunner-max-parallel-tasks=1"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"Interrupting job (failfast).", result.stdout)
        self.assertIn(b"PASS 1 | ERROR 0 | FAIL 1 | SKIP 1", result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_failfast_error(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py examples/tests/errortest.py "
            f"examples/tests/passtest.py --failfast "
            f"--nrunner-max-parallel-tasks=1"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"Interrupting job (failfast).", result.stdout)
        self.assertIn(b"PASS 1 | ERROR 1 | FAIL 0 | SKIP 1", result.stdout)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL | exit_codes.AVOCADO_JOB_INTERRUPTED
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_ignore_missing_references_one_missing(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py badtest.py "
            f"--ignore-missing-references"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"PASS 1 | ERROR 0 | FAIL 0 | SKIP 0", result.stdout)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_ignore_missing_references_all_missing(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"badtest.py badtest2.py --ignore-missing-references"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertIn(b"Suite is empty. There is no tests to run.", result.stderr)
        expected_rc = exit_codes.AVOCADO_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_test_with_local_imports(self):
        prefix = temp_dir_prefix(self)
        with tempfile.TemporaryDirectory(prefix=prefix) as libdir:
            with script.Script(
                os.path.join(libdir, "mylib.py"),
                "def hello():\n    return 'Hello world'",
            ):
                with script.Script(
                    os.path.join(libdir, "test_local_imports.py"),
                    (
                        "from avocado import Test\n"
                        "from mylib import hello\n"
                        "class LocalImportTest(Test):\n"
                        "    def test(self):\n"
                        "        self.log.info(hello())\n"
                    ),
                ) as mytest:
                    cmd_line = (
                        f"{AVOCADO} run --disable-sysinfo "
                        f"--job-results-dir {self.tmpdir.name} "
                        f"{mytest}"
                    )
                    process.run(cmd_line)

    def test_unsupported_status(self):
        with script.TemporaryScript(
            "fake_status.py",
            UNSUPPORTED_STATUS_TEST_CONTENTS,
            "avocado_unsupported_status",
        ) as tst:
            res = process.run(
                (
                    f"{AVOCADO} run --disable-sysinfo "
                    f"--job-results-dir {self.tmpdir.name} {tst} "
                    f"--json -"
                ),
                ignore_status=True,
            )
            self.assertEqual(res.exit_status, exit_codes.AVOCADO_TESTS_FAIL)
            results = json.loads(res.stdout_text)
            self.assertEqual(
                results["tests"][0]["status"],
                "ERROR",
                (f"{results['tests'][0]['status']} != " f"{'ERROR'}\n{res}"),
            )
            self.assertIn(
                "Runner error occurred: Test reports unsupported",
                results["tests"][0]["fail_reason"],
            )

    def test_runner_tests_fail(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/passtest.py "
            f"examples/tests/failtest.py examples/tests/passtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_test_fail_with_warning(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/failtest_with_warning.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_nonexistent_test(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} bogustest"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_JOB_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(
            result.exit_status,
            unexpected_rc,
            f"Avocado crashed (rc {unexpected_rc}):\n{result}",
        )
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_runner_doublefail(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} --xunit - "
            f"examples/tests/doublefail.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(
            result.exit_status,
            unexpected_rc,
            f"Avocado crashed (rc {unexpected_rc}):\n{result}",
        )
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(
            b"TestError: Failing during tearDown. Yay!",
            result.stdout,
            "Cleanup exception not printed to log output",
        )
        self.assertIn(
            b"TestFail: This test is supposed to fail",
            result.stdout,
            (f"Test did not fail with action exception:" f"\n{result.stdout}"),
        )

    def test_uncaught_exception(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} --json - "
            f"examples/tests/uncaught_exception.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b'"status": "ERROR"', result.stdout)

    def test_fail_on_exception(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} --json - "
            f"examples/tests/fail_on_exception.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b'"status": "FAIL"', result.stdout)

    def test_cancel_on_exception(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} --json - "
            f"examples/tests/cancel_on_exception.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        result = json.loads(result.stdout_text)
        for test in result["tests"]:
            self.assertEqual(test["status"], "CANCEL")

    def test_assert_raises(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} -- examples/tests/assert.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b"Assert.test_assert_raises: PASS", result.stdout)
        self.assertIn(b"Assert.test_fails_to_raise: FAIL", result.stdout)
        self.assertIn(b"PASS 1 | ERROR 0 | FAIL 1 ", result.stdout)

    def test_exception_not_in_path(self):
        os.mkdir(os.path.join(self.tmpdir.name, "shared_lib"))
        mylib = script.Script(
            os.path.join(self.tmpdir.name, "shared_lib", "mylib.py"),
            "from avocado import TestCancel\n\n"
            "class CancelExc(TestCancel):\n"
            "    pass",
        )
        mylib.save()
        mytest = script.Script(
            os.path.join(self.tmpdir.name, "mytest.py"),
            RAISE_CUSTOM_PATH_EXCEPTION_CONTENT,
        )
        mytest.save()
        result = process.run(
            f"{AVOCADO} --show test run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} {mytest}"
        )
        self.assertIn(
            b"'fail_reason': 'This should not crash on " b"unpickling in runner'",
            result.stdout,
        )

    def test_runner_timeout(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/timeouttest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        json_path = os.path.join(self.tmpdir.name, "latest", "results.json")
        with open(json_path, encoding="utf-8") as json_file:
            result_json = json.load(json_file)
        output = result.stdout
        expected_rc = exit_codes.AVOCADO_JOB_INTERRUPTED
        unexpected_rc = exit_codes.AVOCADO_FAIL
        self.assertNotEqual(
            result.exit_status,
            unexpected_rc,
            f"Avocado crashed (rc {unexpected_rc}):\n{result}",
        )
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn("timeout", result_json["tests"][0]["fail_reason"])
        # Ensure no test aborted error messages show up
        self.assertNotIn(b"TestAbortError: Test aborted unexpectedly", output)

    def test_silent_output(self):
        cmd_line = (
            f"{AVOCADO} --show=none run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertEqual(result.stdout, b"")

    def test_show_user_stream(self):
        cmd_line = (
            f"{AVOCADO} --show=app,avocado.test.progress run "
            f"--disable-sysinfo --job-results-dir {self.tmpdir.name} "
            f"examples/tests/logging_streams.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn(
            b"Plant.test_plant_organic: preparing soil on row 0", result.stdout
        )

    def test_empty_args_list(self):
        cmd_line = AVOCADO
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_FAIL)
        self.assertIn(
            b"avocado: error: the following arguments are required", result.stderr
        )

    def test_empty_test_list(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir " f"{self.tmpdir.name}"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertEqual(
            result.stderr,
            (
                b"Test Suite could not be created. No test references"
                b" provided nor any other arguments resolved into "
                b"tests\n"
            ),
        )

    def test_not_found(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} sbrubles"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_JOB_FAIL)
        self.assertEqual(result.stdout, b"")
        self.assertEqual(result.stderr, b"Could not resolve references: sbrubles\n")

    def test_invalid_unique_id(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"--force-job-id foobar examples/tests/passtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertNotEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertIn(b"needs to be a 40 digit hex", result.stderr)
        self.assertNotIn(b"needs to be a 40 digit hex", result.stdout)

    def test_valid_unique_id(self):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo "
            f"--force-job-id 975de258ac05ce5e490648dec4753657b7ccc7d1 "
            f"examples/tests/passtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        self.assertNotIn(b"needs to be a 40 digit hex", result.stderr)
        self.assertIn(b"PASS", result.stdout)

    def test_automatic_unique_id(self):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo examples/tests/passtest.py --json -"
        )
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        r = json.loads(result.stdout_text)
        int(r["job_id"], 16)  # it's an hex number
        self.assertEqual(len(r["job_id"]), 40)

    @skipOnLevelsInferiorThan(2)
    def test_early_latest_result(self):
        """
        Tests that the `latest` link to the latest job results is created early
        :avocado: tags=parallel:1
        """
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"examples/tests/passtest.py"
        )
        avocado_process = process.SubProcess(cmd_line)
        try:
            avocado_process.start()
            link = os.path.join(self.tmpdir.name, "latest")
            for _ in range(0, 50):
                time.sleep(0.1)
                if os.path.exists(link) and os.path.islink(link):
                    avocado_process.wait()
                    break
            self.assertTrue(os.path.exists(link))
            self.assertTrue(os.path.islink(link))
        finally:
            avocado_process.wait()

    def test_invalid_python(self):
        test = script.make_script(
            os.path.join(self.tmpdir.name, "test.py"), INVALID_PYTHON_TEST
        )
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} {test}"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(f"{test}:MyTest.test_my_name: ERROR", result.stdout_text)

    @unittest.skipIf(not READ_BINARY, "read binary not available.")
    @skipOnLevelsInferiorThan(1)
    def test_read(self):
        """
        :avocado: tags=parallel:1
        """
        cmd = (
            f"{AVOCADO} run --disable-sysinfo "
            f"--job-results-dir {self.tmpdir.name} "
            f"{READ_BINARY}"
        )
        result = process.run(cmd, timeout=10, ignore_status=True)
        self.assertLess(
            result.duration, 8, (f"Duration longer than expected." f"\n{result}")
        )
        self.assertEqual(
            result.exit_status, 1, (f"Expected exit status is 1" f"\n{result}")
        )

    def test_runner_test_parameters(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f'{self.tmpdir.name} -p "sleep_length=0.01" -- '
            f"examples/tests/sleeptest.py "
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        json_path = os.path.join(self.tmpdir.name, "latest", "results.json")
        with open(json_path, encoding="utf-8") as json_file:
            result_json = json.load(json_file)
        with open(
            result_json["tests"][0]["logfile"], "r+b"
        ) as test_log_file:  # pylint: disable=W1514
            test_log = test_log_file.read()
        self.assertIn(
            b"PARAMS (key=sleep_length, path=*, default=1) => '0.01'", test_log
        )
        self.assertIn(b"Sleeping for 0.01 seconds", test_log)

    def test_other_loggers(self):
        with script.TemporaryScript(
            "mytest.py",
            TEST_OTHER_LOGGERS_CONTENT,
            "avocado_functional_test_other_loggers",
        ) as mytest:
            cmd_line = (
                f"{AVOCADO} run --disable-sysinfo "
                f"--job-results-dir {self.tmpdir.name} -- {mytest}"
            )
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(
                result.exit_status,
                expected_rc,
                (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
            )
            test_log_dir = glob.glob(
                os.path.join(self.tmpdir.name, "job-*", "test-results", "1-*")
            )[0]
            test_log_path = os.path.join(test_log_dir, "debug.log")
            with open(test_log_path, "rb") as test_log:  # pylint: disable=W1514
                self.assertNotIn(b"SHOULD NOT BE ON debug.log", test_log.read())

    def test_store_logging_stream(self):
        cmd = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--store-logging-stream=avocado.test.progress "
            f"--disable-sysinfo -- examples/tests/logging_streams.py"
        )
        result = process.run(cmd)
        self.assertEqual(result.exit_status, exit_codes.AVOCADO_ALL_OK)
        progress_info = os.path.join(
            self.tmpdir.name,
            "latest",
            "test-results",
            "1-examples_tests_logging_streams.py_Plant" ".test_plant_organic",
            "avocado.test.progress",
        )
        self.assertTrue(os.path.exists(progress_info))
        with open(progress_info, encoding="utf-8") as file:
            stream_line = file.readline()
            self.assertIn(
                "INFO | 1-examples/tests/logging_streams.py:"
                "Plant.test_plant_organic: preparing soil on row 0",
                stream_line,
            )


class DryRunTest(TestCaseTmpDir):
    def test_dry_run(self):
        examples_path = os.path.join("examples", "tests")
        passtest = os.path.join(examples_path, "passtest.py")
        failtest = os.path.join(examples_path, "failtest.py")
        gendata = os.path.join(examples_path, "gendata.py")
        cmd = (
            f"{AVOCADO} run --disable-sysinfo --dry-run "
            f"--dry-run-no-cleanup --json - "
            f"-- {passtest} {failtest} {gendata}"
        )
        number_of_tests = 3
        result = json.loads(process.run(cmd).stdout_text)
        # Check if all tests were skipped
        self.assertEqual(result["cancel"], number_of_tests)
        for i in range(number_of_tests):
            test = result["tests"][i]
            self.assertEqual(test["fail_reason"], "Test cancelled due to --dry-run")


class RunnerHumanOutputTest(TestCaseTmpDir):
    def test_output_pass(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/passtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b"passtest.py:PassTest.test: PASS", result.stdout)

    def test_output_fail(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/failtest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b"examples/tests/failtest.py:FailTest.test: FAIL", result.stdout)

    def test_output_error(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/errortest.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b"errortest.py:ErrorTest.test: ERROR", result.stdout)

    def test_output_cancel(self):
        cmd_line = (
            f"{AVOCADO} run --disable-sysinfo --job-results-dir "
            f"{self.tmpdir.name} examples/tests/cancelonsetup.py"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(
            b"PASS 0 | ERROR 0 | FAIL 0 | SKIP 0 | WARN 0 | " b"INTERRUPT 0 | CANCEL 1",
            result.stdout,
        )


class RunnerExecTest(TestCaseTmpDir):
    def setUp(self):
        super().setUp()
        self.pass_script = script.TemporaryScript(
            "\u00e1 \u00e9 \u00ed \u00f3 \u00fa",
            "#!/bin/sh\ntrue",
            "avocado_exec_test_functional",
        )
        self.pass_script.save()
        self.fail_script = script.TemporaryScript(
            "avocado_fail.sh", "#!/bin/sh\nfalse", "avocado_exec_test_" "functional"
        )
        self.fail_script.save()

    def test_exec_test_pass(self):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f'--disable-sysinfo "{self.pass_script.path}"'
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_exec_test_fail(self):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo {self.fail_script.path}"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    @skipOnLevelsInferiorThan(2)
    def test_runner_onehundred_fail_timing(self):
        """
        We can be pretty sure that a failtest should return immediately. Let's
        run 100 of them and assure they not take more than 30 seconds to run.
        Notice: on a current machine this takes about 0.12s, so 30 seconds is
        considered to be pretty safe here.
        :avocado: tags=parallel:1
        """
        one_hundred = "examples/tests/failtest.py " * 100
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo {one_hundred}"
        )
        initial_time = time.monotonic()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.monotonic() - initial_time
        self.assertLess(actual_time, 60.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    @skipOnLevelsInferiorThan(2)
    def test_runner_sleep_fail_sleep_timing(self):
        """
        Sleeptest is supposed to take 1 second, let's make a sandwich of
        100 failtests and check the test runner timing.
        :avocado: tags=parallel:1
        """
        sleep_fail_sleep = (
            "examples/tests/sleeptest.py "
            + "examples/tests/failtest.py " * 100
            + "examples/tests/sleeptest.py"
        )
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo {sleep_fail_sleep}"
        )
        initial_time = time.monotonic()
        result = process.run(cmd_line, ignore_status=True)
        actual_time = time.monotonic() - initial_time
        self.assertLess(actual_time, 63.0)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def test_non_absolute_path(self):
        test_base_dir = os.path.dirname(self.pass_script.path)
        os.chdir(test_base_dir)
        test_file_name = os.path.basename(self.pass_script.path)
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f'--disable-sysinfo "{test_file_name}"'
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def tearDown(self):
        self.pass_script.remove()
        self.fail_script.remove()
        super().tearDown()


class RunnerReferenceFromConfig(TestCaseTmpDir):
    def setUp(self):
        super().setUp()
        self.config_file = script.TemporaryScript(
            "avocado.conf", "[resolver]\n" "references = ['/bin/true']\n"
        )
        self.config_file.save()

    @skipUnlessPathExists("/bin/true")
    def test(self):
        cmd_line = (
            f"{AVOCADO} --config {self.config_file.path} run "
            f"--job-results-dir {self.tmpdir.name} --disable-sysinfo "
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )

    def tearDown(self):
        super().tearDown()
        self.config_file.remove()


class RunnerExecTestFailureFields(TestCaseTmpDir):
    def setUp(self):
        super().setUp()
        self.config_file = script.TemporaryScript(
            "avocado.conf",
            "[simpletests.status]\n" "failure_fields = ['stdout', 'stderr']\n",
        )
        self.config_file.save()

    def test_exec_test_failure_fields(self):
        fail_test = os.path.join(BASEDIR, "examples", "tests", "failtest.sh")
        cmd_line = (
            f"{AVOCADO} --config {self.config_file.path} run "
            f"--job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo -- {fail_test}"
        )
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_TESTS_FAIL
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn("Exited with status: '1'", result.stdout_text)

    def tearDown(self):
        super().tearDown()
        self.config_file.remove()


class PluginsTest(TestCaseTmpDir):
    def test_sysinfo_plugin(self):
        cmd_line = f"{AVOCADO} sysinfo {self.tmpdir.name}"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        sysinfo_files = os.listdir(self.tmpdir.name)
        self.assertGreater(len(sysinfo_files), 0, "Empty sysinfo files dir")

    def test_list_plugin(self):
        cmd_line = f"{AVOCADO} list"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn(b"No tests were found on current tests dir", result.stdout)

    def test_list_error_output(self):
        cmd_line = f"{AVOCADO} list sbrubles"
        result = process.run(cmd_line, ignore_status=True)
        self.assertEqual("", result.stdout_text)

    def test_plugin_list(self):
        cmd_line = f"{AVOCADO} plugins"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn(b"Disabled", result.stdout)

    def test_config_plugin(self):
        cmd_line = f"{AVOCADO} config "
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn(b"Disabled", result.stdout)

    def test_config_plugin_datadir(self):
        cmd_line = f"{AVOCADO} config --datadir "
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn(b"Disabled", result.stdout)

    def test_disable_plugin(self):
        cmd_line = f"{AVOCADO} plugins"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertIn(b"Collect system information", result.stdout)
        config_content = "[plugins]\ndisable=['cli.cmd.sysinfo',]"
        config = script.TemporaryScript("disable_sysinfo_cmd.conf", config_content)
        with config:
            cmd_line = f"{AVOCADO} --config {config} plugins"
            result = process.run(cmd_line, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(
                result.exit_status,
                expected_rc,
                (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
            )
            self.assertNotIn(b"Collect system information", result.stdout)

    def test_plugin_order(self):
        """
        Tests plugin order by configuration file
        First it checks if html, json, xunit and zip_archive plugins are enabled.
        Then it runs a test with zip_archive running first, which means the html,
        json and xunit output files do not make into the archive.
        Then it runs with zip_archive set to run last, which means the html,
        json and xunit output files *do* make into the archive.
        """

        def run_config(config_path):
            cmd = (
                f"{AVOCADO} --config {config_path} "
                f"run examples/tests/passtest.py --archive "
                f"--job-results-dir {self.tmpdir.name} "
                f"--disable-sysinfo"
            )
            result = process.run(cmd, ignore_status=True)
            expected_rc = exit_codes.AVOCADO_ALL_OK
            self.assertEqual(
                result.exit_status,
                expected_rc,
                (f"Avocado did not return rc {expected_rc}:" f"\n{result}"),
            )

        result_plugins = ["json", "xunit", "zip_archive"]
        result_outputs = ["results.json", "results.xml"]
        if python_module_available("avocado-framework-plugin-result-html"):
            result_plugins.append("html")
            result_outputs.append("results.html")
        cmd_line = f"{AVOCADO} plugins"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        for result_plugin in result_plugins:
            self.assertIn(result_plugin, result.stdout_text)
        config_content_zip_first = "[plugins.result]\norder=['zip_archive']"
        config_zip_first = script.TemporaryScript(
            "zip_first.conf", config_content_zip_first
        )
        with config_zip_first:
            run_config(config_zip_first)
            archives = glob.glob(os.path.join(self.tmpdir.name, "*.zip"))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], "r")
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertNotIn(result_output, zip_file_list)
            os.unlink(archives[0])
        config_content_zip_last = (
            "[plugins.result]\norder=['html', 'json',"
            "'xunit', 'non_existing_plugin_is_ignored'"
            ",'zip_archive']"
        )
        config_zip_last = script.TemporaryScript(
            "zip_last.conf", config_content_zip_last
        )
        with config_zip_last:
            run_config(config_zip_last)
            archives = glob.glob(os.path.join(self.tmpdir.name, "*.zip"))
            self.assertEqual(len(archives), 1, "ZIP Archive not generated")
            zip_file = zipfile.ZipFile(archives[0], "r")
            zip_file_list = zip_file.namelist()
            for result_output in result_outputs:
                self.assertIn(result_output, zip_file_list)

    def test_Namespace_object_has_no_attribute(self):
        cmd_line = f"{AVOCADO} plugins"
        result = process.run(cmd_line, ignore_status=True)
        expected_rc = exit_codes.AVOCADO_ALL_OK
        self.assertEqual(
            result.exit_status,
            expected_rc,
            f"Avocado did not return rc {expected_rc}:\n{result}",
        )
        self.assertNotIn(b"'Namespace' object has no attribute", result.stderr)


class ParseXMLError(Exception):
    pass


class PluginsXunitTest(TestCaseTmpDir):
    @unittest.skipUnless(
        SCHEMA_CAPABLE, "Unable to validate schema due to missing xmlschema library"
    )
    def setUp(self):
        super().setUp()
        junit_xsd = os.path.join(
            os.path.dirname(__file__), os.path.pardir, ".data", "jenkins-junit.xsd"
        )
        self.xml_schema = xmlschema.XMLSchema(junit_xsd)

    def run_and_check(self, testname, e_rc, e_ntests, e_nerrors, e_nfailures, e_nskip):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo "
            f"--xunit - {testname}"
        )
        result = process.run(cmd_line, ignore_status=True)
        xml_output = result.stdout
        self.assertEqual(
            result.exit_status, e_rc, f"Avocado did not return rc {e_rc}:\n{result}"
        )
        try:
            xunit_doc = xml.dom.minidom.parseString(xml_output)
        except Exception as detail:
            raise ParseXMLError(f"Failed to parse content: {detail}\n{xml_output}")
        # pylint: disable=I1101
        xunit_file_output = os.path.join(self.tmpdir.name, "latest", "results.xml")
        self.assertTrue(self.xml_schema.is_valid(xunit_file_output))
        testsuite_list = xunit_doc.getElementsByTagName("testsuite")
        self.assertEqual(len(testsuite_list), 1, "More than one testsuite tag")
        testsuite_tag = testsuite_list[0]
        self.assertEqual(
            len(testsuite_tag.attributes),
            7,
            (f"The testsuite tag does not have 7 attributes. " f"XML:\n{xml_output}"),
        )
        n_tests = int(testsuite_tag.attributes["tests"].value)
        self.assertEqual(
            n_tests,
            e_ntests,
            (f"Unexpected number of executed tests, XML:\n" f"{xml_output}"),
        )
        n_errors = int(testsuite_tag.attributes["errors"].value)
        self.assertEqual(
            n_errors,
            e_nerrors,
            (f"Unexpected number of test errors, XML:\n" f"{xml_output}"),
        )
        n_failures = int(testsuite_tag.attributes["failures"].value)
        self.assertEqual(
            n_failures,
            e_nfailures,
            (f"Unexpected number of test failures, XML:\n" f"{xml_output}"),
        )
        n_skip = int(testsuite_tag.attributes["skipped"].value)
        self.assertEqual(
            n_skip, e_nskip, f"Unexpected number of test skips, XML:\n" f"{xml_output}"
        )

    def test_xunit_plugin_passtest(self):
        self.run_and_check(
            "examples/tests/passtest.py", exit_codes.AVOCADO_ALL_OK, 1, 0, 0, 0
        )

    def test_xunit_plugin_failtest(self):
        self.run_and_check(
            "examples/tests/failtest.py", exit_codes.AVOCADO_TESTS_FAIL, 1, 0, 1, 0
        )

    def test_xunit_plugin_skiponsetuptest(self):
        self.run_and_check(
            "examples/tests/cancelonsetup.py", exit_codes.AVOCADO_ALL_OK, 1, 0, 0, 1
        )

    def test_xunit_plugin_errortest(self):
        self.run_and_check(
            "examples/tests/errortest.py", exit_codes.AVOCADO_TESTS_FAIL, 1, 1, 0, 0
        )


class ParseJSONError(Exception):
    pass


class PluginsJSONTest(TestCaseTmpDir):
    def run_and_check(
        self, testname, e_rc, e_ntests, e_nerrors, e_nfailures, e_nskip, e_ncancel=0
    ):
        cmd_line = (
            f"{AVOCADO} run --job-results-dir {self.tmpdir.name} "
            f"--disable-sysinfo --json - "
            f"--archive {testname}"
        )
        result = process.run(cmd_line, ignore_status=True)
        json_output = result.stdout_text
        self.assertEqual(
            result.exit_status, e_rc, f"Avocado did not return rc {e_rc}:\n{result}"
        )
        try:
            json_data = json.loads(json_output)
        except Exception as detail:
            raise ParseJSONError(
                (f"Failed to parse content: {detail}\n" f"{json_output}")
            )
        self.assertTrue(json_data, f"Empty JSON result:\n{json_output}")
        self.assertIsInstance(
            json_data["tests"], list, "JSON result lacks 'tests' list"
        )
        n_tests = len(json_data["tests"])
        self.assertEqual(n_tests, e_ntests, "Different number of expected tests")
        n_errors = json_data["errors"]
        self.assertEqual(n_errors, e_nerrors, "Different number of expected tests")
        n_failures = json_data["failures"]
        self.assertEqual(n_failures, e_nfailures, "Different number of expected tests")
        n_skip = json_data["skip"]
        self.assertEqual(n_skip, e_nskip, "Different number of skipped tests")
        n_cancel = json_data["cancel"]
        self.assertEqual(n_cancel, e_ncancel)
        return json_data

    def test_json_plugin_passtest(self):
        self.run_and_check(
            "examples/tests/passtest.py", exit_codes.AVOCADO_ALL_OK, 1, 0, 0, 0
        )

    def test_json_plugin_failtest(self):
        self.run_and_check(
            "examples/tests/failtest.py", exit_codes.AVOCADO_TESTS_FAIL, 1, 0, 1, 0
        )

    def test_json_plugin_skiponsetuptest(self):
        self.run_and_check(
            "examples/tests/cancelonsetup.py", exit_codes.AVOCADO_ALL_OK, 1, 0, 0, 0, 1
        )

    def test_json_plugin_errortest(self):
        self.run_and_check(
            "examples/tests/errortest.py", exit_codes.AVOCADO_TESTS_FAIL, 1, 1, 0, 0
        )


if __name__ == "__main__":...

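Every test above follows the same pattern: build an avocado command line, execute it with avocado.utils.process.run(), then assert on the exit code (via avocado.core.exit_codes) and on the textual or JSON output. A condensed sketch of that pattern outside the selftest harness follows; it assumes an `avocado` binary on $PATH (the suite itself uses the AVOCADO helper from selftests.utils) and that it runs from an avocado source tree where examples/tests/passtest.py exists.

import json
import tempfile

from avocado.core import exit_codes
from avocado.utils import process

with tempfile.TemporaryDirectory() as tmpdir:
    # "--json -" sends machine-readable results to stdout, which is
    # exactly how tests like test_unsupported_status consume them.
    cmd = (
        f"avocado run --disable-sysinfo --job-results-dir {tmpdir} "
        f"--json - -- examples/tests/passtest.py"
    )
    result = process.run(cmd, ignore_status=True)
    assert result.exit_status == exit_codes.AVOCADO_ALL_OK
    results = json.loads(result.stdout_text)
    assert results["tests"][0]["status"] == "PASS"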

plant.py

Source: plant.py (GitHub)


import logging
import time

from avocado import Test

progress_log = logging.getLogger("progress")


class Plant(Test):

    def test_plant_organic(self):
        rows = self.params.get("rows", default=3)

        # Preparing soil
        for row in range(rows):
            progress_log.info("%s: preparing soil on row %s",
                              self.name, row)

        # Letting soil rest
        progress_log.info("%s: letting soil rest before throwing seeds",
                          self.name)
        time.sleep(2)

        # Throwing seeds
        for row in range(rows):
            progress_log.info("%s: throwing seeds on row %s",
                              self.name, row)

        # Let them grow...
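
This variant reads the number of rows from the test parameters and logs to a plain "progress" logger rather than avocado's built-in stream, so the stream has to be requested explicitly to become visible. A hedged usage sketch follows; the path to plant.py is a placeholder that varies between avocado versions, and passing `-p` mirrors test_runner_test_parameters above while `--show` mirrors test_show_user_stream.

from avocado.utils import process

# "plant.py" is a placeholder path; -p feeds self.params.get("rows").
cmd = (
    'avocado --show=app,progress run --disable-sysinfo '
    '-p "rows=5" -- plant.py'
)
result = process.run(cmd, ignore_status=True)
print(result.stdout_text)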


logging_streams.py

Source: logging_streams.py (GitHub)


import logging
import time

from avocado import Test


class Plant(Test):

    """Logs parts of the test progress in a specific logging stream."""

    def test_plant_organic(self):
        progress_log = logging.getLogger("avocado.test.progress")
        rows = int(self.params.get("rows", default=3))

        # Preparing soil
        for row in range(rows):
            progress_log.info("%s: preparing soil on row %s", self.name, row)

        # Letting soil rest
        progress_log.info("%s: letting soil rest before throwing seeds", self.name)
        time.sleep(1)

        # Throwing seeds
        for row in range(rows):
            progress_log.info("%s: throwing seeds on row %s", self.name, row)

        # Let them grow
        progress_log.info("%s: waiting for Avocados to grow", self.name)
        time.sleep(2)
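
Because this version logs to the reserved "avocado.test.progress" stream, there are two ways to surface its messages, both exercised by the selftests above (test_show_user_stream and test_store_logging_stream). A minimal sketch, assuming the examples/tests path from the avocado source tree:

from avocado.utils import process

# 1) Print progress live alongside the normal application output.
process.run(
    "avocado --show=app,avocado.test.progress run --disable-sysinfo "
    "-- examples/tests/logging_streams.py"
)

# 2) Persist the stream to a per-test results file named after the logger.
process.run(
    "avocado run --store-logging-stream=avocado.test.progress "
    "--disable-sysinfo -- examples/tests/logging_streams.py"
)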


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, through best practices and deeper dives into advanced test scenarios, the LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

