How to use the get_runnables method in Slash

Best Python code snippets using slash
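
In Slash, Loader.get_runnables resolves a path, or a list of paths and test addresses, into the concrete runnable test objects, and it must be called inside an active Session. Before diving into the snippets below, here is a minimal sketch of the pattern they all build on (the 'tests' directory name is a placeholder):

# Minimal usage sketch: discover runnable tests under a directory.
# Assumes Slash is installed; 'tests' is a placeholder directory name.
import os
from slash import Session
from slash.loader import Loader

with Session():
    runnables = Loader().get_runnables('tests')
    # Each runnable carries its source location on its __slash__ metadata:
    for runnable in runnables:
        print(os.path.basename(runnable.__slash__.file_path))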

test_loader.py

Source: test_loader.py (GitHub)

...
def test_total_num_tests(suite):
    suite.debug_info = False
    path = suite.commit()
    with Session() as s:
        Loader().get_runnables(path)
        assert s.get_total_num_tests() == len(suite)


def test_loader_sort_filenames(tmpdir):
    tests_dir = tmpdir.join(str(uuid4()))
    filenames = []
    for _ in range(10):
        filename = str(uuid4()).replace('-', '') + '.py'
        with tests_dir.join(filename).open('w', ensure=True) as f:
            f.write('def test_something():\n    pass')
        filenames.append(filename)
    with Session():
        runnables = Loader().get_runnables(str(tests_dir))
    assert [os.path.basename(runnable.__slash__.file_path) for runnable in runnables] == sorted(filenames)


def test_loader_skips_empty_dirs(tmpdir):
    tests_dir = tmpdir.join('tests')
    # Files under hidden directories (here '.dir') should not be collected:
    with tests_dir.join('.dir').join('test_something.py').open('w', ensure=True) as f:
        f.write('def test_something():\n    pass')
    with Session():
        runnables = Loader().get_runnables(str(tests_dir))
    assert runnables == []


def test_loader_warns_duplicate_test_funcs(tmpdir):
    tests_dir = tmpdir.join('tests')
    full_path = tests_dir.join('.dir').join('test_something.py')
    test_name = 'test_something'
    with full_path.open('w', ensure=True) as f:
        f.write('def {}():\n    assert True\n'.format(test_name))
        f.write('def {}():\n    assert True\n'.format(test_name))
    with Session() as session:
        Loader().get_runnables([str(full_path)])
    assert len(session.warnings) == 1
    assert 'Duplicate' in session.warnings.warnings[0].details['message']
    assert test_name in session.warnings.warnings[0].details['message']


def test_loader_warns_on_duplicate_fixtures(suite):
    fixture_name = 'fixture_name'
    fixture1 = suite.slashconf.add_fixture(name=fixture_name)
    fixture1.append_line('assert this == slash.context.fixture')
    fixture2 = suite.slashconf.add_fixture(name=fixture_name)
    fixture2.append_line('assert this == slash.context.fixture')
    summary = suite.run()
    assert len(summary.session.warnings) == 1
    assert 'Duplicate' in summary.session.warnings.warnings[0].details['message']
    assert fixture_name in summary.session.warnings.warnings[0].details['message']


@pytest.mark.parametrize('specific_method', [True, False])
@pytest.mark.parametrize('with_parameters', [True, False])
def test_iter_specific_factory(suite, suite_test, specific_method, with_parameters):
    if suite_test.cls is not None and specific_method:
        suite_test.cls.add_method_test()
    if with_parameters:
        suite_test.add_parameter()
    for test in suite:
        if suite_test.cls is None and test is not suite_test:
            # we are selecting a specific function, and that's not it:
            test.expect_deselect()
        elif suite_test.cls is not None and test.cls is not suite_test.cls:
            test.expect_deselect()
        elif specific_method and suite_test.cls is test.cls and suite_test is not test:
            test.expect_deselect()
    path = suite.commit()
    if suite_test.cls:
        assert suite_test.cls.tests
        factory_name = suite_test.cls.name
    else:
        factory_name = suite_test.name
    pattern = '{}:{}'.format(os.path.join(path, suite_test.file.get_relative_path()), factory_name)
    if suite_test.cls is not None and specific_method:
        assert len(suite_test.cls.tests) > 1
        pattern += '.{}'.format(suite_test.name)
    suite.run(args=[pattern])


def test_import_error_registers_as_session_error(active_slash_session, test_loader):
    with pytest.raises(CannotLoadTests):
        test_loader.get_runnables(["/non/existent/path"])
    errors = active_slash_session.results.global_result.get_errors()
    assert len(errors) == 1
    [error] = errors  # pylint: disable=unused-variable


def test_no_traceback_for_slash_exception():
    suite = Suite()
    summary = suite.run(expect_session_errors=True)
    assert not summary.session.results.is_success()
    [err] = summary.session.results.global_result.get_errors()
    assert err.exception_type is CannotLoadTests
    output = summary.get_console_output()
    assert 'Traceback' not in output


def test_no_traceback_for_marked_exceptions():
    suite = Suite()

    @suite.slashconf.append_body
    def __code__():  # pylint: disable=unused-variable
        from slash.exception_handling import inhibit_unhandled_exception_traceback
        raise inhibit_unhandled_exception_traceback(Exception('Some Error'))

    summary = suite.run(expect_session_errors=True)
    assert not summary.session.results.is_success()
    errors = summary.session.results.global_result.get_errors()
    assert [err.exception_type for err in errors] == [Exception]
    assert 'Some Error' in errors[0].exception_str
    output = summary.get_console_output()
    assert 'Traceback' not in output


def test_import_errors_with_session():
    suite = Suite()
    for _ in range(20):
        suite.add_test()
    problematic = suite.files[1]
    problematic.prepend_line('from nonexistent import nonexistent')
    for test in suite:
        test.expect_deselect()
    summary = suite.run(expect_session_errors=True)
    assert summary.exit_code != 0
    errs = summary.session.results.global_result.get_errors()
    for err in errs:
        assert 'No module named nonexistent' in err.message or "No module named 'nonexistent'" in err.message
    return suite


def test_fixture_and_test_overrides(tmpdir, config_override):
    tests_dir = tmpdir.join('tests')
    full_path = tests_dir.join('.dir').join('test_something.py')
    test_prefix = 'test_something'
    with full_path.open('w', ensure=True) as f:
        f.write('import slash\n')
        f.write('@slash.tag("tag-1")\ndef {}1():\n    pass\n'.format(test_prefix))
        f.write('def {}2():\n    pass\n'.format(test_prefix))
    config_override("run.filter_strings", ["tag:tag-1"])
    with Session():
        runnables = Loader().get_runnables(["{}:{}{}".format(full_path, test_prefix, index) for index in range(1, 3)])
    assert len(runnables) == 1
...
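
Besides plain paths, the snippet above passes explicit test addresses to get_runnables, both as 'file.py:function_name' and, for method tests, as 'file.py:ClassName.method_name' (this is the pattern test_iter_specific_factory builds). A short illustration of the same addressing; the file and test names here are hypothetical:

# Hypothetical addresses, mirroring the address patterns built in the tests above.
from slash import Session
from slash.loader import Loader

with Session():
    # A specific test function in a file:
    runnables = Loader().get_runnables(['tests/test_math.py:test_addition'])
    # A specific method of a test class, as test_iter_specific_factory constructs:
    runnables = Loader().get_runnables(['tests/test_math.py:TestMath.test_addition'])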

test_e2e.py

Source: test_e2e.py (GitHub)

import os
from typing import List

from test_executor.abstract_test.test_result import TestVerdict, TestResult
from test_executor.test_generation.test_executor import TestExecutor
from test_executor.test_generation.test_loader import TestLoader

tests_path = os.path.join(os.path.dirname(__file__), "mocked_tests")


class ListenerMock(object):
    """
    Mock for a listener
    """

    def __init__(self):
        self.results = []

    def notify(self, result: TestResult):
        self.results.append(result)


def execute(test_paths: List[str], concurrency: int = 1, listener=None, set_params=False, get_runnables=False):
    """
    Main execution flow

    :param get_runnables: True if should return runnables
    :param concurrency: concurrency level for executing
    :param test_paths: list of test paths
    :param listener: listener for the tests
    :param set_params: True if should set params in execution
    :return: results of the tests
    """
    loaded_tests = TestLoader().load_tests(test_paths)

    params = {}

    if set_params:
        params = {
            "MockTest1": {"1key1": "1", "1key2": "2"},
            "MockTest2": {"2key1": "1", "2key2": "2"},
            "MockTest1.test1": {"1key3": "1", "1key4": "2"},
        }

    test_executor = TestExecutor(concurrency_level=concurrency)

    test_executor.Logger.info("*" * 32 + " Start Execution " + "*" * 32)
    results = test_executor.execute(loaded_tests, listener=listener, custom_params=params)
    test_executor.Logger.info("*" * 32 + " End Execution " + "*" * 32)
    for result in results:
        assert result.verdict == TestVerdict.PASSED, f"Expected test {result.test_name} to pass"

    if get_runnables:
        return results, loaded_tests

    return results


def test_regular_flow():
    """
    Test regular features
    """
    execute([tests_path])


def test_regular_flow_set_params():
    """
    Test regular features with setting params
    """
    results, runnables = execute([tests_path], set_params=True, get_runnables=True)

    assert len(results) == len(runnables), "Expected runnables and results to have the same length"
    for runnable in runnables:
        if str(runnable).startswith("MockTest1") or str(runnable).startswith("MockTest2"):
            assert runnable._test_class.params, "Expected to have params"


def test_regular_flow_multiple_paths():
    """
    Tests multiple paths feature
    """
    results = execute([tests_path] * 3)

    assert len(results) == 18, f"Expected to have 18 results but had: {len(results)}"


def test_regular_flow_parallel():
    """
    Test parallel execution feature
    """
    results = execute([tests_path] * 3, 18)

    assert len(results) == 18, f"Expected to have 18 results but had: {len(results)}"


def test_regular_flow_parallel_and_listener():
    """
    Test parallel execution feature with listener
    """
    listener = ListenerMock()
    results = execute([tests_path] * 3, 18, listener=listener)

    assert len(results) == 18, f"Expected to have 18 results but had: {len(results)}"
    assert results == listener.results, "Expected listener to have same results"


tests = [
    test_regular_flow,
    test_regular_flow_set_params,
    test_regular_flow_multiple_paths,
    test_regular_flow_parallel,
    test_regular_flow_parallel_and_listener
]

if __name__ == '__main__':
    import traceback

    executor = TestExecutor()
    failed = False

    for test in tests:
        try:
            executor.Logger.info("*" * 32)
            executor.Logger.info(f"Running: {test}")
            test()
            executor.Logger.info("Execution Ended")
            executor.Logger.info("*" * 32)
        except Exception:
            executor.Logger.error("An exception has occurred, traceback is below")
            executor.Logger.error(traceback.format_exc())
            failed = True

    if failed: ...
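
The ListenerMock above defines the whole listener contract this framework expects: any object with a notify(result) method that receives each TestResult as it completes. As a hedged variant under the same assumption, here is a listener that keeps only failures (FailureCollector is our name, not from the source; it reuses the TestResult and TestVerdict imports shown above):

# Hypothetical listener variant built on the notify(result) hook shown above.
class FailureCollector:
    """Keeps only the results that did not pass."""

    def __init__(self):
        self.failures = []

    def notify(self, result: TestResult):
        if result.verdict != TestVerdict.PASSED:
            self.failures.append(result)

An instance could then be passed as the listener argument to execute() in place of ListenerMock.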

runpar_test.py

Source: runpar_test.py (GitHub)

...
        # Run the algorithm over the first task
        rp = RunPar(task1, dag, cores)

        # Check the runnables assigned to each core:
        self.assertEqual(core_0.get_runnables(), [r1, r3])
        self.assertEqual(core_1.get_runnables(), [r4])
        # Check each core's total WCET (utilization):
        self.assertEqual(core_0.utilization, r1.WCET + r3.WCET)
        self.assertEqual(core_1.utilization, r4.WCET)

    def test_runnables_allocated_correctly_task_2(self):
        # Task 2
        core_0 = Core(0)
        core_1 = Core(1)
        cores = [core_0, core_1]

        # Run the algorithm over the second task
        rp = RunPar(task2, dag, cores)

        core_0.print_runnables()
        core_1.print_runnables()

        # Check the runnables assigned to each core:
        self.assertEqual(core_0.get_runnables(), [r2, r5])
        # self.assertEqual(core_1.get_runnables(), [r6])  # will fail because the idle runnable is assigned dynamically
        # Check each core's total WCET (utilization):
        self.assertEqual(core_0.utilization, r2.WCET + r5.WCET)
        self.assertEqual(core_1.utilization, r2.WCET + r6.WCET)

    def test_runnables_allocated_correctly_task_3(self):
        # Task 3
        core_0 = Core(0)
        core_1 = Core(1)
        cores = [core_0, core_1]

        # Run the algorithm over the third task
        rp = RunPar(task3, dag, cores)

        # Check the runnables assigned to each core:
        self.assertEqual(core_0.get_runnables(), [r7])
        self.assertEqual(core_1.get_runnables(), [])
        # Check each core's total WCET (utilization):
        self.assertEqual(core_0.utilization, r7.WCET)
        self.assertEqual(core_1.utilization, 0)

    def test_task3(self):
        pass


if __name__ == '__main__': ...
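
These assertions pin down the Core interface that RunPar allocates onto: get_runnables() returns the runnables assigned to a core, and utilization accumulates their worst-case execution times (WCET). A minimal sketch of such a Core follows; it is our own illustration under those assumptions, not the project's actual implementation, and it assumes each runnable exposes a WCET attribute as r1..r7 do above:

# Hypothetical minimal Core consistent with the assertions above.
class Core:
    def __init__(self, core_id):
        self.core_id = core_id
        self._runnables = []
        self.utilization = 0  # accumulated WCET of the assigned runnables

    def assign(self, runnable):
        # The allocation algorithm (RunPar) would call something like this
        # when it places a runnable on a core.
        self._runnables.append(runnable)
        self.utilization += runnable.WCET

    def get_runnables(self):
        return list(self._runnables)

    def print_runnables(self):
        for runnable in self._runnables:
            print(f"core {self.core_id}: {runnable}")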

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
