How to use the report_test_start method in Slash

Best Python code snippet using slash
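report_test_start is called on an already-started session object exposed by the reporting client used with Slash: it registers a test and returns a test object that is later ended, refreshed, and inspected. The following is a minimal sketch of that call pattern, not taken from the snippets below; it assumes a pre-existing started_session object (the same fixture name the tests below use), so the surrounding client and session setup is omitted, and the test name is purely illustrative.

# Minimal sketch of the report_test_start lifecycle. `started_session` is
# assumed to be an already-started session object from the reporting client.
from uuid import uuid4

test = started_session.report_test_start(
    name='test_login',                  # name of the test being reported (illustrative)
    test_logical_id=str(uuid4()),       # unique logical id for this run
)
test.add_error('unexpected exception')  # optionally attach errors or failures
test.report_end()                       # mark the test as finished
test.refresh()                          # pull the latest server-side state
print(test.status)                      # e.g. 'ERROR', 'SUCCESS' or 'SKIPPED'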

test_test_api.py

Source: test_test_api.py (GitHub)

...
    assert started_test.info['class_name'] == class_name
def test_test_information_name(started_test, test_name):
    assert started_test.info['name'] == test_name
def test_report_test_start_logical_id(started_session, test_name):
    test = started_session.report_test_start(
        name=test_name, test_logical_id='11')
    assert test is not None
def test_test_session_id(started_session, started_test):
    assert started_test.session_id == started_session.id
def test_cannot_start_test_ended_session(ended_session):
    with raises_conflict():
        ended_session.report_test_start(name='name')
def test_cannot_start_test_nonexistent_session(nonexistent_session):
    with raises_not_found():
        nonexistent_session.report_test_start(name='name')
def test_start_time(started_test):
    original_time = flux.current_timeline.time()
    test_time = started_test.start_time
    assert test_time == original_time
def test_duration_empty(started_test):
    assert started_test.duration is None
def test_duration(started_test):
    duration = 10.5
    flux.current_timeline.sleep(duration)
    started_test.report_end()
    started_test.refresh()
    assert started_test.duration == duration
@pytest.mark.parametrize('use_duration', [True, False])
def test_report_test_end(started_test, use_duration):
    duration = 10
    start_time = started_test.start_time
    if use_duration:
        started_test.report_end(duration=duration)
    else:
        flux.current_timeline.sleep(10)
        started_test.report_end()
    started_test.refresh()
    assert started_test.end_time == start_time + duration
def test_test_duration(started_test):
    duration = 10
    start_time = started_test.start_time
    started_test.report_end(duration=duration)
    started_test.refresh()
    assert started_test.duration == duration
def test_end_test_doesnt_exist(nonexistent_test):
    with raises_not_found():
        nonexistent_test.report_end()
def test_end_test_twice(ended_test):
    ended_test.report_end()
def test_test_add_error(started_test):
    started_test.add_error('E')
    started_test.refresh()
    assert started_test.num_errors == 1
def test_get_status_running(started_test):
    started_test.refresh()  # probably not needed
    assert started_test.status == 'RUNNING'
def test_get_status_error(started_test):
    started_test.add_error('E')
    started_test.report_end()
    started_test.refresh()
    assert started_test.status == 'ERROR'
def test_get_status_failure(started_test):
    started_test.add_failure('F')
    started_test.report_end()
    started_test.refresh()
    assert started_test.status == 'FAILURE'
def test_first_error(started_test):
    for i in range(3):
        flux.current_timeline.sleep(1)
        started_test.add_error(str(i))
    started_test.refresh()
    assert started_test.first_error['message'] == '0'
def test_get_status_skipped(started_test):
    started_test.mark_skipped()
    started_test.report_end()
    started_test.refresh()
    assert started_test.status == 'SKIPPED'
def test_get_status_skipped_and_failure(started_test):
    started_test.add_failure('F')
    started_test.mark_skipped()
    started_test.report_end()
    started_test.refresh()
    assert started_test.status == 'FAILURE'
@pytest.mark.parametrize('use_duration', [True, False])
def test_report_test_end(started_test, use_duration):
    if use_duration:
        started_test.report_end(duration=10)
    else:
        started_test.report_end()
    started_test.refresh()
    assert started_test.status == 'SUCCESS'
@pytest.mark.parametrize('reason', [None, 'some reason here'])
def test_skip_reason(started_test, reason):
    started_test.mark_skipped(reason=reason)
    started_test.report_end()
    assert started_test.refresh().status == 'SKIPPED'
    assert started_test.skip_reason == reason
def test_test_variation(started_session, variation, test_name, class_name):
    test = started_session.report_test_start(
        name=test_name, class_name=class_name, variation=variation, test_logical_id=str(uuid4()))
    if variation is NOTHING:
        expected_variation = None
    else:
        expected_variation = variation
    assert test.refresh().variation == expected_variation
def test_test_variation_invalid_values(started_session, invalid_variation, test_name, class_name):
    test = started_session.report_test_start(
        name=test_name,
        class_name=class_name,
        variation=invalid_variation,
        test_logical_id=str(uuid4()))
    assert test.refresh().variation != invalid_variation
    assert test.variation  # make sure it is not empty
def test_test_start_with_metadata(started_session, test_name, class_name):
    metadata = {'metadata_key1': 'metadata_value1',
                'metadata_key2': 'metadata_value2'}
    test = started_session.report_test_start(
        name=test_name, class_name=class_name, metadata=metadata)
    assert test.refresh().get_metadata() == metadata
def test_test_index_default(started_session, test_name):
    test_1 = started_session.report_test_start(name=test_name, test_logical_id=str(uuid4()))
    assert test_1.refresh().test_index == 1
    test_2 = started_session.report_test_start(name=test_name, test_logical_id=str(uuid4()))
    assert test_2.refresh().test_index == 2
def test_test_index_custom(started_session, test_name):
    test_1 = started_session.report_test_start(name=test_name, test_index=600)
    assert test_1.refresh().test_index == 600
def test_report_interrupted(started_test):
    started_test.report_interrupted()
    assert started_test.refresh().status.lower() == 'interrupted'
def test_interruptions_after_test_end(started_test):
    started_test.report_end()
    started_test.report_interrupted()
    assert started_test.refresh().status.lower() == 'interrupted'
def test_test_parameters(started_session, test_name, params):
    test = started_session.report_test_start(
        name=test_name, parameters=params)
    expected = params.copy()
    if 'obj_param' in expected:
        expected['obj_param'] = str(expected['obj_param'])
    test.refresh()
    got_params = test.parameters
    assert expected['very_long_param'] != got_params['very_long_param']
    assert expected['very_long_param'][:10] == got_params['very_long_param'][:10]
    expected.pop('very_long_param')
    got_params.pop('very_long_param')
    assert got_params == expected
def test_append_upcoming_with_ended_session(ended_session, test_name, file_name, class_name):
    test_list = [{'test_logical_id': str(uuid4()),
                  'file_name': file_name,
                  'name': test_name,
                  'class_name': class_name
                  }]
    with raises_conflict():
        ended_session.report_upcoming_tests(tests=test_list)
def test_append_upcoming_report_all_tests(started_session, test_name, file_name, class_name):
    test_logical_id = str(uuid4())
    test1 = {'test_logical_id': test_logical_id,
             'name': test_name,
             'file_name': file_name,
             'class_name': class_name
             }
    test2 = {'test_logical_id': str(uuid4()),
             'name': test_name,
             'file_name': file_name,
             'class_name': class_name
             }
    test_list = [test1, test2]
    all_tests = started_session.query_tests(include_planned=True).all()
    assert len(all_tests) == 0
    started_session.report_upcoming_tests(tests=test_list)
    all_tests = started_session.query_tests(include_planned=True).all()
    assert len(all_tests) == 2
    started_session.report_test_start(name=test_name, test_logical_id=test_logical_id)
    all_tests = started_session.query_tests(include_planned=True).all()
    assert len(all_tests) == 2
    started_session.report_test_start(name=test_name, test_logical_id=str(uuid4()))
    all_tests = started_session.query_tests(include_planned=True).all()
    assert len(all_tests) == 3
def test_cannot_report_interruption_on_planned_test(started_session, test_name, file_name, class_name):
    test_logical_id = str(uuid4())
    test1 = {'test_logical_id': test_logical_id,
             'name': test_name,
             'file_name': file_name,
             'class_name': class_name
             }
    test_list = [test1]
    started_session.report_upcoming_tests(tests=test_list)
    test = started_session.query_tests(include_planned=True)[0]
    with raises_conflict():
        test.report_interrupted()
...
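The last few tests in this snippet show how report_test_start interacts with planned tests: report_upcoming_tests announces tests in advance, and starting a test with the same test_logical_id attaches the run to its planned entry instead of adding a new test to the session. A condensed sketch of that flow follows, again assuming an existing started_session object; the name, file, and class values are purely illustrative.

from uuid import uuid4

# Sketch only: `started_session` is assumed, as above.
logical_id = str(uuid4())

# Announce a planned (upcoming) test before it actually runs.
started_session.report_upcoming_tests(tests=[{
    'test_logical_id': logical_id,
    'name': 'test_login',
    'file_name': 'tests/test_auth.py',
    'class_name': 'LoginSuite',
}])

# Starting the test with the same logical id reuses the planned entry,
# so query_tests(include_planned=True) still returns a single test.
started_session.report_test_start(name='test_login', test_logical_id=logical_id)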

test.py

Source: test.py (GitHub)

...
        self.unsupported = {}
        self.attachments = {}
    def startTest(self, test):
        super(TestResult, self).startTest(test)
        self.report_test_start(test)
    def addAttachment(self, test, name, attachment):
        self.attachments.setdefault(test, [])
        self.attachments[test].append((name, attachment))
    def stopTest(self, test):
        super(TestResult, self).stopTest(test)
        self.report_test_stop(test)
    def addError(self, test, err):
        if isinstance(err[1], UnavailableFeature):
            self.addUnsupported(test, err[1].args[0])
        else:
            super(TestResult, self).addError(test, err)
            self.report_error(test)
    def addFailure(self, test, err):
        super(TestResult, self).addFailure(test, err)
        self.report_failure(test)
    def addUnsupported(self, test, feature):
        self.unsupported.setdefault(str(feature), 0)
        self.unsupported[str(feature)] += 1
        self.report_unsupported(test, feature)
    def addSkipped(self, test):
        self.report_skipped(test)
    def addSuccess(self, test):
        super(TestResult, self).addSuccess(test)
        self.report_success(test)
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            return ''.join(traceback.format_exception(exctype, value, tb, length))
        return cgitb.text((exctype, value, tb))
    # FIXME: Maybe these should be callbacks?
    def report_starting(self):
        pass
    def report_test_start(self, test):
        pass
    def report_test_stop(self, test):
        pass
    def report_error(self, test):
        pass
    def report_failure(self, test):
        pass
    def report_unsupported(self, test, feature):
        pass
    def report_skipped(self, test):
        pass
    def report_success(self, test):
        pass
    def report_finished(self):
        pass
class TextTestResult(TestResult):
    seperator1 = '=' * 70
    seperator2 = '-' * 70
    def getDescription(self, test):
        return test.shortDescription()
    def report_finished(self, timeTaken):
        self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
        self.stream.writeln(self.seperator2)
        run = self.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not self.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = map(len, (self.failures, self.errors))
            if failed:
                self.stream.write("failures=%d" % failed)
            if errored:
                if failed: self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
        if len(self.unsupported) > 0:
            for feature, count in self.unsupported.iteritems():
                self.stream.writeln("%s not available, %d tests skipped" % (feature, count))
    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.seperator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.seperator2)
            self.stream.writeln("%s" % err)
class SimpleTestResult(TextTestResult):
    def report_starting(self):
        self.pb = progressbar.ProgressBar()
        self.pb.max = self.num_tests
    def report_test_start(self, test):
        self.pb.update(self.pb.cur + 1)
    def report_finished(self, timetaken):
        self.pb.finish()
        super(SimpleTestResult, self).report_finished(timetaken)
class VerboseConsoleTextResult(TextTestResult):
    def report_test_start(self, test):
        print test.shortDescription()
class TestRunner(object):
    def __init__(self, opts, stream=sys.stderr, descriptions=0, verbosity=1):
        self.stream = unittest._WritelnDecorator(stream)
        self.descriptions = 0
        self.verbosity = 0
    def make_results(self, tests):
        if self.verbosity > 1:
            klass = VerboseConsoleTextResult
        else:
            klass = SimpleTestResult
        return klass(self.stream, self.descriptions, self.verbosity, num_tests=tests.countTestCases())
    def iter_tests(self, tests):
        if isinstance(tests, unittest.TestSuite):
...
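This snippet (Python 2) treats report_test_start as an overridable hook on a unittest TestResult subclass: startTest invokes the hook, and concrete result classes override it to drive a progress bar or verbose output. Below is a minimal Python 3 sketch of the same pattern; the class name and the print-based hook body are illustrative and not part of the original code.

import unittest

class ReportingTestResult(unittest.TestResult):
    """Sketch: calls a report_test_start() hook whenever a test begins."""

    def startTest(self, test):
        super().startTest(test)
        self.report_test_start(test)

    def report_test_start(self, test):
        # Override in subclasses, e.g. to update a progress bar or log progress.
        print(test.shortDescription() or str(test))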

session.py

Source: session.py (GitHub)

...
        kwargs = {'id': self.id, 'duration': duration, 'has_fatal_errors': has_fatal_errors}
        self.client.api.call_function('report_session_end', kwargs)
    def send_keepalive(self) -> None:
        self.client.api.call_function('send_keepalive', {'session_id': self.id})
    def report_test_start(self, name, file_name=NOTHING, class_name=NOTHING, test_logical_id=NOTHING, scm=NOTHING,
                          file_hash=NOTHING,
                          scm_revision=NOTHING, scm_dirty=NOTHING, scm_local_branch=NOTHING, scm_remote_branch=NOTHING,
                          is_interactive=NOTHING, variation=NOTHING, metadata=NOTHING, test_index=NOTHING, parameters=NOTHING):
        params = {
            'session_id': self.id,
            'name': name,
            'scm': scm,
            'file_hash': file_hash,
            'scm_revision': scm_revision,
            'scm_dirty': scm_dirty,
            'scm_local_branch': scm_local_branch,
            'scm_remote_branch': scm_remote_branch,
            'class_name': class_name,
            'file_name': file_name,
...
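The signature above shows everything report_test_start can carry besides the test name: file and class names, a logical id, SCM details, a variation, arbitrary metadata, an explicit test index, and parameters (each defaulting to the NOTHING sentinel when not supplied). A hedged example of a fuller call follows, assuming the same started_session object as before; every value is illustrative.

from uuid import uuid4

# Sketch only: `started_session` is assumed, as above.
test = started_session.report_test_start(
    name='test_checkout',
    file_name='tests/test_cart.py',        # file the test lives in (illustrative)
    class_name='CartSuite',                # containing class, if any (illustrative)
    test_logical_id=str(uuid4()),          # stable id linking planned and actual runs
    metadata={'owner': 'payments-team'},   # arbitrary key/value metadata
    parameters={'currency': 'USD'},        # parametrization values
    test_index=7,                          # explicit ordering index
)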

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

