How to use the defaultTestResult method in autotest

Best Python code snippet using autotest_python

test_skipping.py

Source:test_skipping.py Github

copy

Full Screen

# NOTE(review): reconstructed from a garbled scrape of a test_skipping.py
# (original line numbers were fused into the text and all indentation lost).
# The scrape began at original line 2, so the leading ``import unittest`` is
# restored here; the last method (test_skip_class) is truncated in the source
# and ends mid-test.
import unittest

from unittest.test.support import LoggingResult


class Test_TestSkipping(unittest.TestCase):
    """Exercise unittest's skip machinery: skipTest(), subtests, decorators,
    and class-level @unittest.skip, checking the event stream emitted to a
    LoggingResult."""

    def test_skipping(self):
        # A test whose body calls skipTest() must emit startTest/addSkip/stopTest.
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return LoggingResult(events)

            def test_skip_me(self):
                self.skipTest("skip")

        events = []
        result = LoggingResult(events)
        test = Foo("test_skip_me")
        self.assertIs(test.run(result), result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "skip")])

        # With no result passed, run() falls back to defaultTestResult() and
        # brackets the events with startTestRun/stopTestRun.
        events = []
        result = test.run()
        self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
                                  'stopTest', 'stopTestRun'])
        self.assertEqual(result.skipped, [(test, "skip")])
        self.assertEqual(result.testsRun, 1)

        # Try letting setUp skip the test now.
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return LoggingResult(events)

            def setUp(self):
                self.skipTest("testing")

            def test_nothing(self): pass

        events = []
        result = LoggingResult(events)
        test = Foo("test_nothing")
        self.assertIs(test.run(result), result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(result.testsRun, 1)

        events = []
        result = test.run()
        self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
                                  'stopTest', 'stopTestRun'])
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(result.testsRun, 1)

    def test_skipping_subtests(self):
        # Skipping inside subTest() records one addSkip per skipped (sub)test;
        # the subtest skips report a distinct TestCase object, the final
        # plain skipTest() reports the test itself.
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return LoggingResult(events)

            def test_skip_me(self):
                with self.subTest(a=1):
                    with self.subTest(b=2):
                        self.skipTest("skip 1")
                    self.skipTest("skip 2")
                self.skipTest("skip 3")

        events = []
        result = LoggingResult(events)
        test = Foo("test_skip_me")
        self.assertIs(test.run(result), result)
        self.assertEqual(events, ['startTest', 'addSkip', 'addSkip',
                                  'addSkip', 'stopTest'])
        self.assertEqual(len(result.skipped), 3)
        subtest, msg = result.skipped[0]
        self.assertEqual(msg, "skip 1")
        self.assertIsInstance(subtest, unittest.TestCase)
        self.assertIsNot(subtest, test)
        subtest, msg = result.skipped[1]
        self.assertEqual(msg, "skip 2")
        self.assertIsInstance(subtest, unittest.TestCase)
        self.assertIsNot(subtest, test)
        self.assertEqual(result.skipped[2], (test, "skip 3"))

        events = []
        result = test.run()
        self.assertEqual(events,
                         ['startTestRun', 'startTest', 'addSkip', 'addSkip',
                          'addSkip', 'stopTest', 'stopTestRun'])
        self.assertEqual([msg for subtest, msg in result.skipped],
                         ['skip 1', 'skip 2', 'skip 3'])

    def test_skipping_decorators(self):
        # skipUnless(False) and skipIf(True) must skip; skipUnless(True) and
        # skipIf(False) must run the test normally.
        op_table = ((unittest.skipUnless, False, True),
                    (unittest.skipIf, True, False))
        for deco, do_skip, dont_skip in op_table:
            class Foo(unittest.TestCase):
                def defaultTestResult(self):
                    return LoggingResult(events)

                @deco(do_skip, "testing")
                def test_skip(self): pass

                @deco(dont_skip, "testing")
                def test_dont_skip(self): pass

            test_do_skip = Foo("test_skip")
            test_dont_skip = Foo("test_dont_skip")
            suite = unittest.TestSuite([test_do_skip, test_dont_skip])
            events = []
            result = LoggingResult(events)
            self.assertIs(suite.run(result), result)
            self.assertEqual(len(result.skipped), 1)
            expected = ['startTest', 'addSkip', 'stopTest',
                        'startTest', 'addSuccess', 'stopTest']
            self.assertEqual(events, expected)
            self.assertEqual(result.testsRun, 2)
            self.assertEqual(result.skipped, [(test_do_skip, "testing")])
            self.assertTrue(result.wasSuccessful())

            events = []
            result = test_do_skip.run()
            self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
                                      'stopTest', 'stopTestRun'])
            self.assertEqual(result.skipped, [(test_do_skip, "testing")])

            events = []
            result = test_dont_skip.run()
            self.assertEqual(events, ['startTestRun', 'startTest', 'addSuccess',
                                      'stopTest', 'stopTestRun'])
            self.assertEqual(result.skipped, [])

    def test_skip_class(self):
        # @unittest.skip on the class skips every test method in it, so the
        # test body must never run (record stays empty).
        @unittest.skip("testing")
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return LoggingResult(events)

            def test_1(self):
                record.append(1)

        events = []
        record = []
        result = LoggingResult(events)
        test = Foo("test_1")
        suite = unittest.TestSuite([test])
        self.assertIs(suite.run(result), result)
        self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
        self.assertEqual(result.skipped, [(test, "testing")])
        self.assertEqual(record, [])

        events = []
        result = test.run()
        # NOTE(review): the scraped source is truncated here ("...") — the
        # remaining assertions of this method are not visible.

Full Screen

Full Screen

test_test_suite.py

Source:test_test_suite.py Github

copy

Full Screen

# NOTE(review): reconstructed from a garbled scrape of a pytest module
# (original line numbers were fused into the text and all indentation lost).
# The scrape begins mid-file: a truncated helper ending ``return x + y``
# preceded this fragment, but its ``def`` line is not visible and is therefore
# not reproduced. ``Problem``, ``SubmissionMetadata``, ``TestCase``, the
# ``square``/``metadata`` fixtures and the ``*_one_tc*`` problem objects are
# defined elsewhere in the original file.
def test_square_wrong(square: Problem[int], metadata: SubmissionMetadata) -> None:
    """Test that the tests fail for the incorrect implementation."""
    suite, _ = square.generate_test_suite(square_wrong, metadata)
    result = suite.run(TestCase().defaultTestResult())
    assert not result.wasSuccessful()


def test_square_right(square: Problem[int], metadata: SubmissionMetadata) -> None:
    """Test that the tests succeed for the correct implementation."""
    suite, _ = square.generate_test_suite(square_right, metadata)
    result = suite.run(TestCase().defaultTestResult())
    assert result.wasSuccessful()


@pytest.fixture(name="square_failure")
def fixture_square_failure(metadata: SubmissionMetadata) -> list[tuple[TestCase, str]]:
    """Generate a list of failures for the single tc square problem."""
    suite, _ = square_one_tc.generate_test_suite(square_wrong, metadata)
    result = suite.run(TestCase().defaultTestResult())
    return result.failures


def test_one_failure(square_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem only has one failure."""
    assert len(square_failure) == 1


def test_failure_message(square_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem's failure message is correct."""
    message = square_failure[0][1]
    assert (
        "Your submission didn't give the output we expected. "
        "We checked it with 2 and got 3, but we expected 4." in message
    )


def test_failure_description(square_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem's test case description is correct."""
    message = square_failure[0][0].shortDescription()
    assert message == "Test on 2."


@pytest.fixture(name="diff_failure")
def fixture_diff_failure(metadata: SubmissionMetadata) -> list[tuple[TestCase, str]]:
    """Generate a list of failures for the single tc diff problem."""
    suite, _ = diff_one_tc.generate_test_suite(diff_wrong, metadata)
    result = suite.run(TestCase().defaultTestResult())
    return result.failures


def test_one_failure_diff(diff_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem only has one failure."""
    assert len(diff_failure) == 1


def test_failure_message_multiple_args(
    diff_failure: list[tuple[TestCase, str]]
) -> None:
    """Test that the one-tc diff problem's failure message is correct.

    This test is interesting because diff has two arguments, and we do formatting for
    tuples in `_TestInputs`.
    """
    message = diff_failure[0][1]
    assert (
        "Your submission didn't give the output we expected. "
        "We checked it with 2,1 and got 3, but we expected 1." in message
    )


def test_failure_description_multiple_args(
    diff_failure: list[tuple[TestCase, str]]
) -> None:
    """Test that the one-tc diff problem's test case description is correct.

    This test is interesting because diff has two arguments, and we do formatting for
    tuples in `_TestInputs`.
    """
    message = diff_failure[0][0].shortDescription()
    assert message == "Test on 2,1."


@pytest.fixture(name="square_kwd_failure")
def fixture_square_kwd_failure(
    metadata: SubmissionMetadata,
) -> list[tuple[TestCase, str]]:
    """Generate a list of failures for the single tc square kwd problem."""
    suite, _ = square_one_tc_kwd.generate_test_suite(square_wrong, metadata)
    result = suite.run(TestCase().defaultTestResult())
    return result.failures


def test_one_failure_square_kwd(square_kwd_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem only has one failure."""
    assert len(square_kwd_failure) == 1


def test_failure_message_kwdargs(
    square_kwd_failure: list[tuple[TestCase, str]]
) -> None:
    """Test that the one-tc square_kwd problem's failure message is correct.

    This test is interesting because square_kwd has a keyword argument, and we do
    formatting for kwdargs in `_TestInputs`.
    """
    message = square_kwd_failure[0][1]
    assert (
        "Your submission didn't give the output we expected. "
        "We checked it with x=2 and got 3, but we expected 4." in message
    )


def test_failure_description_kwdargs(
    square_kwd_failure: list[tuple[TestCase, str]]
) -> None:
    """Test that the one-tc square_kwd problem's test case description is correct.

    This test is interesting because square_kwd has a keyword argument, and we do
    formatting for kwdargs in `_TestInputs`.
    """
    message = square_kwd_failure[0][0].shortDescription()
    assert message == "Test on x=2."


@pytest.fixture(name="diff_kwd_failure")
def fixture_diff_kwd_failure(
    metadata: SubmissionMetadata,
) -> list[tuple[TestCase, str]]:
    """Generate a list of failures for the single tc diff kwd problem."""
    suite, _ = diff_one_tc_kwd.generate_test_suite(diff_wrong, metadata)
    result = suite.run(TestCase().defaultTestResult())
    return result.failures


def test_one_failure_diff_kwd(diff_kwd_failure: list[tuple[TestCase, str]]) -> None:
    """Test that the one-tc problem only has one failure."""
    assert len(diff_kwd_failure) == 1


def test_failure_message_pos_and_kwdargs(
    diff_kwd_failure: list[tuple[TestCase, str]]
) -> None:
    """Test that the one-tc diff_kwd problem's failure message is correct.

    This test is interesting because diff_kwd has a keyword argument and a positional
    argument.
    """
    message = diff_kwd_failure[0][1]
    print(diff_kwd_failure[0])
    print(message)
    # NOTE(review): the scraped source is truncated here ("...") — the
    # assertions of this method beyond the prints are not visible.

Full Screen

Full Screen

handlers_tests.py

Source:handlers_tests.py Github

copy

Full Screen

...48 def test_handler_should_not_be_called(self):49 broken_handler = Mock(side_effect=Exception())50 test_case = test_case_ok()51 test_case.add_handler('on_test_error', broken_handler)52 result = self.defaultTestResult()53 test_case.run(result)54 self.assertEqual(0, len(result.errors))55 self.assertEqual(broken_handler.call_count, 0)56 def test_ok_with_broken_handler(self):57 broken_handler = Mock(side_effect=Exception('Handler is broken'))58 test_case = test_case_fail_on_test_method()59 test_case.add_handler('on_test_error', broken_handler)60 result = self.defaultTestResult()61 test_case.run(result)62 self.assertEqual(1, len(result.errors))63 self.assertIn('ValueError(\'123\')', result.errors[0][1])64 self.assertNotIn('Handler is broken', result.errors[0][1])65 self.assertEqual(1, broken_handler.call_count)66 def test_few_handlers_call(self):67 handler_1 = Mock()68 handler_2 = Mock()69 test_case = test_case_fail_on_test_method()70 test_case.add_handler('on_test_error', handler_1)71 test_case.add_handler('on_test_error', handler_2)72 result = self.defaultTestResult()73 test_case.run(result)74 self.assertEqual(1, len(result.errors))75 self.assertIn('ValueError(\'123\')', result.errors[0][1])76 self.assertNotIn('Handler is broken', result.errors[0][1])77 self.assertEqual(handler_1.call_count, 1)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful