How to use the test_after_test_error3 method in Test_junkie

Best Python code snippet using test_junkie

test_ignore.py

Source: test_ignore.py (GitHub)
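
The snippet below is taken from the test_junkie project's own test suite. It runs several example suites (ignore handling and error handling) through Runner, prints the suite-level and test-level metrics, and then asserts on those metrics via QualityManager. The test_after_test_error3 function near the end verifies the class-level metrics that are recorded when a suite's after-test hook raises an exception.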

import pprint

from test_junkie.errors import BadParameters, BadSignature
from test_junkie.runner import Runner
from tests.QualityManager import QualityManager
from tests.junkie_suites.IgnoreSuite import IgnoreSuiteBoundMethod, IgnoreSuiteFunction, IgnoreSuiteClassic, \
    IgnoreSuiteClassic2, IgnoreSuiteClassic3
from tests.junkie_suites.error_handling.ErrorSuite4 import ErrorSuite4
from tests.junkie_suites.error_handling.ErrorSuite5 import ErrorSuite5
from tests.junkie_suites.error_handling.ErrorSuite6 import ErrorSuite6

# Execute IgnoreSuiteBoundMethod and collect the executed suite objects for the assertions below.
runner1 = Runner([IgnoreSuiteBoundMethod])
runner1.run()
results1 = runner1.get_executed_suites()

pprint.pprint(results1[0].metrics.get_metrics())
for test in results1[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics():

    metrics = results1[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics():

    assert results1[0].get_test_objects()
    for test in results1[0].get_test_objects():

        properties = test.metrics.get_metrics()
        assert len(properties) == 0


# Execute IgnoreSuiteFunction and repeat the same metric checks.
runner2 = Runner([IgnoreSuiteFunction])
runner2.run()
results2 = runner2.get_executed_suites()

pprint.pprint(results2[0].metrics.get_metrics())
for test in results2[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics2():

    metrics = results2[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics2():

    assert results2[0].get_test_objects()
    for test in results2[0].get_test_objects():

        properties = test.metrics.get_metrics()
        assert len(properties) == 0


# Execute IgnoreSuiteClassic and repeat the same metric checks.
runner3 = Runner([IgnoreSuiteClassic])
runner3.run()
results3 = runner3.get_executed_suites()

pprint.pprint(results3[0].metrics.get_metrics())
for test in results3[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics3():

    metrics = results3[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics3():

    assert results3[0].get_test_objects()
    for test in results3[0].get_test_objects():

        properties = test.metrics.get_metrics()
        assert len(properties) == 0


# Execute IgnoreSuiteClassic2 and repeat the same metric checks.
runner4 = Runner([IgnoreSuiteClassic2])
runner4.run()
results4 = runner4.get_executed_suites()

pprint.pprint(results4[0].metrics.get_metrics())
for test in results4[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics4():

    metrics = results4[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics4():

    assert results4[0].get_test_objects()
    for test in results4[0].get_test_objects():

        properties = test.metrics.get_metrics()
        assert len(properties) == 0


# Execute IgnoreSuiteClassic3 and repeat the same metric checks.
runner = Runner([IgnoreSuiteClassic3])
runner.run()
results = runner.get_executed_suites()

pprint.pprint(results[0].metrics.get_metrics())
for test in results[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_class_metrics5():

    metrics = results[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics,
                                       expected_status="ignore",
                                       expected_retry_count=0)


def test_test_metrics5():

    assert results[0].get_test_objects()
    for test in results[0].get_test_objects():

        properties = test.metrics.get_metrics()
        assert len(properties) == 0


def test_wrong_params():

    # Importing ErrorSuite1 is expected to fail with BadParameters because the
    # suite is defined with an invalid datatype for its parameters.
    try:
        from tests.junkie_suites.error_handling import ErrorSuite1
        raise AssertionError("This test must raise an exception, as a wrong datatype is used for parameters")
    except Exception as error:
        assert isinstance(error, BadParameters), "Exception must be raised for bad parameters"


def test_wrong_params2():

    try:
        from tests.junkie_suites.error_handling import ErrorSuite2
        raise AssertionError("This test must raise an exception, as a wrong datatype is used for parameters")
    except Exception as error:
        assert isinstance(error, BadSignature), "Exception must be raised for bad signature"


def test_wrong_params3():

    try:
        from tests.junkie_suites.error_handling.ErrorSuite3 import ErrorSuite3
        raise AssertionError("This test must raise an exception, as a wrong datatype is used for parameters")
    except Exception as error:
        assert isinstance(error, BadParameters), "Exception must be raised for bad parameters"


# Execute ErrorSuite4, whose before-test hook raises, and verify the recorded metrics.
runner5 = Runner([ErrorSuite4])
runner5.run()
results5 = runner5.get_executed_suites()

pprint.pprint(results5[0].metrics.get_metrics())
for test in results5[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_before_test_error1():

    metrics5 = results5[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics5,
                                       expected_status="fail",
                                       expected_retry_count=1,
                                       expected_beforetest_performance_count=1,
                                       expected_beforetest_exception_object=Exception,
                                       expected_beforetest_exception_count=1)


def test_before_test_error2():

    assert results5[0].get_test_objects()
    for test in results5[0].get_test_objects():

        if test.get_function_name() == "failure":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="error",
                                              expected_exception=Exception)


# Execute ErrorSuite5, whose after-test hook raises, and verify the recorded metrics.
runner6 = Runner([ErrorSuite5])
runner6.run()
results6 = runner6.get_executed_suites()

pprint.pprint(results6[0].metrics.get_metrics())
for test in results6[0].get_test_objects():
    print(test.get_function_name())
    pprint.pprint(test.metrics.get_metrics())


def test_after_test_error3():

    metrics6 = results6[0].metrics.get_metrics()
    QualityManager.check_class_metrics(metrics6,
                                       expected_status="fail",
                                       expected_retry_count=1,
                                       expected_aftertest_performance_count=1,
                                       expected_aftertest_exception_object=Exception,
                                       expected_aftertest_exception_count=1)


def test_after_test_error4():

    assert results6[0].get_test_objects()
    for test in results6[0].get_test_objects():

        if test.get_function_name() == "failure":
            metrics = test.metrics.get_metrics()["None"]["None"]
            QualityManager.check_test_metrics(metrics,
                                              expected_status="error",
                                              expected_exception=Exception)


def test_bad_suite_inputs():

    # Running ErrorSuite6 is expected to raise BadParameters because of invalid suite inputs.
    try:
        runner7 = Runner([ErrorSuite6])
        runner7.run()
        raise Exception("Expected BadParameters error to be thrown")
    except BadParameters:
        pass  # expected
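
The ErrorSuite5 class exercised by test_after_test_error3 is not shown above. As a rough illustration, here is a minimal sketch of a suite whose after-test hook raises, written against test_junkie's documented decorator API (Suite, test, afterTest from test_junkie.decorators). The class name, the hook's exception message, and the test name are illustrative assumptions, not the repository's actual ErrorSuite5 definition.

import pprint

from test_junkie.decorators import Suite, test, afterTest
from test_junkie.runner import Runner


# Hypothetical suite resembling ErrorSuite5: the test body passes, but the
# after-test hook raises, so the suite is expected to be reported as failed.
@Suite()
class AfterTestErrorSuite:

    @afterTest()
    def after_test(self):
        raise Exception("Simulated failure in the after-test hook")

    @test()
    def failure(self):
        pass  # the test itself succeeds; only the after-test hook fails


runner = Runner([AfterTestErrorSuite])
runner.run()
suite_result = runner.get_executed_suites()[0]

# Per the assertions in the snippet above, the class metrics should report a
# "fail" status along with after-test exception and performance entries.
pprint.pprint(suite_result.metrics.get_metrics())

Assertions in the style of test_after_test_error3 can then be made against this metrics dictionary, for example through a helper like the QualityManager.check_class_metrics call used in the snippet above.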

