How to use the _check_subunit method in stestr

The Python snippets below show how the _check_subunit helper is defined and used in real test suites from stestr and related projects.
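Both snippets implement the same pattern: decode a subunit v2 byte stream with subunit.ByteStreamToStreamResult, fan the resulting events out through testtools.CopyStreamResult to a StreamSummary and a StreamToDict collector, and check that at least one test event was seen. Here is a minimal standalone sketch of that pattern; the module-level form and the name check_subunit are illustrative, since in the real files this is a TestCase method that asserts with self.assertThat:

import functools

import subunit
import testtools


def check_subunit(output_stream):
    """Parse a subunit v2 byte stream and return the per-test dicts."""
    # Decode the raw bytes into StreamResult events.
    stream = subunit.ByteStreamToStreamResult(output_stream)
    starts = testtools.StreamResult()
    summary = testtools.StreamSummary()
    tests = []

    def _add_dict(test):
        tests.append(test)

    # StreamToDict assembles one dict per test (id, status, details, ...)
    # and passes it to the callback when the test completes.
    outcomes = testtools.StreamToDict(functools.partial(_add_dict))
    # CopyStreamResult forwards every event to all three receivers.
    result = testtools.CopyStreamResult([starts, outcomes, summary])
    result.startTestRun()
    try:
        stream.run(result)
    finally:
        result.stopTestRun()
    # A usable stream must contain at least one test event; the real
    # method asserts this with testtools.matchers.GreaterThan(0).
    assert len(tests) > 0
    return tests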

test_return_codes.py

Source: test_return_codes.py (GitHub)

...
        # Change directory, run wrapper and check result
        self.addCleanup(os.chdir, self.repo_root)
        os.chdir(self.directory)
        subprocess.call('stestr init', shell=True)

    def _check_subunit(self, output_stream):
        stream = subunit_lib.ByteStreamToStreamResult(output_stream)
        starts = testtools.StreamResult()
        summary = testtools.StreamSummary()
        tests = []

        def _add_dict(test):
            tests.append(test)

        outcomes = testtools.StreamToDict(functools.partial(_add_dict))
        result = testtools.CopyStreamResult([starts, outcomes, summary])
        result.startTestRun()
        try:
            stream.run(result)
        finally:
            result.stopTestRun()
        self.assertThat(len(tests), testtools.matchers.GreaterThan(0))

    def assertRunExit(self, cmd, expected, subunit=False, stdin=None):
        if stdin:
            p = subprocess.Popen(
                "%s" % cmd, shell=True, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate(stdin)
        else:
            p = subprocess.Popen(
                "%s" % cmd, shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()
        if not subunit:
            self.assertEqual(
                p.returncode, expected,
                "Stdout: {}; Stderr: {}".format(out, err))
            return (out, err)
        else:
            self.assertEqual(p.returncode, expected,
                             "Expected return code: %s doesn't match actual "
                             "return code of: %s" % (expected, p.returncode))
            output_stream = io.BytesIO(out)
            stream = subunit_lib.ByteStreamToStreamResult(output_stream)
            starts = testtools.StreamResult()
            summary = testtools.StreamSummary()
            tests = []

            def _add_dict(test):
                tests.append(test)

            outcomes = testtools.StreamToDict(functools.partial(_add_dict))
            result = testtools.CopyStreamResult([starts, outcomes, summary])
            result.startTestRun()
            try:
                stream.run(result)
            finally:
                result.stopTestRun()
            self.assertThat(len(tests), testtools.matchers.GreaterThan(0))
            return (out, err)

    def test_parallel_passing(self):
        self.assertRunExit('stestr run passing', 0)

    def test_parallel_passing_bad_regex(self):
        self.assertRunExit('stestr run bad.regex.foobar', 1)

    def test_parallel_fails(self):
        self.assertRunExit('stestr run', 1)

    def test_parallel_passing_xfail(self):
        self.assertRunExit('stestr run xfail', 0)

    def test_parallel_fails_unxsuccess(self):
        self.assertRunExit('stestr run unexpected', 1)

    def test_parallel_exclusion_list(self):
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        with os.fdopen(fd, 'w') as exclusion_list:
            exclusion_list.write('fail')
        cmd = 'stestr run --exclude-list %s' % path
        self.assertRunExit(cmd, 0)

    def test_parallel_inclusion_list(self):
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        with os.fdopen(fd, 'w') as inclusion_list:
            inclusion_list.write('passing')
        cmd = 'stestr run --include-list %s' % path
        self.assertRunExit(cmd, 0)

    def test_serial_passing(self):
        self.assertRunExit('stestr run --serial passing', 0)

    def test_serial_fails(self):
        self.assertRunExit('stestr run --serial', 1)

    def test_serial_exclusion_list(self):
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        with os.fdopen(fd, 'w') as exclusion_list:
            exclusion_list.write('fail')
        cmd = 'stestr run --serial --exclude-list %s' % path
        self.assertRunExit(cmd, 0)

    def test_serial_inclusion_list(self):
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        with os.fdopen(fd, 'w') as inclusion_list:
            inclusion_list.write('passing')
        cmd = 'stestr run --serial --include-list %s' % path
        self.assertRunExit(cmd, 0)

    def test_serial_subunit_passing(self):
        self.assertRunExit('stestr --user-config stestr.yaml run --subunit '
                           '--serial passing', 0, subunit=True)

    def test_serial_subunit_failing(self):
        self.assertRunExit('stestr --user-config stestr.yaml run --subunit '
                           '--serial failing', 0, subunit=True)

    def test_parallel_subunit_passing(self):
        self.assertRunExit('stestr --user-config stestr.yaml run --subunit '
                           'passing', 0, subunit=True)

    def test_parallel_subunit_failing(self):
        self.assertRunExit('stestr --user-config stestr.yaml run --subunit '
                           'failing', 0, subunit=True)

    def test_slowest_passing(self):
        self.assertRunExit('stestr run --slowest passing', 0)

    def test_slowest_failing(self):
        self.assertRunExit('stestr run --slowest failing', 1)

    def test_until_failure_fails(self):
        self.assertRunExit('stestr run --until-failure', 1)

    def test_until_failure_with_subunit_fails(self):
        self.assertRunExit('stestr --user-config stestr.yaml run '
                           '--until-failure --subunit', 1, subunit=True)

    def test_with_parallel_class(self):
        # NOTE(masayukig): Ideally, it's better to figure out the
        # difference between with --parallel-class and without
        # --parallel-class. However, it's difficult to make such a
        # test from a command line based test.
        self.assertRunExit('stestr --parallel-class run passing', 0)

    def test_no_repo_dir(self):
        stestr_repo_dir = os.path.join(self.directory, '.stestr')
        shutil.rmtree(stestr_repo_dir, ignore_errors=True)
        # We can use stestr run even if there's no repo directory.
        self.assertRunExit('stestr run passing', 0)

    def test_empty_repo_dir(self):
        stestr_repo_dir = os.path.join(self.directory, '.stestr')
        shutil.rmtree(stestr_repo_dir, ignore_errors=True)
        os.mkdir(stestr_repo_dir)
        # We can initialize an empty repo directory.
        self.assertRunExit('stestr run passing', 0)

    def test_non_empty_repo_dir(self):
        stestr_repo_dir = os.path.join(self.directory, '.stestr')
        shutil.rmtree(stestr_repo_dir, ignore_errors=True)
        os.mkdir(stestr_repo_dir)
        with open(os.path.join(stestr_repo_dir, 'foo'), 'wt') as stream:
            stream.write('1\n')
        # We can't initialize a non-empty repo directory.
        self.assertRunExit('stestr run passing', 1)

    def test_list(self):
        self.assertRunExit('stestr list', 0)

    def _get_cmd_stdout(self, cmd):
        p = subprocess.Popen(cmd, shell=True,
                             stdout=subprocess.PIPE)
        out = p.communicate()
        self.assertEqual(0, p.returncode)
        return out

    def test_combine_results(self):
        self.assertRunExit('stestr run passing', 0)
        stdout = self._get_cmd_stdout(
            'stestr last --no-subunit-trace')
        stdout = str(stdout[0])
        test_count_split = stdout.split(' ')
        test_count = test_count_split[1]
        test_count = int(test_count)
        id_regex = re.compile(r'\(id=(.*?)\)')
        test_id = id_regex.search(stdout).group(0)
        self.assertRunExit('stestr run --combine passing', 0)
        combine_stdout = self._get_cmd_stdout(
            'stestr last --no-subunit-trace')[0]
        combine_stdout = str(combine_stdout)
        combine_test_count_split = combine_stdout.split(' ')
        combine_test_count = combine_test_count_split[1]
        combine_test_count = int(combine_test_count)
        combine_test_id = id_regex.search(combine_stdout).group(0)
        self.assertEqual(test_id, combine_test_id)
        # The test results from running the same tests twice with combine
        # should return a test count 2x as big at the end of the run
        self.assertEqual(test_count * 2, combine_test_count)

    def test_load_from_stdin(self):
        self.assertRunExit('stestr run passing', 0)
        stream = self._get_cmd_stdout(
            'stestr last --subunit')[0]
        self.assertRunExit('stestr load', 0, stdin=stream)

    def test_load_force_init(self):
        self.assertRunExit('stestr run passing', 0)
        stream = self._get_cmd_stdout(
            'stestr last --subunit')[0]
        # NOTE: --force-init should work here because there is a properly
        # initialized repository.
        self.assertRunExit('stestr load --force-init', 0, stdin=stream)

    def test_load_force_init_invalid(self):
        self.assertRunExit('stestr run passing', 0)
        stream = self._get_cmd_stdout(
            'stestr last --subunit')[0]
        os.remove(os.path.join(self.directory, '.stestr', 'format'))
        # NOTE: --force-init should fail here because there is an invalid
        # repository.
        self.assertRunExit('stestr load --force-init', 1, stdin=stream)

    def test_load_from_stdin_quiet(self):
        out, err = self.assertRunExit('stestr --user-config stestr.yaml -q '
                                      'run passing', 0)
        self.assertEqual(out.decode('utf-8'), '')
        # FIXME(masayukig): We get some warnings when we run a coverage job.
        # So, just ignore 'err' here.
        stream = self._get_cmd_stdout('stestr last --subunit')[0]
        out, err = self.assertRunExit('stestr --user-config stestr.yaml -q '
                                      'load', 0, stdin=stream)
        self.assertEqual(out.decode('utf-8'), '')
        self.assertEqual(err.decode('utf-8'), '')

    def test_history_list(self):
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr run', 1)
        self.assertRunExit('stestr run passing', 0)
        table = self.assertRunExit(
            'stestr history list', 0)[0].decode('utf8')
        self.assertIn("| 0      | True   |", table.split('\n')[3].rstrip())
        self.assertIn("| 1      | False  |", table.split('\n')[4].rstrip())
        self.assertIn("| 2      | True   |", table.split('\n')[5].rstrip())
        expected = """
+--------+--------+-----------+----------------------------------+
| Run ID | Passed | Runtime   | Date                             |
+--------+--------+-----------+----------------------------------+
""".rstrip()
        self.assertEqual(expected.strip(), '\n'.join(
            [x.rstrip() for x in table.split('\n')[:3]]).strip())

    def test_history_empty(self):
        table = self.assertRunExit(
            'stestr history list', 0)[0].decode('utf8')
        self.assertEqual("",
                         '\n'.join(
                             [x.rstrip() for x in table.split('\n')]).strip())

    def test_history_show_passing(self):
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr run', 1)
        self.assertRunExit('stestr run passing', 0)
        output, _ = self.assertRunExit('stestr history show 0', 0)
        lines = [x.rstrip() for x in output.decode('utf8').split('\n')]
        self.assertIn(' - Passed: 2', lines)
        self.assertIn(' - Failed: 0', lines)
        self.assertIn(' - Expected Fail: 1', lines)

    def test_history_show_failing(self):
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr run', 1)
        self.assertRunExit('stestr run passing', 0)
        output, _ = self.assertRunExit('stestr history show 1', 1)
        lines = [x.rstrip() for x in output.decode('utf8').split('\n')]
        self.assertIn(' - Passed: 2', lines)
        self.assertIn(' - Failed: 2', lines)
        self.assertIn(' - Expected Fail: 1', lines)
        self.assertIn(' - Unexpected Success: 1', lines)

    def test_history_show_invalid_id(self):
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr run', 1)
        self.assertRunExit('stestr run passing', 0)
        output, _ = self.assertRunExit('stestr history show 42', 1)
        self.assertEqual(output.decode('utf8').rstrip(), "'No such run.'")

    def test_history_remove(self):
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr run', 1)
        self.assertRunExit('stestr run passing', 0)
        self.assertRunExit('stestr history remove 1', 0)
        table = self.assertRunExit(
            'stestr history list', 0)[0].decode('utf8')
        self.assertIn("| 0      | True   |", table.split('\n')[3].rstrip())
        self.assertNotIn("| 1      | False  |", table.split('\n')[4].strip())
        self.assertIn("| 2      | True   |", table.split('\n')[4].rstrip())
        expected = """
+--------+--------+-----------+----------------------------------+
| Run ID | Passed | Runtime   | Date                             |
+--------+--------+-----------+----------------------------------+
""".strip()
        self.assertEqual(expected, '\n'.join(
            [x.rstrip() for x in table.split('\n')[:3]]))

    def test_no_subunit_trace_force_subunit_trace(self):
        out, err = self.assertRunExit(
            'stestr run --no-subunit-trace --force-subunit-trace passing', 0)
        out = str(out)
        self.assertNotIn('PASSED (id=0)', out)
        self.assertIn('Totals', out)
        self.assertIn('Worker Balance', out)
        self.assertIn('Sum of execute time for each test:', out)

    def test_parallel_passing_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(0, run.run_command(filters=['passing'],
                                            stdout=stdout.stream))

    def test_parallel_passing_bad_regex_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(1, run.run_command(filters=['bad.regex.foobar'],
                                            stdout=stdout.stream))

    def test_parallel_fails_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(1, run.run_command(stdout=stdout.stream))

    def test_serial_passing_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(0, run.run_command(filters=['passing'], serial=True,
                                            stdout=stdout.stream))

    def test_str_concurrency_passing_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(0, run.run_command(filters=['passing'],
                                            concurrency='1',
                                            stdout=stdout.stream))

    def test_str_concurrency_fails_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(1, run.run_command(concurrency='1',
                                            stdout=stdout.stream))

    def test_serial_fails_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(1, run.run_command(serial=True, stdout=stdout.stream))

    def test_serial_subunit_passing_from_func(self):
        stdout = io.BytesIO()
        self.assertEqual(0, run.run_command(subunit_out=True, serial=True,
                                            filters=['passing'],
                                            stdout=stdout))
        stdout.seek(0)
        self._check_subunit(stdout)

    def test_parallel_subunit_passing_from_func(self):
        stdout = io.BytesIO()
        self.assertEqual(0, run.run_command(subunit_out=True,
                                            filters=['passing'],
                                            stdout=stdout))
        stdout.seek(0)
        self._check_subunit(stdout)

    def test_until_failure_fails_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(1, run.run_command(until_failure=True,
                                            stdout=stdout.stream))

    def test_until_failure_with_subunit_fails_from_func(self):
        stdout = io.BytesIO()
        self.assertEqual(1, run.run_command(until_failure=True,
                                            subunit_out=True,
                                            stdout=stdout))
        stdout.seek(0)
        self._check_subunit(stdout)

    def test_list_from_func(self):
        stdout = fixtures.StringStream('stdout')
        self.useFixture(stdout)
        self.assertEqual(0, list_cmd.list_command(stdout=stdout.stream))

    def test_run_no_discover_pytest_path(self):
        passing_string = 'tests/test_passing.py::FakeTestClass::test_pass_list'
        out, err = self.assertRunExit('stestr run -n %s' % passing_string, 0)
        lines = out.decode('utf8').splitlines()
        self.assertIn(' - Passed: 1', lines)
        self.assertIn(' - Failed: 0', lines)

    def test_run_no_discover_pytest_path_failing(self):
        passing_string = 'tests/test_failing.py::FakeTestClass::test_pass_list'
        out, err = self.assertRunExit('stestr run -n %s' % passing_string, 1)
        lines = out.decode('utf8').splitlines()
...
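As a usage sketch, the same check can be driven from a shell invocation, reusing the check_subunit function from the introduction. This assumes a repository initialized with stestr init in the current directory (as setUp does above) and the 'passing' test-selection regex used throughout this suite:

import io
import subprocess

# `stestr run --subunit` writes a subunit v2 stream to stdout.
proc = subprocess.run(
    'stestr run --subunit passing', shell=True,
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Wrap the captured bytes in BytesIO so the checker can read them as a
# file-like object, exactly as assertRunExit does above.
tests = check_subunit(io.BytesIO(proc.stdout))
print('parsed %d test events' % len(tests))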


test_command.py

Source: test_command.py (GitHub)

...
        stderr = self.useFixture(fixtures.StringStream('stderr')).stream
        self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                               level=None))

    def _check_subunit(self, output_stream):
        stream = subunit.ByteStreamToStreamResult(output_stream)
        starts = testtools.StreamResult()
        summary = testtools.StreamSummary()
        tests = []

        def _add_dict(test):
            tests.append(test)

        outcomes = testtools.StreamToDict(functools.partial(_add_dict))
        result = testtools.CopyStreamResult([starts, outcomes, summary])
        result.startTestRun()
        try:
            stream.run(result)
        finally:
            result.stopTestRun()
        self.assertThat(len(tests), testtools.matchers.GreaterThan(0))
        return tests

    def test_ant_xml_in_stdout(self):
        ant_xml_path = os.path.join(self.examples_dir, 'ant.xml')
        junitxml_proc = subprocess.Popen([self.command, ant_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        tests = self._check_subunit(io.BytesIO(stdout))
        self.assertEqual(1, len(tests))
        self.assertEqual('codereview.cobol.rules.ProgramIdRule',
                         tests[0].get('id'))
        self.assertEqual('fail', tests[0].get('status'))

    def test_ant_xml_in_file_out(self):
        ant_xml_path = os.path.join(self.examples_dir, 'ant.xml')
        out_file, out_path = tempfile.mkstemp()
        os.close(out_file)
        self.addCleanup(os.remove, out_path)
        junitxml_proc = subprocess.Popen([self.command, '-o', out_path,
                                          ant_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        with open(out_path, 'r') as fd:
            tests = self._check_subunit(fd)
        self.assertEqual(1, len(tests))
        self.assertEqual('codereview.cobol.rules.ProgramIdRule',
                         tests[0].get('id'))
        self.assertEqual('fail', tests[0].get('status'))

    def test_hudson_xml_in_stdout(self):
        hudson_xml_path = os.path.join(self.examples_dir, 'hudson.xml')
        junitxml_proc = subprocess.Popen([self.command, hudson_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        tests = self._check_subunit(io.BytesIO(stdout))
        self.assertEqual(3, len(tests))
        test_ids = [x.get('id') for x in tests]
        test_statuses = [x.get('status') for x in tests]
        self.assertIn('tests.ATest.error',
                      test_ids)
        self.assertIn('tests.ATest.fail', test_ids)
        self.assertIn('tests.ATest.success', test_ids)
        self.assertEqual(['fail', 'fail', 'success'], test_statuses)

    def test_hudson_xml_in_file_out(self):
        hudson_xml_path = os.path.join(self.examples_dir, 'hudson.xml')
        out_file, out_path = tempfile.mkstemp()
        os.close(out_file)
        self.addCleanup(os.remove, out_path)
        junitxml_proc = subprocess.Popen([self.command, '-o', out_path,
                                          hudson_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        with open(out_path, 'r') as fd:
            tests = self._check_subunit(fd)
        self.assertEqual(3, len(tests))
        test_ids = [x.get('id') for x in tests]
        test_statuses = [x.get('status') for x in tests]
        self.assertIn('tests.ATest.error',
                      test_ids)
        self.assertIn('tests.ATest.fail', test_ids)
        self.assertIn('tests.ATest.success', test_ids)
        self.assertEqual(['fail', 'fail', 'success'], test_statuses)

    def test_pytest_xml_in_stdout(self):
        pytest_xml_path = os.path.join(self.examples_dir, 'pytest.xml')
        junitxml_proc = subprocess.Popen([self.command, pytest_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        tests = self._check_subunit(io.BytesIO(stdout))
        self.assertEqual(118, len(tests))
        skip_count = len([x for x in tests if x.get('status') == 'skip'])
        success_count = len([x for x in tests if x.get('status') == 'success'])
        example_id = ('stestr.tests.test_scheduler.TestScheduler.'
                      'test_partition_tests_with_grouping')
        test_ids = [x.get('id') for x in tests]
        self.assertIn(example_id, test_ids)
        self.assertEqual(skip_count, 2)
        self.assertEqual(success_count, 116)

    def test_pytest_xml_in_file_out(self):
        pytest_xml_path = os.path.join(self.examples_dir, 'pytest.xml')
        out_file, out_path = tempfile.mkstemp()
        os.close(out_file)
        self.addCleanup(os.remove, out_path)
        junitxml_proc = subprocess.Popen([self.command, '-o', out_path,
                                          pytest_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE)
        stdout, _ = junitxml_proc.communicate()
        with open(out_path, 'r') as fd:
            tests = self._check_subunit(fd)
        self.assertEqual(118, len(tests))
        test_ids = [x.get('id') for x in tests]
        skip_count = len([x for x in tests if x.get('status') == 'skip'])
        success_count = len([x for x in tests if x.get('status') == 'success'])
        example_id = ('stestr.tests.test_scheduler.TestScheduler.'
                      'test_partition_tests_with_grouping')
        self.assertIn(example_id, test_ids)
        self.assertEqual(skip_count, 2)
        self.assertEqual(success_count, 116)

    def test_no_time_xml_in_stdout(self):
        no_time_xml_path = os.path.join(self.examples_dir, 'no_time.xml')
        junitxml_proc = subprocess.Popen([self.command, no_time_xml_path],
                                         cwd=self.repo_root,
                                         stdout=subprocess.PIPE,
...
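Unlike the stestr copy of the method, this variant returns the collected tests list, which is what lets the tests above assert on individual test ids and statuses. Here is a round-trip sketch of that pattern, assuming the check_subunit function from the introduction plus subunit.StreamResultToBytes to synthesize a tiny in-memory stream:

import io

import subunit

# Synthesize a one-test subunit v2 stream in memory.
buf = io.BytesIO()
writer = subunit.StreamResultToBytes(buf)
writer.startTestRun()
writer.status(test_id='tests.ATest.success', test_status='inprogress')
writer.status(test_id='tests.ATest.success', test_status='success')
writer.stopTestRun()
buf.seek(0)

# Parse it back and assert on the per-test dicts, as the tests above do.
tests = check_subunit(buf)
assert [t.get('id') for t in tests] == ['tests.ATest.success']
assert tests[0].get('status') == 'success'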


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run stestr automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
