How to use displayStderr method in green

Best Python code snippet using green

result.py

Source: result.py (GitHub)

...
                    self.stdout_output[test],
                )
            )
            del self.stdout_output[test]
    def displayStderr(self, test):
        """
        Displays AND REMOVES the errput captured from a specific test. The
        removal is done so that this method can be called multiple times
        without duplicating results errput.
        """
        test = proto_test(test)
        if test.dotted_name in self.stderr_errput:
            self.stream.write(
                "\n{} for {}\n{}".format(
                    self.colors.yellow("Captured stderr"),
                    self.colors.bold(test.dotted_name),
                    self.stderr_errput[test],
                )
            )
            del self.stderr_errput[test]

class ProtoTestResult(BaseTestResult):
    """
    I'm the TestResult object for a single unit test run in a process.
    """
    def __init__(self, start_callback=None, finalize_callback=None):
        super(ProtoTestResult, self).__init__(None, None)
        self.start_callback = start_callback
        self.finalize_callback = finalize_callback
        self.finalize_callback_called = False
        self.pickle_attrs = [
            "errors",
            "expectedFailures",
            "failures",
            "passing",
            "pickle_attrs",
            "shouldStop",
            "skipped",
            "stderr_errput",
            "stdout_output",
            "unexpectedSuccesses",
            "test_time",
        ]
        self.failfast = False  # Because unittest inspects the attribute
        self.reinitialize()
    def reinitialize(self):
        self.shouldStop = False
        self.errors = []
        self.expectedFailures = []
        self.failures = []
        self.passing = []
        self.skipped = []
        self.unexpectedSuccesses = []
        self.test_time = ""
    def __repr__(self):  # pragma: no cover
        return (
            "errors"
            + str(self.errors)
            + ", "
            + "expectedFailures"
            + str(self.expectedFailures)
            + ", "
            + "failures"
            + str(self.failures)
            + ", "
            + "passing"
            + str(self.passing)
            + ", "
            + "skipped"
            + str(self.skipped)
            + ", "
            + "unexpectedSuccesses"
            + str(self.unexpectedSuccesses)
            + ", "
            + "test_time"
            + str(self.test_time)
        )
    def __getstate__(self):
        """
        Prevent the callback functions from getting pickled
        """
        result_dict = {}
        for pickle_attr in self.pickle_attrs:
            result_dict[pickle_attr] = self.__dict__[pickle_attr]
        return result_dict
    def __setstate__(self, dict):
        """
        Since the callback functions weren't pickled, we need to init them
        """
        self.__dict__.update(dict)
        self.start_callback = None
        self.finalize_callback = None
    def startTest(self, test):
        """
        Called before each test runs
        """
        test = proto_test(test)
        self.start_time = time.time()
        self.reinitialize()
        if self.start_callback:
            self.start_callback(test)
    def stopTest(self, test):
        """
        Called after each test runs
        """
        self.test_time = str(time.time() - self.start_time)
    def finalize(self):
        """
        I am here so that after the GreenTestSuite has had a chance to inject
        the captured stdout/stderr back into me, I can relay that through to
        the worker process's poolRunner who will send me back up to the parent
        process.
        """
        if self.finalize_callback:
            self.finalize_callback(self)
            self.finalize_callback_called = True
    def addSuccess(self, test):
        """
        Called when a test passed
        """
        self.passing.append(proto_test(test))
    def addError(self, test, err):
        """
        Called when a test raises an exception
        """
        self.errors.append((proto_test(test), proto_error(err)))
    def addFailure(self, test, err):
        """
        Called when a test fails a unittest assertion
        """
        self.failures.append((proto_test(test), proto_error(err)))
    def addSkip(self, test, reason):
        """
        Called when a test is skipped
        """
        self.skipped.append((proto_test(test), reason))
    def addExpectedFailure(self, test, err):
        """
        Called when a test fails, and we expected the failure
        """
        self.expectedFailures.append((proto_test(test), proto_error(err)))
    def addUnexpectedSuccess(self, test):
        """
        Called when a test passed, but we expected a failure
        """
        self.unexpectedSuccesses.append(proto_test(test))
    def addSubTest(self, test, subtest, err):
        """
        Called at the end of a subtest no matter its result.
        The test that runs the subtests calls the other test methods to
        record its own result. We use this method to record each subtest as a
        separate test result. It's very meta.
        """
        if err is not None:
            if issubclass(err[0], test.failureException):
                self.addFailure(subtest, err)
            else:
                self.addError(subtest, err)

class GreenTestResult(BaseTestResult):
    """
    Aggregates test results and outputs them to a stream.
    """
    def __init__(self, args, stream):
        super(GreenTestResult, self).__init__(stream, Colors(args.termcolor))
        self.args = args
        self.showAll = args.verbose > 1
        self.dots = args.verbose == 1
        self.verbose = args.verbose
        self.last_module = ""
        self.last_class = ""
        self.first_text_output = ""
        self.failfast = args.failfast
        self.shouldStop = False
        self.testsRun = 0
        # Individual lists
        self.errors = []
        self.expectedFailures = []
        self.failures = []
        self.passing = []
        self.skipped = []
        self.unexpectedSuccesses = []
        # Combination of all errors and failures
        self.all_errors = []
        # For exiting non-zero if we don't reach a certain level of coverage
        self.coverage_percent = None
    def __str__(self):  # pragma: no cover
        return (
            "tests run: {}".format(self.testsRun)
            + ", "
            + "errors"
            + str(self.errors)
            + ", "
            + "expectedFailures"
            + str(self.expectedFailures)
            + ", "
            + "failures"
            + str(self.failures)
            + ", "
            + "passing"
            + str(self.passing)
            + ", "
            + "skipped"
            + str(self.skipped)
            + ", "
            + "unexpectedSuccesses"
            + str(self.unexpectedSuccesses)
        )
    def stop(self):
        self.shouldStop = True
    def tryRecordingStdoutStderr(self, test, proto_test_result, err=None):
        if proto_test_result.stdout_output.get(test, False):
            self.recordStdout(test, proto_test_result.stdout_output[test])
        if proto_test_result.stderr_errput.get(test, False):
            self.recordStderr(test, proto_test_result.stderr_errput[test])
        # SubTest errors/failures (but not successes) generate a different err object, so we have to
        # do some inspection to figure out which object has the output/errput
        if (test.class_name == "SubTest") and err:
            for t in proto_test_result.stdout_output.keys():
                if test.dotted_name.startswith(t.dotted_name):
                    self.recordStdout(test, proto_test_result.stdout_output[t])
                    break
            for t in proto_test_result.stderr_errput.keys():
                if test.dotted_name.startswith(t.dotted_name):
                    self.recordStderr(test, proto_test_result.stderr_errput[t])
                    break
    def addProtoTestResult(self, proto_test_result):
        for test, err in proto_test_result.errors:
            self.addError(test, err, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result, err)
        for test, err in proto_test_result.expectedFailures:
            self.addExpectedFailure(test, err, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result, err)
        for test, err in proto_test_result.failures:
            self.addFailure(test, err, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result, err)
        for test in proto_test_result.passing:
            self.addSuccess(test, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result)
        for test, reason in proto_test_result.skipped:
            self.addSkip(test, reason, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result)
        for test in proto_test_result.unexpectedSuccesses:
            self.addUnexpectedSuccess(test, proto_test_result.test_time)
            self.tryRecordingStdoutStderr(test, proto_test_result)
    def startTestRun(self):
        """
        Called once before any tests run
        """
        self.startTime = time.time()
        # Really verbose information
        if self.verbose > 2:
            self.stream.writeln(self.colors.bold(pretty_version() + "\n"))
    def stopTestRun(self):
        """
        Called once after all tests have run
        """
        self.stopTime = time.time()
        self.timeTaken = self.stopTime - self.startTime
        self.printErrors()
        if self.args.run_coverage or self.args.quiet_coverage:
            from coverage.misc import CoverageException
            try:
                self.stream.writeln()
                self.args.cov.stop()
                self.args.cov.save()
                self.args.cov.combine()
                self.args.cov.save()
                if not self.args.quiet_coverage:
                    self.stream.coverage_percent = None
                    self.args.cov.report(
                        file=self.stream,
                        omit=self.args.omit_patterns,
                        show_missing=True,
                    )
                    self.coverage_percent = self.stream.coverage_percent
            except CoverageException as ce:
                if (len(ce.args) == 1) and ("No data to report" not in ce.args[0]):
                    raise ce
        if self.testsRun and not self.shouldStop:
            self.stream.writeln()
        if self.shouldStop:
            self.stream.writeln()
            self.stream.writeln(
                self.colors.yellow("Warning: Some tests may not have been run.")
            )
            self.stream.writeln()
        self.stream.writeln(
            "Ran %s test%s in %ss using %s process%s"
            % (
                self.colors.bold(str(self.testsRun)),
                self.testsRun != 1 and "s" or "",
                self.colors.bold("%.3f" % self.timeTaken),
                self.colors.bold("%d" % self.args.processes),
                self.args.processes != 1 and "es" or "",
            )
        )
        self.stream.writeln()
        results = [
            (self.errors, "errors", self.colors.error),
            (self.expectedFailures, "expected_failures", self.colors.expectedFailure),
            (self.failures, "failures", self.colors.failing),
            (self.passing, "passes", self.colors.passing),
            (self.skipped, "skips", self.colors.skipped),
            (
                self.unexpectedSuccesses,
                "unexpected_successes",
                self.colors.unexpectedSuccess,
            ),
        ]
        stats = []
        for obj_list, name, color_func in results:
            if obj_list:
                stats.append("{}={}".format(name, color_func(str(len(obj_list)))))
        if not stats:
            self.stream.writeln(self.colors.failing("No Tests Found"))
        else:
            grade = self.colors.passing("OK")
            if not self.wasSuccessful():
                grade = self.colors.failing("FAILED")
            self.stream.writeln("{} ({})".format(grade, ", ".join(stats)))
    def startTest(self, test):
        """
        Called before the start of each test
        """
        # Get our bearings
        test = proto_test(test)
        current_module = test.module
        current_class = test.class_name
        # Output
        if self.showAll:
            # Module...if it changed.
            if current_module != self.last_module:
                self.stream.writeln(self.colors.moduleName(current_module))
            # Class...if it changed.
            if current_class != self.last_class:
                self.stream.writeln(
                    self.colors.className(
                        self.stream.formatText(current_class, indent=1)
                    )
                )
            if self.stream.isatty():
                # In the terminal, we will write a placeholder, and then
                # modify the first character and rewrite it in color after
                # the test has run.
                self.first_text_output = self.stream.formatLine(
                    test.getDescription(self.verbose), indent=2
                )
                self.stream.write(self.colors.bold(self.first_text_output))
            self.stream.flush()
        # Set state for next time
        if current_module != self.last_module:
            self.last_module = current_module
        if current_class != self.last_class:
            self.last_class = current_class
    def stopTest(self, test):
        """
        Supposed to be called after each test, but as far as I can tell that's a
        lie and this is simply never called.
        """
    def _reportOutcome(self, test, outcome_char, color_func, err=None, reason=""):
        self.testsRun += 1
        test = proto_test(test)
        if self.showAll:
            if self.stream.isatty():
                self.stream.write(self.colors.start_of_line())
            # Can end up being different from the first time due to subtest
            # information only being available after a test result comes in.
            second_text_output = self.stream.formatLine(
                test.getDescription(self.verbose), indent=2, outcome_char=outcome_char
            )
            if self.stream.isatty() and terminal_width:  # pragma: no cover
                cursor_rewind = (
                    int(ceil(float(len(self.first_text_output)) / terminal_width)) - 1
                )
                if cursor_rewind:
                    self.stream.write(self.colors.up(cursor_rewind))
            self.stream.write(color_func(second_text_output))
            if reason:
                self.stream.write(color_func(" -- " + reason))
            self.stream.writeln()
            self.stream.flush()
        elif self.dots:
            self.stream.write(color_func(outcome_char))
            self.stream.flush()
    def addSuccess(self, test, test_time=None):
        """
        Called when a test passed
        """
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        self.passing.append(test)
        self._reportOutcome(test, ".", self.colors.passing)
    @failfast
    def addError(self, test, err, test_time=None):
        """
        Called when a test raises an exception
        """
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        err = proto_error(err)
        self.errors.append((test, err))
        self.all_errors.append((test, self.colors.error, "Error", err))
        self._reportOutcome(test, "E", self.colors.error, err)
    @failfast
    def addFailure(self, test, err, test_time=None):
        """
        Called when a test fails a unittest assertion
        """
        # Special case: Catch Twisted's skips that come through as failures
        # and treat them as skips instead
        if len(err.traceback_lines) == 1:
            if err.traceback_lines[0].startswith("UnsupportedTrialFeature"):
                reason = eval(err.traceback_lines[0][25:])[1]
                self.addSkip(test, reason)
                return
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        err = proto_error(err)
        self.failures.append((test, err))
        self.all_errors.append((test, self.colors.error, "Failure", err))
        self._reportOutcome(test, "F", self.colors.failing, err)
    def addSkip(self, test, reason, test_time=None):
        """
        Called when a test is skipped
        """
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        self.skipped.append((test, reason))
        self._reportOutcome(test, "s", self.colors.skipped, reason=reason)
    def addExpectedFailure(self, test, err, test_time=None):
        """
        Called when a test fails, and we expected the failure
        """
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        err = proto_error(err)
        self.expectedFailures.append((test, err))
        self._reportOutcome(test, "x", self.colors.expectedFailure, err)
    def addUnexpectedSuccess(self, test, test_time=None):
        """
        Called when a test passed, but we expected a failure
        """
        test = proto_test(test)
        if test_time:
            test.test_time = str(test_time)
        self.unexpectedSuccesses.append(test)
        self._reportOutcome(test, "u", self.colors.unexpectedSuccess)
    def printErrors(self):
        """
        Print a list of all tracebacks from errors and failures, as well as
        captured stdout (even if the test passed, except with quiet_stdout
        option).
        """
        if self.dots:
            self.stream.writeln()
        # Skipped Test Report
        if not self.args.no_skip_report:
            for test, reason in self.skipped:
                self.stream.writeln(
                    "\n{} {} - {}".format(
                        self.colors.blue("Skipped"),
                        self.colors.bold(test.dotted_name),
                        reason,
                    )
                )
        # Captured output for non-failing tests
        if not self.args.quiet_stdout:
            failing_tests = set([x[0] for x in self.all_errors])
            for test in list(self.stdout_output) + list(self.stderr_errput):
                if test not in failing_tests:
                    self.displayStdout(test)
                    self.displayStderr(test)
        # Actual tracebacks and captured output for failing tests
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln(
                "\n" + color_func(outcome) + " in " + self.colors.bold(test.dotted_name)
            )
            # Traceback
            if not self.args.no_tracebacks:
                relevant_frames = []
                for i, frame in enumerate(err.traceback_lines):
                    # Python2 tracebacks containing unicode need some special handling
                    # This doesn't always make it readable, but at least it doesn't
                    # crash
                    if sys.version_info[0] == 2:  # pragma: no cover
                        try:
                            "".join([frame])  # intentionally trigger exceptions
                        except UnicodeDecodeError:
                            frame = frame.decode("utf-8")
                    debug(
                        "\n"
                        + "*" * 30
                        + "Frame {}:".format(i)
                        + "*" * 30
                        + "\n{}".format(self.colors.yellow(frame)),
                        level=3,
                    )
                    # Ignore useless frames
                    if self.verbose < 4:
                        if frame.strip() == "Traceback (most recent call last):":
                            continue
                    # Done with this frame, capture it.
                    relevant_frames.append(frame)
                self.stream.write("".join(relevant_frames))
            # Captured output for failing tests
            self.displayStdout(test)
            self.displayStderr(test)
    def wasSuccessful(self):
        """
        Tells whether or not the overall run was successful
        """
        if self.args.minimum_coverage != None:
            if self.coverage_percent < self.args.minimum_coverage:
                self.stream.writeln(
                    self.colors.red(
                        "Coverage of {}% is below minimum level of {}%".format(
                            self.coverage_percent, self.args.minimum_coverage
                        )
                    )
                )
                return False
...
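In short, displayStderr writes whatever stderr was captured for a given test to the result's output stream and then deletes it, so repeated reporting passes (such as printErrors above) never print the same errput twice. Below is a minimal sketch of calling it directly; it is not from green's documentation, and the StringIO stream, the empty ProtoTest, and the fake errput text are stand-ins for what green would normally capture while running a real test.

# Minimal sketch: record some errput for a test, then display it.
# Assumes the green package is installed; the stream, test, and errput
# text here are placeholders, not values green produces itself.
from io import StringIO

from green.output import Colors
from green.result import BaseTestResult, ProtoTest

stream = StringIO()
result = BaseTestResult(stream, Colors(False))
test = ProtoTest()

# Pretend the test wrote something to stderr while it ran.
result.recordStderr(test, "warning: something went to stderr\n")

# The first call writes the captured stderr to the stream...
result.displayStderr(test)
print(stream.getvalue())

# ...and removes it, so calling it again prints nothing new.
result.displayStderr(test)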

test_result.py

Source: test_result.py (GitHub)

...
        btr = BaseTestResult(None, None)
        pt = ProtoTest()
        btr.recordStderr(pt, '')
        self.assertEqual(btr.stderr_errput, {})
    def test_displayStderr(self):
        """
        displayStderr displays captured stderr
        """
        stream = StringIO()
        noise = "blah blah blah"
        btr = BaseTestResult(stream, Colors(False))
        pt = ProtoTest()
        btr.stderr_errput[pt] = noise
        btr.displayStderr(pt)
        self.assertIn(noise, stream.getvalue())

class TestProtoTestResult(unittest.TestCase):
    def test_addSuccess(self):
        """
        addSuccess adds a test correctly
        """
        ptr = ProtoTestResult()
        test = proto_test(MagicMock())
        ptr.addSuccess(test)
        self.assertEqual(test, ptr.passing[0])
    def test_addError(self):
        """
        addError adds a test and error correctly
        """
...
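This excerpt comes from green's own test suite: test_displayStderr follows exactly the record-then-display pattern sketched earlier, and the ProtoTestResult tests use the same style of direct checks. A small sketch mirroring test_addSuccess above (the MagicMock is the same stand-in test case used in the excerpt, and the assertion is an illustration rather than part of green's API):

# Sketch mirroring test_addSuccess: a passing test ends up in
# ProtoTestResult.passing. Assumes the green package is installed.
from unittest.mock import MagicMock

from green.result import ProtoTestResult, proto_test

ptr = ProtoTestResult()
test = proto_test(MagicMock())
ptr.addSuccess(test)
assert ptr.passing[0] == test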

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run green automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!
