Best Python code snippet using green
skipping.py
Source: skipping.py

""" support for skip/xfail functions and markers. """
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail


def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="report the results of xfail tests as if they were not marked",
    )
    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )


def pytest_configure(config):
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)
    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value.  Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. see "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())
    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")
    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)


@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)


def check_xfail_no_run(item):
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())


def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        getattr(item, "_skipped_by_mark", False)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        _, _, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line + 1, reason


# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            ...
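For context, a minimal test module exercising the markers this plugin registers might look like the following sketch (test names are illustrative); skip, skipif, and xfail with reason, raises, and strict are the documented pytest marker parameters described in pytest_configure above.

import sys

import pytest


@pytest.mark.skip(reason="no way of currently testing this")
def test_not_yet_testable():
    assert False  # body is never executed; reported as skipped


@pytest.mark.skipif(sys.platform == "win32", reason="does not run on Windows")
def test_posix_only():
    assert True


@pytest.mark.xfail(raises=ZeroDivisionError, strict=True, reason="known bug")
def test_known_bug():
    1 / 0  # expected to fail; an unexpected pass is reported as XPASS(strict)

Setting xfail_strict = true in the ini file changes only the default: individual xfail markers can still override it with strict=False, as handled by check_strict_xfail above.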
_nose_compat.py
Source: _nose_compat.py

import warnings

try:
    from unittest.case import _ExpectedFailure as ExpectedFailure, _UnexpectedSuccess as UnexpectedSuccess
except ImportError:
    from unittest2.case import _ExpectedFailure as ExpectedFailure, _UnexpectedSuccess as UnexpectedSuccess


def install_proxy():
    import nose.proxy

    class MyResultProxy(nose.proxy.ResultProxy):
        def addExpectedFailure(self, test, err):
            # from nose.plugins.expected import ExpectedFailure
            self.assertMyTest(test)
            plugins = self.plugins
            plugins.addError(self.test, (ExpectedFailure, err, None))
            addExpectedFailure = getattr(self.result, "addExpectedFailure", None)
            if addExpectedFailure:
                self.result.addExpectedFailure(self.test, self._prepareErr(err))
            else:
                warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                              RuntimeWarning)
                self.result.addSuccess(self)

        def addUnexpectedSuccess(self, test):
            # from nose.plugins.expected import UnexpectedSuccess
            self.assertMyTest(test)
            plugins = self.plugins
            plugins.addError(self.test, (UnexpectedSuccess, None, None))
            self.result.addUnexpectedSuccess(self.test)
            if self.config.stopOnError:
                self.shouldStop = True

    nose.proxy.ResultProxy = MyResultProxy


def install_result():
    import nose.result

    class MyTextTestResult(nose.result.TextTestResult):
        def addExpectedFailure(self, test, err):
            # 2.7 expected failure compat
            if ExpectedFailure in self.errorClasses:
                storage, label, isfail = self.errorClasses[ExpectedFailure]
                storage.append((test, self._exc_info_to_string(err, test)))
                self.printLabel(label, (ExpectedFailure, '', None))

        def addUnexpectedSuccess(self, test):
            # 2.7 unexpected success compat
            if UnexpectedSuccess in self.errorClasses:
                storage, label, isfail = self.errorClasses[UnexpectedSuccess]
                storage.append((test, 'This test was marked as an expected '
                    'failure, but it succeeded.'))
                self.printLabel(label, (UnexpectedSuccess, '', None))
    ...
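A rough usage sketch, assuming the shim above is saved as _nose_compat.py on the import path and that nose is installed on one of the older Python/unittest versions it targets: the two install_* helpers monkey-patch nose before the run so that unittest-style expected failures and unexpected successes are routed through the patched addExpectedFailure/addUnexpectedSuccess methods instead of the stock proxy.

import unittest

import _nose_compat

_nose_compat.install_proxy()   # patch nose.proxy.ResultProxy
_nose_compat.install_result()  # patch nose.result.TextTestResult


class ExampleTest(unittest.TestCase):
    @unittest.expectedFailure
    def test_known_broken(self):
        self.assertEqual(1, 2)  # reported through the patched addExpectedFailure

# then run the suite with the nosetests command line as usual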
xfail.py
Source: xfail.py

class UnexpectedSuccess(Exception):
    pass


class _stats(object):
    pass


stats = _stats()
stats.xfail = 0
stats.warnings = []


def report():
    if stats.xfail:
        print(stats.xfail, "expected failures")
    print("\n".join(stats.warnings))


import atexit
atexit.register(report)


def xfail(fun):  # FIXME: what about generators?
    import os
    doc = fun.__doc__ or ""
    if 'http://code.pediapress.com/wiki/ticket/' not in doc:
        stats.warnings.append("expected failure %s.%s does not reference a ticket in its docstring" % (fun.__module__, fun.__name__,))

    if 'XFAIL' in os.environ:
        def doit(*args, **kwargs):
            try:
                fun(*args, **kwargs)
            except:
                stats.xfail += 1
                return
            raise UnexpectedSuccess('expected %r to fail' % (fun,))
        return doit
    ...
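A hedged usage sketch for the decorator above (the test name and ticket number are made up): with XFAIL present in the environment at decoration time, a failing body increments stats.xfail; a body that unexpectedly passes raises UnexpectedSuccess; the atexit hook prints the tally when the process exits.

import os

os.environ.setdefault("XFAIL", "1")  # the decorator only wraps when XFAIL is set

from xfail import xfail  # assumes the module above is saved as xfail.py


@xfail
def test_render_table():
    """Known breakage, see http://code.pediapress.com/wiki/ticket/123"""
    raise AssertionError("still broken")  # swallowed and counted as expected


test_render_table()  # stats.xfail is now 1; report() prints the count at exit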