How to use test_1 method in Lemoncheesecake

Best Python code snippet using lemoncheesecake

test_cacheprovider.py

Source:test_cacheprovider.py Github

copy

Full Screen

...216 def test_lastfailed_usecase(self, testdir, monkeypatch):217 monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")218 p = testdir.makepyfile(219 """220 def test_1():221 assert 0222 def test_2():223 assert 0224 def test_3():225 assert 1226 """227 )228 result = testdir.runpytest()229 result.stdout.fnmatch_lines(["*2 failed*"])230 p.write(231 textwrap.dedent(232 """\233 def test_1():234 assert 1235 def test_2():236 assert 1237 def test_3():238 assert 0239 """240 )241 )242 result = testdir.runpytest("--lf")243 result.stdout.fnmatch_lines(["*2 passed*1 desel*"])244 result = testdir.runpytest("--lf")245 result.stdout.fnmatch_lines(246 [247 "collected 3 items",248 "run-last-failure: no previously failed tests, not deselecting items.",249 "*1 failed*2 passed*",250 ]251 )252 result = testdir.runpytest("--lf", "--cache-clear")253 result.stdout.fnmatch_lines(["*1 failed*2 passed*"])254 # Run this again to make sure clear-cache is robust255 if os.path.isdir(".pytest_cache"):256 shutil.rmtree(".pytest_cache")257 result = testdir.runpytest("--lf", "--cache-clear")258 result.stdout.fnmatch_lines(["*1 failed*2 passed*"])259 def test_failedfirst_order(self, testdir):260 testdir.tmpdir.join("test_a.py").write(261 textwrap.dedent(262 """\263 def test_always_passes():264 assert 1265 """266 )267 )268 testdir.tmpdir.join("test_b.py").write(269 textwrap.dedent(270 """\271 def test_always_fails():272 assert 0273 """274 )275 )276 result = testdir.runpytest()277 # Test order will be collection order; alphabetical278 result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"])279 result = testdir.runpytest("--ff")280 # Test order will be failing tests firs281 result.stdout.fnmatch_lines(["test_b.py*", "test_a.py*"])282 def test_lastfailed_failedfirst_order(self, testdir):283 testdir.makepyfile(284 **{285 "test_a.py": """\286 def test_always_passes():287 assert 1288 """,289 "test_b.py": """\290 def test_always_fails():291 assert 0292 """,293 }294 )295 result = testdir.runpytest()296 # Test 
order will be collection order; alphabetical297 result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"])298 result = testdir.runpytest("--lf", "--ff")299 # Test order will be failing tests firs300 result.stdout.fnmatch_lines(["test_b.py*"])301 assert "test_a.py" not in result.stdout.str()302 def test_lastfailed_difference_invocations(self, testdir, monkeypatch):303 monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")304 testdir.makepyfile(305 test_a="""\306 def test_a1():307 assert 0308 def test_a2():309 assert 1310 """,311 test_b="""\312 def test_b1():313 assert 0314 """,315 )316 p = testdir.tmpdir.join("test_a.py")317 p2 = testdir.tmpdir.join("test_b.py")318 result = testdir.runpytest()319 result.stdout.fnmatch_lines(["*2 failed*"])320 result = testdir.runpytest("--lf", p2)321 result.stdout.fnmatch_lines(["*1 failed*"])322 p2.write(323 textwrap.dedent(324 """\325 def test_b1():326 assert 1327 """328 )329 )330 result = testdir.runpytest("--lf", p2)331 result.stdout.fnmatch_lines(["*1 passed*"])332 result = testdir.runpytest("--lf", p)333 result.stdout.fnmatch_lines(["*1 failed*1 desel*"])334 def test_lastfailed_usecase_splice(self, testdir, monkeypatch):335 monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")336 testdir.makepyfile(337 """\338 def test_1():339 assert 0340 """341 )342 p2 = testdir.tmpdir.join("test_something.py")343 p2.write(344 textwrap.dedent(345 """\346 def test_2():347 assert 0348 """349 )350 )351 result = testdir.runpytest()352 result.stdout.fnmatch_lines(["*2 failed*"])353 result = testdir.runpytest("--lf", p2)354 result.stdout.fnmatch_lines(["*1 failed*"])355 result = testdir.runpytest("--lf")356 result.stdout.fnmatch_lines(["*2 failed*"])357 def test_lastfailed_xpass(self, testdir):358 testdir.inline_runsource(359 """360 import pytest361 @pytest.mark.xfail362 def test_hello():363 assert 1364 """365 )366 config = testdir.parseconfigure()367 lastfailed = config.cache.get("cache/lastfailed", -1)368 assert lastfailed == -1369 def 
test_non_serializable_parametrize(self, testdir):370 """Test that failed parametrized tests with unmarshable parameters371 don't break pytest-cache.372 """373 testdir.makepyfile(374 r"""375 import pytest376 @pytest.mark.parametrize('val', [377 b'\xac\x10\x02G',378 ])379 def test_fail(val):380 assert False381 """382 )383 result = testdir.runpytest()384 result.stdout.fnmatch_lines(["*1 failed in*"])385 def test_terminal_report_lastfailed(self, testdir):386 test_a = testdir.makepyfile(387 test_a="""388 def test_a1():389 pass390 def test_a2():391 pass392 """393 )394 test_b = testdir.makepyfile(395 test_b="""396 def test_b1():397 assert 0398 def test_b2():399 assert 0400 """401 )402 result = testdir.runpytest()403 result.stdout.fnmatch_lines(["collected 4 items", "*2 failed, 2 passed in*"])404 result = testdir.runpytest("--lf")405 result.stdout.fnmatch_lines(406 [407 "collected 2 items",408 "run-last-failure: rerun previous 2 failures (skipped 1 file)",409 "*2 failed in*",410 ]411 )412 result = testdir.runpytest(test_a, "--lf")413 result.stdout.fnmatch_lines(414 [415 "collected 2 items",416 "run-last-failure: 2 known failures not in selected tests",417 "*2 passed in*",418 ]419 )420 result = testdir.runpytest(test_b, "--lf")421 result.stdout.fnmatch_lines(422 [423 "collected 2 items",424 "run-last-failure: rerun previous 2 failures",425 "*2 failed in*",426 ]427 )428 result = testdir.runpytest("test_b.py::test_b1", "--lf")429 result.stdout.fnmatch_lines(430 [431 "collected 1 item",432 "run-last-failure: rerun previous 1 failure",433 "*1 failed in*",434 ]435 )436 def test_terminal_report_failedfirst(self, testdir):437 testdir.makepyfile(438 test_a="""439 def test_a1():440 assert 0441 def test_a2():442 pass443 """444 )445 result = testdir.runpytest()446 result.stdout.fnmatch_lines(["collected 2 items", "*1 failed, 1 passed in*"])447 result = testdir.runpytest("--ff")448 result.stdout.fnmatch_lines(449 [450 "collected 2 items",451 "run-last-failure: rerun previous 1 failure 
first",452 "*1 failed, 1 passed in*",453 ]454 )455 def test_lastfailed_collectfailure(self, testdir, monkeypatch):456 testdir.makepyfile(457 test_maybe="""458 import os459 env = os.environ460 if '1' == env['FAILIMPORT']:461 raise ImportError('fail')462 def test_hello():463 assert '0' == env['FAILTEST']464 """465 )466 def rlf(fail_import, fail_run):467 monkeypatch.setenv("FAILIMPORT", str(fail_import))468 monkeypatch.setenv("FAILTEST", str(fail_run))469 testdir.runpytest("-q")470 config = testdir.parseconfigure()471 lastfailed = config.cache.get("cache/lastfailed", -1)472 return lastfailed473 lastfailed = rlf(fail_import=0, fail_run=0)474 assert lastfailed == -1475 lastfailed = rlf(fail_import=1, fail_run=0)476 assert list(lastfailed) == ["test_maybe.py"]477 lastfailed = rlf(fail_import=0, fail_run=1)478 assert list(lastfailed) == ["test_maybe.py::test_hello"]479 def test_lastfailed_failure_subset(self, testdir, monkeypatch):480 testdir.makepyfile(481 test_maybe="""482 import os483 env = os.environ484 if '1' == env['FAILIMPORT']:485 raise ImportError('fail')486 def test_hello():487 assert '0' == env['FAILTEST']488 """489 )490 testdir.makepyfile(491 test_maybe2="""492 import os493 env = os.environ494 if '1' == env['FAILIMPORT']:495 raise ImportError('fail')496 def test_hello():497 assert '0' == env['FAILTEST']498 def test_pass():499 pass500 """501 )502 def rlf(fail_import, fail_run, args=()):503 monkeypatch.setenv("FAILIMPORT", str(fail_import))504 monkeypatch.setenv("FAILTEST", str(fail_run))505 result = testdir.runpytest("-q", "--lf", *args)506 config = testdir.parseconfigure()507 lastfailed = config.cache.get("cache/lastfailed", -1)508 return result, lastfailed509 result, lastfailed = rlf(fail_import=0, fail_run=0)510 assert lastfailed == -1511 result.stdout.fnmatch_lines(["*3 passed*"])512 result, lastfailed = rlf(fail_import=1, fail_run=0)513 assert sorted(list(lastfailed)) == ["test_maybe.py", "test_maybe2.py"]514 result, lastfailed = rlf(fail_import=0, 
fail_run=0, args=("test_maybe2.py",))515 assert list(lastfailed) == ["test_maybe.py"]516 # edge case of test selection - even if we remember failures517 # from other tests we still need to run all tests if no test518 # matches the failures519 result, lastfailed = rlf(fail_import=0, fail_run=0, args=("test_maybe2.py",))520 assert list(lastfailed) == ["test_maybe.py"]521 result.stdout.fnmatch_lines(["*2 passed*"])522 def test_lastfailed_creates_cache_when_needed(self, testdir):523 # Issue #1342524 testdir.makepyfile(test_empty="")525 testdir.runpytest("-q", "--lf")526 assert not os.path.exists(".pytest_cache/v/cache/lastfailed")527 testdir.makepyfile(test_successful="def test_success():\n assert True")528 testdir.runpytest("-q", "--lf")529 assert not os.path.exists(".pytest_cache/v/cache/lastfailed")530 testdir.makepyfile(test_errored="def test_error():\n assert False")531 testdir.runpytest("-q", "--lf")532 assert os.path.exists(".pytest_cache/v/cache/lastfailed")533 def test_xfail_not_considered_failure(self, testdir):534 testdir.makepyfile(535 """536 import pytest537 @pytest.mark.xfail538 def test():539 assert 0540 """541 )542 result = testdir.runpytest()543 result.stdout.fnmatch_lines(["*1 xfailed*"])544 assert self.get_cached_last_failed(testdir) == []545 def test_xfail_strict_considered_failure(self, testdir):546 testdir.makepyfile(547 """548 import pytest549 @pytest.mark.xfail(strict=True)550 def test():551 pass552 """553 )554 result = testdir.runpytest()555 result.stdout.fnmatch_lines(["*1 failed*"])556 assert self.get_cached_last_failed(testdir) == [557 "test_xfail_strict_considered_failure.py::test"558 ]559 @pytest.mark.parametrize("mark", ["mark.xfail", "mark.skip"])560 def test_failed_changed_to_xfail_or_skip(self, testdir, mark):561 testdir.makepyfile(562 """563 import pytest564 def test():565 assert 0566 """567 )568 result = testdir.runpytest()569 assert self.get_cached_last_failed(testdir) == [570 "test_failed_changed_to_xfail_or_skip.py::test"571 ]572 
assert result.ret == 1573 testdir.makepyfile(574 """575 import pytest576 @pytest.{mark}577 def test():578 assert 0579 """.format(580 mark=mark581 )582 )583 result = testdir.runpytest()584 assert result.ret == 0585 assert self.get_cached_last_failed(testdir) == []586 assert result.ret == 0587 @pytest.mark.parametrize("quiet", [True, False])588 @pytest.mark.parametrize("opt", ["--ff", "--lf"])589 def test_lf_and_ff_prints_no_needless_message(self, quiet, opt, testdir):590 # Issue 3853591 testdir.makepyfile("def test(): assert 0")592 args = [opt]593 if quiet:594 args.append("-q")595 result = testdir.runpytest(*args)596 assert "run all" not in result.stdout.str()597 result = testdir.runpytest(*args)598 if quiet:599 assert "run all" not in result.stdout.str()600 else:601 assert "rerun previous" in result.stdout.str()602 def get_cached_last_failed(self, testdir):603 config = testdir.parseconfigure()604 return sorted(config.cache.get("cache/lastfailed", {}))605 def test_cache_cumulative(self, testdir):606 """607 Test workflow where user fixes errors gradually file by file using --lf.608 """609 # 1. initial run610 test_bar = testdir.makepyfile(611 test_bar="""612 def test_bar_1():613 pass614 def test_bar_2():615 assert 0616 """617 )618 test_foo = testdir.makepyfile(619 test_foo="""620 def test_foo_3():621 pass622 def test_foo_4():623 assert 0624 """625 )626 testdir.runpytest()627 assert self.get_cached_last_failed(testdir) == [628 "test_bar.py::test_bar_2",629 "test_foo.py::test_foo_4",630 ]631 # 2. 
fix test_bar_2, run only test_bar.py632 testdir.makepyfile(633 test_bar="""634 def test_bar_1():635 pass636 def test_bar_2():637 pass638 """639 )640 result = testdir.runpytest(test_bar)641 result.stdout.fnmatch_lines(["*2 passed*"])642 # ensure cache does not forget that test_foo_4 failed once before643 assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"]644 result = testdir.runpytest("--last-failed")645 result.stdout.fnmatch_lines(["*1 failed, 1 deselected*"])646 assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"]647 # 3. fix test_foo_4, run only test_foo.py648 test_foo = testdir.makepyfile(649 test_foo="""650 def test_foo_3():651 pass652 def test_foo_4():653 pass654 """655 )656 result = testdir.runpytest(test_foo, "--last-failed")657 result.stdout.fnmatch_lines(["*1 passed, 1 deselected*"])658 assert self.get_cached_last_failed(testdir) == []659 result = testdir.runpytest("--last-failed")660 result.stdout.fnmatch_lines(["*4 passed*"])661 assert self.get_cached_last_failed(testdir) == []662 def test_lastfailed_no_failures_behavior_all_passed(self, testdir):663 testdir.makepyfile(664 """665 def test_1():666 assert True667 def test_2():668 assert True669 """670 )671 result = testdir.runpytest()672 result.stdout.fnmatch_lines(["*2 passed*"])673 result = testdir.runpytest("--lf")674 result.stdout.fnmatch_lines(["*2 passed*"])675 result = testdir.runpytest("--lf", "--lfnf", "all")676 result.stdout.fnmatch_lines(["*2 passed*"])677 result = testdir.runpytest("--lf", "--lfnf", "none")678 result.stdout.fnmatch_lines(679 [680 "collected 2 items / 2 deselected",681 "run-last-failure: no previously failed tests, deselecting all items.",682 "* 2 deselected in *",683 ]684 )685 assert result.ret == EXIT_NOTESTSCOLLECTED686 def test_lastfailed_no_failures_behavior_empty_cache(self, testdir):687 testdir.makepyfile(688 """689 def test_1():690 assert True691 def test_2():692 assert False693 """694 )695 result = testdir.runpytest("--lf", 
"--cache-clear")696 result.stdout.fnmatch_lines(["*1 failed*1 passed*"])697 result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "all")698 result.stdout.fnmatch_lines(["*1 failed*1 passed*"])699 result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "none")700 result.stdout.fnmatch_lines(["*2 desel*"])701 def test_lastfailed_skip_collection(self, testdir):702 """703 Test --lf behavior regarding skipping collection of files that are not marked as704 failed in the cache (#5172).705 """706 testdir.makepyfile(707 **{708 "pkg1/test_1.py": """709 import pytest710 @pytest.mark.parametrize('i', range(3))711 def test_1(i): pass712 """,713 "pkg2/test_2.py": """714 import pytest715 @pytest.mark.parametrize('i', range(5))716 def test_1(i):717 assert i not in (1, 3)718 """,719 }720 )721 # first run: collects 8 items (test_1: 3, test_2: 5)722 result = testdir.runpytest()723 result.stdout.fnmatch_lines(["collected 8 items", "*2 failed*6 passed*"])724 # second run: collects only 5 items from test_2, because all tests from test_1 have passed725 result = testdir.runpytest("--lf")726 result.stdout.fnmatch_lines(727 [728 "collected 5 items / 3 deselected / 2 selected",729 "run-last-failure: rerun previous 2 failures (skipped 1 file)",730 "*2 failed*3 deselected*",731 ]732 )733 # add another file and check if message is correct when skipping more than 1 file734 testdir.makepyfile(735 **{736 "pkg1/test_3.py": """737 def test_3(): pass738 """739 }740 )741 result = testdir.runpytest("--lf")742 result.stdout.fnmatch_lines(743 [744 "collected 5 items / 3 deselected / 2 selected",745 "run-last-failure: rerun previous 2 failures (skipped 2 files)",746 "*2 failed*3 deselected*",747 ]748 )749 def test_lastfailed_with_known_failures_not_being_selected(self, testdir):750 testdir.makepyfile(751 **{752 "pkg1/test_1.py": """def test_1(): assert 0""",753 "pkg1/test_2.py": """def test_2(): pass""",754 }755 )756 result = testdir.runpytest()757 result.stdout.fnmatch_lines(["collected 2 
items", "* 1 failed, 1 passed in *"])758 py.path.local("pkg1/test_1.py").remove()759 result = testdir.runpytest("--lf")760 result.stdout.fnmatch_lines(761 [762 "collected 1 item",763 "run-last-failure: 1 known failures not in selected tests",764 "* 1 passed in *",765 ]766 )767 # Recreate file with known failure.768 testdir.makepyfile(**{"pkg1/test_1.py": """def test_1(): assert 0"""})769 result = testdir.runpytest("--lf")770 result.stdout.fnmatch_lines(771 [772 "collected 1 item",773 "run-last-failure: rerun previous 1 failure (skipped 1 file)",774 "* 1 failed in *",775 ]776 )777 # Remove/rename test.778 testdir.makepyfile(**{"pkg1/test_1.py": """def test_renamed(): assert 0"""})779 result = testdir.runpytest("--lf")780 result.stdout.fnmatch_lines(781 [782 "collected 1 item",783 "run-last-failure: 1 known failures not in selected tests (skipped 1 file)",784 "* 1 failed in *",785 ]786 )787class TestNewFirst(object):788 def test_newfirst_usecase(self, testdir):789 testdir.makepyfile(790 **{791 "test_1/test_1.py": """792 def test_1(): assert 1793 def test_2(): assert 1794 def test_3(): assert 1795 """,796 "test_2/test_2.py": """797 def test_1(): assert 1798 def test_2(): assert 1799 def test_3(): assert 1800 """,801 }802 )803 testdir.tmpdir.join("test_1/test_1.py").setmtime(1)804 result = testdir.runpytest("-v")805 result.stdout.fnmatch_lines(806 [807 "*test_1/test_1.py::test_1 PASSED*",808 "*test_1/test_1.py::test_2 PASSED*",809 "*test_1/test_1.py::test_3 PASSED*",810 "*test_2/test_2.py::test_1 PASSED*",811 "*test_2/test_2.py::test_2 PASSED*",812 "*test_2/test_2.py::test_3 PASSED*",813 ]814 )815 result = testdir.runpytest("-v", "--nf")816 result.stdout.fnmatch_lines(817 [818 "*test_2/test_2.py::test_1 PASSED*",819 "*test_2/test_2.py::test_2 PASSED*",820 "*test_2/test_2.py::test_3 PASSED*",821 "*test_1/test_1.py::test_1 PASSED*",822 "*test_1/test_1.py::test_2 PASSED*",823 "*test_1/test_1.py::test_3 PASSED*",824 ]825 )826 
testdir.tmpdir.join("test_1/test_1.py").write(827 "def test_1(): assert 1\n"828 "def test_2(): assert 1\n"829 "def test_3(): assert 1\n"830 "def test_4(): assert 1\n"831 )832 testdir.tmpdir.join("test_1/test_1.py").setmtime(1)833 result = testdir.runpytest("-v", "--nf")834 result.stdout.fnmatch_lines(835 [836 "*test_1/test_1.py::test_4 PASSED*",837 "*test_2/test_2.py::test_1 PASSED*",838 "*test_2/test_2.py::test_2 PASSED*",839 "*test_2/test_2.py::test_3 PASSED*",840 "*test_1/test_1.py::test_1 PASSED*",841 "*test_1/test_1.py::test_2 PASSED*",842 "*test_1/test_1.py::test_3 PASSED*",843 ]844 )845 def test_newfirst_parametrize(self, testdir):846 testdir.makepyfile(847 **{848 "test_1/test_1.py": """849 import pytest850 @pytest.mark.parametrize('num', [1, 2])851 def test_1(num): assert num852 """,853 "test_2/test_2.py": """854 import pytest855 @pytest.mark.parametrize('num', [1, 2])856 def test_1(num): assert num857 """,858 }859 )860 testdir.tmpdir.join("test_1/test_1.py").setmtime(1)861 result = testdir.runpytest("-v")862 result.stdout.fnmatch_lines(863 [864 "*test_1/test_1.py::test_1[1*",865 "*test_1/test_1.py::test_1[2*",866 "*test_2/test_2.py::test_1[1*",867 "*test_2/test_2.py::test_1[2*",868 ]869 )870 result = testdir.runpytest("-v", "--nf")871 result.stdout.fnmatch_lines(872 [873 "*test_2/test_2.py::test_1[1*",874 "*test_2/test_2.py::test_1[2*",875 "*test_1/test_1.py::test_1[1*",876 "*test_1/test_1.py::test_1[2*",877 ]878 )879 testdir.tmpdir.join("test_1/test_1.py").write(880 "import pytest\n"881 "@pytest.mark.parametrize('num', [1, 2, 3])\n"882 "def test_1(num): assert num\n"883 )884 testdir.tmpdir.join("test_1/test_1.py").setmtime(1)885 result = testdir.runpytest("-v", "--nf")886 result.stdout.fnmatch_lines(887 [888 "*test_1/test_1.py::test_1[3*",889 "*test_2/test_2.py::test_1[1*",890 "*test_2/test_2.py::test_1[2*",891 "*test_1/test_1.py::test_1[1*",892 "*test_1/test_1.py::test_1[2*",893 ]894 )895class TestReadme(object):896 def check_readme(self, testdir):...

Full Screen

Full Screen

get_graph_func.py

Source:get_graph_func.py Github

copy

Full Screen

from matplotlib import pyplot as plt
from IPython.display import display


def _query_filter(column, value):
    """Return a pandas-query clause like "segment == 'X'", or '' when value is None."""
    return '' if value is None else f"{column} == '{value}'"


def _display_yearly(frame, column, agg, transform):
    """Aggregate *column* per year and display it as a one-row table.

    agg is the GroupBy aggregation name ('prod', 'sum' or 'mean');
    *transform* formats the aggregated Series.  The table is transposed so
    the years become the columns, matching the original layout.
    """
    table = getattr(frame.groupby('year'), agg)()[column].reset_index()
    table[column] = transform(table[column])
    table = table.T
    table.columns = table.iloc[0].astype('int')
    display(table[1:])


def _plot_labeled(axis, frame, column, title, make_label, sort=False):
    """Plot *column* against report_dt on *axis*, annotating non-zero points.

    *make_label* turns a y-value into the annotation text.  *sort*
    reproduces the sort_values() that the original applied only to the
    churn-rate plot.
    """
    series = frame[['report_dt', column]]
    if sort:
        series = series.sort_values(by=['report_dt'])
    axis.plot(series.set_index('report_dt'))
    axis.set_title(title)
    for x, y in zip(frame['report_dt'], frame[column]):
        if y == 0:
            continue
        axis.annotate(make_label(y), (x, y),
                      textcoords="offset points", xytext=(-1, -10), ha='center')


def get_graphs(for_churn, segment: str = None, state: str = None, city: str = None):
    """Display yearly churn/sales tables and monthly trend charts.

    Parameters
    ----------
    for_churn : pandas.DataFrame
        Source data; assumes columns report_dt (datetime), active,
        'become churn', reactivated, sales, orders_cnt and new, plus
        segment/state/city when filters are given -- TODO confirm schema
        against the caller.
    segment, state, city : str, optional
        Filter values; all None selects the whole dataset ('Total').
    """
    if segment is None and state is None and city is None:
        filtered_churn = for_churn.groupby('report_dt').sum().reset_index()
        text = 'Total'
    else:
        clauses = [_query_filter('city', city),
                   _query_filter('state', state),
                   _query_filter('segment', segment)]
        # Title lists the non-None filter values (original separator ' ,' kept).
        text = ' ,'.join([x for x in [city, state, segment] if x is not None])
        r = ' & '.join([x for x in clauses if len(x) > 0])
        filtered_churn = for_churn.query(r).groupby('report_dt').sum().reset_index()

    # Rates are computed against the average active base of two consecutive periods.
    filtered_churn['active_shift'] = filtered_churn['active'].shift(1)
    filtered_churn['active_avg'] = (filtered_churn['active'] + filtered_churn['active_shift']) / 2
    filtered_churn['churn_rate'] = filtered_churn['become churn'] / filtered_churn['active_avg']
    filtered_churn['reactivation_rate'] = filtered_churn['reactivated'] / filtered_churn['active_avg']
    filtered_churn['avg_order'] = filtered_churn['sales'] / filtered_churn['orders_cnt']

    filtered_churn['year'] = filtered_churn['report_dt'].dt.year.astype('int')
    # Complements (1 - rate) so the yearly figures can be compounded with prod().
    filtered_churn['churn'] = 1 - filtered_churn['churn_rate']
    filtered_churn['reactivation'] = 1 - filtered_churn['reactivation_rate']

    print('-------------ГОДОВАЯ ДИНАМИКА-------------')
    print('\n')

    as_pct = lambda s: (round(100 - s * 100, 1)).astype('str') + '%'
    as_int = lambda s: s.astype('int')

    _display_yearly(filtered_churn, 'churn', 'prod', as_pct)
    _display_yearly(filtered_churn, 'reactivation', 'prod', as_pct)
    _display_yearly(filtered_churn, 'sales', 'sum', as_int)
    _display_yearly(filtered_churn, 'orders_cnt', 'sum', as_int)
    _display_yearly(filtered_churn, 'avg_order', 'mean', lambda s: round(s, 1))
    _display_yearly(filtered_churn, 'new', 'sum', as_int)

    print('\n')
    print('-------------МЕСЯЧНАЯ ДИНАМИКА-------------')
    print('\n')
    fig, ax = plt.subplots(3, 2, figsize=(40, 20))

    pct_label = lambda y: f'{round(y * 100, 1)}'
    _plot_labeled(ax[0, 0], filtered_churn, 'churn_rate',
                  f'Churn Rate, % for {text}', pct_label, sort=True)
    _plot_labeled(ax[0, 1], filtered_churn, 'reactivation_rate',
                  f'Reactivation Rate, % for {text}', pct_label)
    _plot_labeled(ax[1, 0], filtered_churn, 'sales',
                  f"Sales Volume'000 for {text}", lambda y: int(y / 1000))
    _plot_labeled(ax[1, 1], filtered_churn, 'orders_cnt',
                  f'Orders Count for {text}', int)
    _plot_labeled(ax[2, 0], filtered_churn, 'avg_order',
                  f'Average Order for {text}', int)
    _plot_labeled(ax[2, 1], filtered_churn, 'new',
                  f'New clients for {text}', int)

Full Screen

Full Screen

test_ios_vrf.py

Source:test_ios_vrf.py Github

copy

Full Screen

#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_vrf
from .ios_module import TestIosModule, load_fixture, set_module_args


class TestIosVrfModule(TestIosModule):
    """Unit tests for the ios_vrf module, driven entirely by mocks."""

    module = ios_vrf

    def setUp(self):
        # Patch the three network primitives so no real device is contacted.
        self.mock_get_config = patch('ansible.modules.network.ios.ios_vrf.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.ios.ios_vrf.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_exec_command = patch('ansible.modules.network.ios.ios_vrf.exec_command')
        self.exec_command = self.mock_exec_command.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_exec_command.stop()

    def load_fixtures(self, commands=None):
        # Every test sees the same canned running-config fixture.
        self.get_config.return_value = load_fixture('ios_vrf_config.cfg')
        self.exec_command.return_value = (0, load_fixture('ios_vrf_config.cfg').strip(), None)
        self.load_config.return_value = None

    def test_ios_vrf_name(self):
        set_module_args(dict(name='test_4'))
        commands = ['vrf definition test_4', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_name_unchanged(self):
        # Matches the fixture exactly, so no commands are generated.
        set_module_args(dict(name='test_1', rd='1:100', description='test vrf 1'))
        self.execute_module()

    def test_ios_vrf_description(self):
        set_module_args(dict(name='test_1', description='test string'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_rd(self):
        set_module_args(dict(name='test_1', rd='2:100'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit', 'rd 2:100']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_interfaces(self):
        set_module_args(dict(name='test_1', interfaces=['Ethernet1']))
        commands = ['interface Ethernet2', 'no vrf forwarding test_1',
                    'interface Ethernet1', 'vrf forwarding test_1',
                    'ip address 1.2.3.4/5']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrf_state_absent(self):
        set_module_args(dict(name='test_1', state='absent'))
        commands = ['no vrf definition test_1']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrf_purge_all(self):
        set_module_args(dict(purge=True))
        commands = ['no vrf definition test_1', 'no vrf definition test_2',
                    'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrf_purge_all_but_one(self):
        set_module_args(dict(name='test_1', purge=True))
        commands = ['no vrf definition test_2', 'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_no_purge(self):
        vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
        set_module_args(dict(vrfs=vrfs))
        commands = ['vrf definition test_4',
                    'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_purge(self):
        vrfs = [{'name': 'test_1'}, {'name': 'test_4'}]
        set_module_args(dict(vrfs=vrfs, purge=True))
        commands = ['vrf definition test_4',
                    'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit',
                    'no vrf definition test_2',
                    'no vrf definition test_3']
        self.execute_module(changed=True, commands=commands)

    def test_ios_vrfs_global_arg(self):
        vrfs = [{'name': 'test_1'}, {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['vrf definition test_1', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit', 'description test string',
                    'vrf definition test_2', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrfs_local_override_description(self):
        # The per-vrf description on test_1 already matches the fixture,
        # so only test_2 picks up the global description.
        vrfs = [{'name': 'test_1', 'description': 'test vrf 1'},
                {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['vrf definition test_2', 'address-family ipv4', 'exit',
                    'address-family ipv6', 'exit', 'description test string']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_vrfs_local_override_state(self):
        vrfs = [{'name': 'test_1', 'state': 'absent'},
                {'name': 'test_2'}]
        set_module_args(dict(vrfs=vrfs, description='test string'))
        commands = ['no vrf definition test_1', 'vrf definition test_2',
                    'address-family ipv4', 'exit', 'address-family ipv6', 'exit',
                    'description test string']
        # NOTE(review): the source listing is truncated here ("..."); the
        # closing self.execute_module(...) call is not visible in this chunk.

Full Screen

Full Screen

test_properties.py

Source:test_properties.py Github

copy

Full Screen

import smart_imports

smart_imports.all()


class TestClient(properties.Client):
    """Properties client wired to the test property set below."""
    pass


class TEST_PROPERTIES(properties.PROPERTIES):
    # Record layout is presumably (name, index, caption, serialize, deserialize,
    # default, merge mode) -- TODO confirm against properties.PROPERTIES.
    # test_1 is REPLACE-typed (last write wins), test_2 is APPEND-typed (list).
    records = (('test_1', 0, 'тест 1', lambda x: x, lambda x: x, None, tt_api_properties.TYPE.REPLACE),
               ('test_2', 1, 'тест 2', str, int, list, tt_api_properties.TYPE.APPEND))


properties_client = TestClient(entry_point=accounts_conf.settings.TT_PLAYERS_PROPERTIES_ENTRY_POINT,
                               properties=TEST_PROPERTIES)


class SetGetTests(utils_testcase.TestCase):
    """Bulk set/get round-trips through cmd_set_properties / cmd_get_properties."""

    def setUp(self):
        properties_client.cmd_debug_clear_service()

    def test_simple(self):
        properties_client.cmd_set_properties([(666, 'test_1', 'x.1')])
        stored = properties_client.cmd_get_properties({666: ['test_1']})
        self.assertEqual(stored[666].test_1, 'x.1')
        self.assertEqual(stored[666].test_2, [])

    def test_multiple(self):
        properties_client.cmd_set_properties([(666, 'test_1', 'x.1'),
                                              (777, 'test_2', 13),
                                              (888, 'test_1', 'x.2'),
                                              (888, 'test_2', 14)])
        stored = properties_client.cmd_get_properties({666: ['test_1'],
                                                       777: ['test_1', 'test_2'],
                                                       888: ['test_1', 'test_2']})
        self.assertEqual(stored[666].test_1, 'x.1')
        self.assertEqual(stored[666].test_2, [])
        self.assertEqual(stored[777].test_1, None)
        self.assertEqual(stored[777].test_2, [13])
        self.assertEqual(stored[888].test_1, 'x.2')
        self.assertEqual(stored[888].test_2, [14])

    def test_types(self):
        # REPLACE keeps only the last value written; APPEND accumulates all.
        properties_client.cmd_set_properties([(666, 'test_1', 'x.1'),
                                              (777, 'test_2', 13),
                                              (666, 'test_1', 'x.2'),
                                              (777, 'test_2', 14)])
        stored = properties_client.cmd_get_properties({666: ['test_1', 'test_2'],
                                                       777: ['test_1', 'test_2']})
        self.assertEqual(stored[666].test_1, 'x.2')
        self.assertEqual(stored[666].test_2, [])
        self.assertEqual(stored[777].test_1, None)
        self.assertEqual(stored[777].test_2, [13, 14])

    def test_unknown_property(self):
        with self.assertRaises(exceptions.TTPropertiesError):
            properties_client.cmd_set_properties([(666, 'unknown', 'x.1')])
        with self.assertRaises(exceptions.TTPropertiesError):
            properties_client.cmd_get_properties({666: ['unknown']})


class SetGetSimplifiedTests(utils_testcase.TestCase):
    """Single-property convenience API: cmd_set_property / cmd_get_object_property."""

    def setUp(self):
        properties_client.cmd_debug_clear_service()

    def test_simple(self):
        properties_client.cmd_set_property(object_id=666, name='test_1', value='x.1')
        value = properties_client.cmd_get_object_property(object_id=666, name='test_1')
        self.assertEqual(value, 'x.1')

    def test_default(self):
        # Nothing was written, so the APPEND property comes back as its default list.
        value = properties_client.cmd_get_object_property(object_id=666, name='test_2')
        self.assertEqual(value, [])

    def test_wrong_properties(self):
        with self.assertRaises(exceptions.TTPropertiesError):
            properties_client.cmd_set_property(object_id=666, name='unknown', value='x.1')
        with self.assertRaises(exceptions.TTPropertiesError):
            properties_client.cmd_get_object_property(object_id=666, name='unknown')


class GetAllObjectProperties(utils_testcase.TestCase):
    """cmd_get_all_object_properties returns every property of one object."""

    def setUp(self):
        properties_client.cmd_debug_clear_service()

    def test_simple(self):
        properties_client.cmd_set_properties([(666, 'test_1', 'x.1'),
                                              (666, 'test_2', 13),
                                              (777, 'test_2', 14)])
        stored = properties_client.cmd_get_all_object_properties(object_id=666)
        self.assertEqual(stored.test_1, 'x.1')
        self.assertEqual(stored.test_2, [13])
        stored = properties_client.cmd_get_all_object_properties(object_id=777)
        self.assertEqual(stored.test_1, None)
        # NOTE(review): the source listing is truncated here ("...").

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful