Best Python code snippet using avocado_python
Source: inspection.py
import unittest
import argparse
# from multiprocessing import Process
from utils import generate_tests, lcs_images, diff_images, case_key_from_id
import sys
import os
# import copy

start_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.realpath(__file__)))


def main(argv=None):
    parser = argparse.ArgumentParser(
        description='Return a list of related pages between two pdfs.')
    parser.add_argument(
        '--include',
        action='append',
        default=['DefaultTest'],
        help="Include additional test classes (default=[DefaultTest]).")
    parser.add_argument(
        '--exclude', action='append', default=[],
        help="Exclude test classes.")
    parser.add_argument(
        '--results',
        type=str,
        default='results.log',
        help="Test results output file, "
             "each line is a python dictionary (default=results.log).")
    parser.add_argument(
        '--cases',
        type=str,
        default='cases',
        help="Python module which stores test cases (default=cases).")
    parser.add_argument(
        '--check',
        type=str,
        choices=['any', 'all'],
        default='all',
        help="Require that any/all test cases pass "
             "for pages to be related (default=all).")
    parser.add_argument('--diff', action='store_true', default=False)
    parser.add_argument(
        '--window',
        type=int,
        default=None,
        help="If the absolute difference of the indexes of two pdf pages "
             "is greater than the window range, the pages are not related "
             "(default=None).")
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('pdf_a', type=str)
    parser.add_argument('pdf_b', type=str)
    args = parser.parse_args(argv)
    settings = vars(args)
    if not os.path.isabs(settings['pdf_a']):
        settings['pdf_a'] = os.path.join(start_dir, settings['pdf_a'])
    if not os.path.isabs(settings['pdf_b']):
        settings['pdf_b'] = os.path.join(start_dir, settings['pdf_b'])
    load_tests = generate_tests(settings)
    # Silence test output unless --debug is given.
    terminal_out = sys.stdout
    if settings['debug']:
        f = sys.stderr
    else:
        f = open(os.devnull, 'w')
        sys.stdout = f
    tests = load_tests._tests
    results = unittest.TestResult()
    cases = {case_key_from_id(test.id()) for test in tests}
    for case in cases:
        checked_tests = []
        test_cases = [
            test for test in tests if case_key_from_id(test.id()) == case
        ]
        # Diagonal tests (page_i == page_j) compare the beginning pages
        # of the two pdfs against each other.
        beginning_tests = [
            test for test in test_cases if test.page_i == test.page_j
        ]
        beginning_tests.sort(key=lambda test: test.page_i)
        start_index = 1
        m_end = max(test.page_i for test in test_cases)
        n_end = max(test.page_j for test in test_cases)
        # Confirm matching pages from the beginning until the first failure.
        while beginning_tests:
            test = beginning_tests.pop(0)
            current_total_failures = len(results.failures)
            test.run(results)
            checked_tests.append(test)
            if current_total_failures < len(results.failures):
                # stop testing beginning indexes
                break
            else:
                f.write(test.id() + " ... ok\n")
                # fail the other pairings of these pages to reduce the problem
                fail_tests = [
                    t for t in test_cases
                    if (t.page_i == test.page_i or t.page_j == test.page_j)
                    and t != test
                ]
                for ft in fail_tests:
                    test_info = (AssertionError, AssertionError(""), None)
                    results.addFailure(ft, test_info)
                    checked_tests.append(ft)
                start_index = start_index + 1
        # Confirm matching pages from the end until the first failure.
        while start_index <= m_end and start_index <= n_end:
            for test in test_cases:
                if test.page_i == m_end and test.page_j == n_end:
                    break  # found the test comparing the current end pages
            current_total_failures = len(results.failures)
            test.run(results)
            checked_tests.append(test)
            if current_total_failures < len(results.failures):
                break
            else:
                f.write(test.id() + " ... ok\n")
                fail_tests = [
                    t for t in test_cases
                    if (t.page_i == test.page_i or t.page_j == test.page_j)
                    and t != test
                ]
                for ft in fail_tests:
                    test_info = (AssertionError, AssertionError(""), None)
                    results.addFailure(ft, test_info)
                    checked_tests.append(ft)
                m_end = m_end - 1
                n_end = n_end - 1
        checked_tests = list(set(checked_tests))  # remove duplicate tests
        # FYI duplicates were never run
        remaining_tests = [t for t in test_cases if t not in checked_tests]
        test_suite = unittest.TestSuite()
        test_suite.addTests(remaining_tests)
        changed_item_results = unittest.TextTestRunner(
            verbosity=3, stream=f, buffer=True, failfast=False
        ).run(test_suite)
        for test, err in changed_item_results.failures:
            results.addFailure(test, (None, None, None))
        for test, err in changed_item_results.errors:
            results.addFailure(test, (None, None, None))
        for test, reason in changed_item_results.skipped:
            results.addError(test, reason)
    sys.stdout = terminal_out
    if settings['diff']:
        diff = diff_images(tests, results, settings['check'])
        print(diff)
    else:
        related_page_list = lcs_images(tests, results, settings['check'])
        print(related_page_list)


if __name__ == "__main__":
    ...
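Structurally, main() applies the common-prefix/common-suffix trimming familiar from diff tools: matching beginning pages (page_i == page_j) are confirmed first, then matching end pages, and only the remaining middle region is handed to the full TextTestRunner. Below is a minimal, self-contained sketch of that trimming idea, assuming pages can be compared with plain equality; trim_common_pages is a hypothetical helper for illustration, not part of the utils module the script imports.

def trim_common_pages(pages_a, pages_b):
    """Return (start, a_end, b_end) bounding the changed middle region."""
    start = 0
    # Trim the common prefix: matching beginning pages.
    while (start < len(pages_a) and start < len(pages_b)
            and pages_a[start] == pages_b[start]):
        start += 1
    a_end, b_end = len(pages_a), len(pages_b)
    # Trim the common suffix: matching end pages.
    while (a_end > start and b_end > start
            and pages_a[a_end - 1] == pages_b[b_end - 1]):
        a_end -= 1
        b_end -= 1
    return start, a_end, b_end

# Pages 0-1 and the last page match, so only the middle needs deep comparison.
print(trim_common_pages(list("abXYc"), list("abZc")))  # (2, 4, 3)

Trimming first keeps the expensive pairwise page comparisons confined to the pages that could actually have changed, which is also why the script bulk-fails the other pairings of each confirmed page.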
Source: test_result.py
...
            Result(UNIQUE_ID)

    def test_result_rate_all_succeeded(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'PASS'})
        result.end_tests()
        self.assertEqual(result.rate, 100.0)

    def test_result_rate_all_succeeded_with_warns(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'PASS'})
        result.check_test({'status': 'WARN'})
        result.end_tests()
        self.assertEqual(result.rate, 100.0)

    def test_result_rate_all_succeeded_with_skips(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'PASS'})
        result.check_test({'status': 'SKIP'})
        result.end_tests()
        self.assertEqual(result.rate, 100.0)

    def test_result_rate_all_succeeded_with_cancelled(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'PASS'})
        result.check_test({'status': 'CANCEL'})
        result.end_tests()
        self.assertEqual(result.rate, 100.0)

    def test_result_rate_half_succeeded(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'PASS'})
        result.check_test({'status': 'FAIL'})
        result.end_tests()
        self.assertEqual(result.rate, 50.0)

    def test_result_rate_none_succeeded(self):
        result = Result(UNIQUE_ID, LOGFILE)
        result.check_test({'status': 'FAIL'})
        result.end_tests()
        self.assertEqual(result.rate, 0.0)

if __name__ == '__main__':
    ...
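The assertions above pin down how rate is computed: WARN, SKIP, and CANCEL results do not count against the pass rate, while FAIL does. The sketch below is a hypothetical, minimal Result that satisfies exactly these tests; Avocado's actual Result class tracks far more state, and the attribute names used here (tests_total, succeeded) are assumptions made for illustration.

class Result:
    """Hypothetical minimal stand-in for avocado's Result, for illustration."""

    # Statuses that do not count against the pass rate (per the tests above).
    NON_FAILING = {'PASS', 'WARN', 'SKIP', 'CANCEL'}

    def __init__(self, unique_id, logfile=None):
        self.unique_id = unique_id
        self.logfile = logfile
        self.tests_total = 0   # assumed attribute name
        self.succeeded = 0     # assumed attribute name
        self.rate = 0.0

    def check_test(self, state):
        self.tests_total += 1
        if state.get('status') in self.NON_FAILING:
            self.succeeded += 1

    def end_tests(self):
        if self.tests_total:
            self.rate = 100.0 * self.succeeded / self.tests_total

result = Result('job-1', 'debug.log')
result.check_test({'status': 'PASS'})
result.check_test({'status': 'FAIL'})
result.end_tests()
assert result.rate == 50.0  # half succeeded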
