How to use the show_suites method in Lemoncheesecake

The Python snippet below is a complete, real-world handler for a show_suites option. It is the main.py of lit, the LLVM Integrated Tester: when the runner is invoked with --show-suites (or --show-tests), main() hands the discovered tests to print_discovered, which lists each test suite together with its source root, exec root, available features, and substitutions, then exits without running anything.

main.py

Source: main.py (GitHub)


1"""2lit - LLVM Integrated Tester.3See lit.pod for more information.4"""5import itertools6import os7import platform8import sys9import time10import lit.cl_arguments11import lit.discovery12import lit.display13import lit.LitConfig14import lit.reports15import lit.run16import lit.Test17import lit.util18def main(builtin_params={}):19 opts = lit.cl_arguments.parse_args()20 params = create_params(builtin_params, opts.user_params)21 is_windows = platform.system() == 'Windows'22 lit_config = lit.LitConfig.LitConfig(23 progname=os.path.basename(sys.argv[0]),24 path=opts.path,25 quiet=opts.quiet,26 useValgrind=opts.useValgrind,27 valgrindLeakCheck=opts.valgrindLeakCheck,28 valgrindArgs=opts.valgrindArgs,29 noExecute=opts.noExecute,30 debug=opts.debug,31 isWindows=is_windows,32 params=params,33 config_prefix=opts.configPrefix,34 echo_all_commands=opts.echoAllCommands)35 discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths,36 opts.indirectlyRunCheck)37 if not discovered_tests:38 sys.stderr.write('error: did not discover any tests for provided path(s)\n')39 sys.exit(2)40 if opts.show_suites or opts.show_tests:41 print_discovered(discovered_tests, opts.show_suites, opts.show_tests)42 sys.exit(0)43 if opts.show_used_features:44 features = set(itertools.chain.from_iterable(t.getUsedFeatures() for t in discovered_tests))45 print(' '.join(sorted(features)))46 sys.exit(0)47 # Command line overrides configuration for maxIndividualTestTime.48 if opts.maxIndividualTestTime is not None: # `not None` is important (default: 0)49 if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:50 lit_config.note(('The test suite configuration requested an individual'51 ' test timeout of {0} seconds but a timeout of {1} seconds was'52 ' requested on the command line. Forcing timeout to be {1}'53 ' seconds')54 .format(lit_config.maxIndividualTestTime,55 opts.maxIndividualTestTime))56 lit_config.maxIndividualTestTime = opts.maxIndividualTestTime57 determine_order(discovered_tests, opts.order)58 selected_tests = [t for t in discovered_tests if59 opts.filter.search(t.getFullName())]60 if not selected_tests:61 sys.stderr.write('error: filter did not match any tests '62 '(of %d discovered). ' % len(discovered_tests))63 if opts.allow_empty_runs:64 sys.stderr.write("Suppressing error because '--allow-empty-runs' "65 'was specified.\n')66 sys.exit(0)67 else:68 sys.stderr.write("Use '--allow-empty-runs' to suppress this "69 'error.\n')70 sys.exit(2)71 # When running multiple shards, don't include skipped tests in the xunit72 # output since merging the files will result in duplicates.73 tests_for_report = discovered_tests74 if opts.shard:75 (run, shards) = opts.shard76 selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)77 tests_for_report = selected_tests78 if not selected_tests:79 sys.stderr.write('warning: shard does not contain any tests. 
'80 'Consider decreasing the number of shards.\n')81 sys.exit(0)82 selected_tests = selected_tests[:opts.max_tests]83 mark_excluded(discovered_tests, selected_tests)84 start = time.time()85 run_tests(selected_tests, lit_config, opts, len(discovered_tests))86 elapsed = time.time() - start87 if opts.time_tests:88 print_histogram(discovered_tests)89 print_results(discovered_tests, elapsed, opts)90 for report in opts.reports:91 report.write_results(tests_for_report, elapsed)92 if lit_config.numErrors:93 sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)94 sys.exit(2)95 if lit_config.numWarnings:96 sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)97 has_failure = any(t.isFailure() for t in discovered_tests)98 if has_failure:99 sys.exit(1)100def create_params(builtin_params, user_params):101 def parse(p):102 return p.split('=', 1) if '=' in p else (p, '')103 params = dict(builtin_params)104 params.update([parse(p) for p in user_params])105 return params106def print_discovered(tests, show_suites, show_tests):107 tests.sort(key=lit.reports.by_suite_and_test_path)108 if show_suites:109 tests_by_suite = itertools.groupby(tests, lambda t: t.suite)110 print('-- Test Suites --')111 for suite, test_iter in tests_by_suite:112 test_count = sum(1 for _ in test_iter)113 print(' %s - %d tests' % (suite.name, test_count))114 print(' Source Root: %s' % suite.source_root)115 print(' Exec Root : %s' % suite.exec_root)116 features = ' '.join(sorted(suite.config.available_features))117 print(' Available Features: %s' % features)118 substitutions = sorted(suite.config.substitutions)119 substitutions = ('%s => %s' % (x, y) for (x, y) in substitutions)120 substitutions = '\n'.ljust(30).join(substitutions)121 print(' Available Substitutions: %s' % substitutions)122 if show_tests:123 print('-- Available Tests --')124 for t in tests:125 print(' %s' % t.getFullName())126def determine_order(tests, order):127 from lit.cl_arguments import TestOrder128 if order == TestOrder.EARLY_TESTS_THEN_BY_NAME:129 tests.sort(key=lambda t: (not t.isEarlyTest(), t.getFullName()))130 elif order == TestOrder.FAILING_FIRST:131 def by_mtime(test):132 return os.path.getmtime(test.getFilePath())133 tests.sort(key=by_mtime, reverse=True)134 elif order == TestOrder.RANDOM:135 import random136 random.shuffle(tests)137def touch_file(test):138 if test.isFailure():139 os.utime(test.getFilePath(), None)140def filter_by_shard(tests, run, shards, lit_config):141 test_ixs = range(run - 1, len(tests), shards)142 selected_tests = [tests[i] for i in test_ixs]143 # For clarity, generate a preview of the first few test indices in the shard144 # to accompany the arithmetic expression.145 preview_len = 3146 preview = ', '.join([str(i + 1) for i in test_ixs[:preview_len]])147 if len(test_ixs) > preview_len:148 preview += ', ...'149 msg = f'Selecting shard {run}/{shards} = ' \150 f'size {len(selected_tests)}/{len(tests)} = ' \151 f'tests #({shards}*k)+{run} = [{preview}]'152 lit_config.note(msg)153 return selected_tests154def mark_excluded(discovered_tests, selected_tests):155 excluded_tests = set(discovered_tests) - set(selected_tests)156 result = lit.Test.Result(lit.Test.EXCLUDED)157 for t in excluded_tests:158 t.setResult(result)159def run_tests(tests, lit_config, opts, discovered_tests):160 workers = min(len(tests), opts.workers)161 display = lit.display.create_display(opts, len(tests), discovered_tests,162 workers)163 def progress_callback(test):164 display.update(test)165 if opts.order == 
lit.cl_arguments.TestOrder.FAILING_FIRST:166 touch_file(test)167 run = lit.run.Run(tests, lit_config, workers, progress_callback,168 opts.max_failures, opts.timeout)169 display.print_header()170 interrupted = False171 error = None172 try:173 execute_in_tmp_dir(run, lit_config)174 except KeyboardInterrupt:175 interrupted = True176 error = ' interrupted by user'177 except lit.run.MaxFailuresError:178 error = 'warning: reached maximum number of test failures'179 except lit.run.TimeoutError:180 error = 'warning: reached timeout'181 display.clear(interrupted)182 if error:183 sys.stderr.write('%s, skipping remaining tests\n' % error)184def execute_in_tmp_dir(run, lit_config):185 # Create a temp directory inside the normal temp directory so that we can186 # try to avoid temporary test file leaks. The user can avoid this behavior187 # by setting LIT_PRESERVES_TMP in the environment, so they can easily use188 # their own temp directory to monitor temporary file leaks or handle them at189 # the buildbot level.190 tmp_dir = None191 if 'LIT_PRESERVES_TMP' not in os.environ:192 import tempfile193 tmp_dir = tempfile.mkdtemp(prefix="lit_tmp_")194 os.environ.update({195 'TMPDIR': tmp_dir,196 'TMP': tmp_dir,197 'TEMP': tmp_dir,198 'TEMPDIR': tmp_dir,199 })200 try:201 run.execute()202 finally:203 if tmp_dir:204 try:205 import shutil206 shutil.rmtree(tmp_dir)207 except Exception as e: 208 lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)209def print_histogram(tests):210 test_times = [(t.getFullName(), t.result.elapsed)211 for t in tests if t.result.elapsed]212 if test_times:213 lit.util.printHistogram(test_times, title='Tests')214def print_results(tests, elapsed, opts):215 tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}216 for test in tests:217 tests_by_code[test.result.code].append(test)218 for code in lit.Test.ResultCode.all_codes():219 print_group(tests_by_code[code], code, opts.shown_codes)220 print_summary(tests_by_code, opts.quiet, elapsed)221def print_group(tests, code, shown_codes):222 if not tests:223 return224 if not code.isFailure and code not in shown_codes:225 return226 print('*' * 20)227 print('{} Tests ({}):'.format(code.label, len(tests)))228 for test in tests:229 print(' %s' % test.getFullName())230 sys.stdout.write('\n')231def print_summary(tests_by_code, quiet, elapsed):232 if not quiet:233 print('\nTesting Time: %.2fs' % elapsed)234 codes = [c for c in lit.Test.ResultCode.all_codes()235 if not quiet or c.isFailure]236 groups = [(c.label, len(tests_by_code[c])) for c in codes]237 groups = [(label, count) for label, count in groups if count]238 if not groups:239 return240 max_label_len = max(len(label) for label, _ in groups)241 max_count_len = max(len(str(count)) for _, count in groups)242 for (label, count) in groups:243 label = label.ljust(max_label_len)244 count = str(count).rjust(max_count_len)...
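The heart of print_discovered is a sort-then-group pass: the tests are first sorted so that all tests of one suite sit next to each other, and itertools.groupby then yields one group per suite. Two details matter: groupby only merges adjacent equal keys (hence the sort), and each group iterator can be consumed only once. Here is a minimal, self-contained sketch of the same technique; the Suite and Test namedtuples are hypothetical stand-ins that only mimic the attributes lit's real objects expose, not lit's API.

import itertools
from collections import namedtuple

# Hypothetical stand-ins for lit's suite and test objects (assumption, not lit's API).
Suite = namedtuple('Suite', ['name'])
Test = namedtuple('Test', ['suite', 'path'])

smoke = Suite('smoke')
regression = Suite('regression')
tests = [
    Test(regression, 'r/b.txt'),
    Test(smoke, 's/a.txt'),
    Test(regression, 'r/a.txt'),
]

# groupby only merges *adjacent* equal keys, so sorting by suite must come first.
tests.sort(key=lambda t: (t.suite.name, t.path))
for suite, group in itertools.groupby(tests, key=lambda t: t.suite):
    members = list(group)  # each group iterator is consumed once; materialize it
    print('  %s - %d tests' % (suite.name, len(members)))
# prints:
#   regression - 2 tests
#   smoke - 1 tests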

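filter_by_shard distributes tests round-robin: shard run of shards takes the tests at indices run-1, run-1+shards, run-1+2*shards, and so on, so several machines can split a run without coordinating. A quick standalone check of that arithmetic (the integer "tests" below are illustrative only):

# Round-robin sharding as in filter_by_shard: shard `run` of `shards`
# takes the tests at indices run-1, run-1+shards, run-1+2*shards, ...
def shard(tests, run, shards):
    return [tests[i] for i in range(run - 1, len(tests), shards)]

tests = list(range(10))  # ten dummy "tests", numbered 0..9
assert shard(tests, 1, 3) == [0, 3, 6, 9]
assert shard(tests, 2, 3) == [1, 4, 7]
assert shard(tests, 3, 3) == [2, 5, 8]
# Every test lands in exactly one shard, and shard sizes differ by at most one.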
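execute_in_tmp_dir is also worth imitating outside lit: it creates a private temp directory, points the common temp environment variables at it so leaked test files land there instead of the system temp dir, and removes the directory in a finally block. The sketch below applies the same pattern to an arbitrary callable; unlike the snippet above it also restores the previous environment afterwards, and run_in_private_tmp is a hypothetical helper name, not part of lit.

import os
import shutil
import tempfile

def run_in_private_tmp(action):
    # Point the usual temp-dir variables at a throwaway directory while
    # `action` runs, then clean up -- mirroring lit's execute_in_tmp_dir.
    tmp_dir = tempfile.mkdtemp(prefix='lit_tmp_')
    saved = {k: os.environ.get(k) for k in ('TMPDIR', 'TMP', 'TEMP', 'TEMPDIR')}
    os.environ.update({k: tmp_dir for k in saved})
    try:
        return action()
    finally:
        # Restore the previous environment (an addition over lit's version)
        # and delete whatever the action leaked into the private directory.
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
        shutil.rmtree(tmp_dir, ignore_errors=True)

run_in_private_tmp(lambda: print('temp files now go to', os.environ.get('TMPDIR')))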

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test, through following best practices, to diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

