How to use the prepare_benchmarks method in pytest-benchmark

The Python code snippets below show how prepare_benchmarks is defined and used, starting with pytest-benchmark's own session.py and followed by two unrelated projects that define functions of the same name.
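In pytest-benchmark, prepare_benchmarks is an internal method of BenchmarkSession: it runs at the end of the test session (see finish below) and yields one flat dict of stats per benchmark, plus one extra dict per saved run loaded for comparison. The snippet below is a minimal, illustrative test that feeds data into that pipeline; the test and helper names are made up for this example, while --benchmark-autosave and --benchmark-compare are standard pytest-benchmark options.

# test_sorting.py -- minimal pytest-benchmark test (illustrative example).
# Running `pytest --benchmark-autosave` stores the results as JSON; a later
# run with `--benchmark-compare` loads them back, and
# BenchmarkSession.prepare_benchmarks() merges the saved and current stats
# into the flat dicts used for the comparison table.
import random


def sort_payload():
    data = [random.random() for _ in range(1000)]
    return sorted(data)


def test_sort_speed(benchmark):
    result = benchmark(sort_payload)   # the benchmark fixture times the callable
    assert result == sorted(result)    # sanity check on the returned value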

session.py

Source: session.py (GitHub)

...
            config=self.config,
            machine_info=obj
        )
        return obj

    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False, cprofile=self.cprofile_sort_by)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True, cprofile=self.cprofile_sort_by)
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

    def save_json(self, output_json):
        with self.json as fh:
            fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

    def handle_saving(self):
        save = self.save or self.autosave
        if save or self.json:
            if not self.benchmarks:
                self.logger.warn("Not saving anything, no benchmarks have been run!")
                return
            machine_info = self.get_machine_info()
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

            if self.json:
                output_json = self.config.hook.pytest_benchmark_generate_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    include_data=True,
                    machine_info=machine_info,
                    commit_info=commit_info,
                )
                self.config.hook.pytest_benchmark_update_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    output_json=output_json,
                )
                self.save_json(output_json)

            if save:
                output_json = self.config.hook.pytest_benchmark_generate_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    include_data=self.save_data,
                    machine_info=machine_info,
                    commit_info=commit_info,
                )
                self.config.hook.pytest_benchmark_update_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    output_json=output_json,
                )
                self.storage.save(output_json, save)

    def handle_loading(self):
        compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load())[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                else:
                    msg += " match %r." % self.compare
                self.logger.warn(msg)

            machine_info = self.get_machine_info()
            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path, newline=False)
        self.compared_mapping = compared_mapping

    def finish(self):
        self.handle_saving()
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

    def display(self, tr):
        if not self.groups:
            return
        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
...
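As the code shows, prepare_benchmarks yields one flat dict per benchmark from the current run (path set to None, source "NOW" when a comparison is active) and, for every stored run loaded by handle_loading, an extra dict whose stats come from the saved JSON. handle_saving pushes the collected results through the pytest_benchmark_generate_json and pytest_benchmark_update_json hooks before writing them out. Below is a hedged sketch of the update hook in a conftest.py; the build_tag field and the CI_BUILD_TAG environment variable are illustrative additions, not part of pytest-benchmark's schema.

# conftest.py -- sketch of the pytest_benchmark_update_json hook that
# handle_saving() above invokes right before save_json()/storage.save().
import os


def pytest_benchmark_update_json(config, benchmarks, output_json):
    # output_json is the dict that will be serialized to the .json storage
    # file; mutating it here records extra context alongside the saved stats.
    # "build_tag" and CI_BUILD_TAG are made-up names for this example.
    output_json["build_tag"] = os.environ.get("CI_BUILD_TAG", "local")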

PerfTest_c-compiler.py

Source: PerfTest_c-compiler.py (GitHub)

...
    compile_time_in_sec = float(m)*60 + float(s)

    return compile_time_in_sec

def prepare_benchmarks(root):
    items = os.listdir(root)
    for item in items:
        path = os.path.join(root, item)
        if os.path.isdir(path):
            prepare_benchmarks(path)
        elif os.path.splitext('%s' % path)[1] in extensions:
            Glob.benchmarks.append(path)

def parse_args():
    # Error handling
    if not len(sys.argv) == 4:
        print('Argument Error. '
              'Usage:\n\tpython %s <c_compiler_name> <benchmark_dir> <repeats>'
              '\t\t<c_compiler_name> Only Support gcc, g++, clang or clang++'
              '\t\t<benchmark_dir> The root directory of benchmark files.'
              '\t\t<repeats> How many times do you want to run repeatedly for a case.'
              % (sys.argv[0]))
        exit(1)
    elif sys.argv[1] not in {'gcc', 'g++', 'clang', 'clang++'}:
        print('Argument 1 Error. Only gcc, g++, clang, clang++ is supported.')
        exit(1)
    elif not os.path.exists(sys.argv[2]) or os.path.isfile(sys.argv[2]):
        print('Argument 2 Error. Please give a valid directory.')
        exit(1)

    # write user parameters
    Glob.c_compiler = sys.argv[1]
    Glob.benchmark_dir = sys.argv[2]
    Glob.REPEATS = int(sys.argv[3])

def main():
    parse_args()
    prepare_benchmarks(Glob.benchmark_dir)
    # the error log. When compilation fails, write messages to it.
    logfile = open('log.err', 'w')
    # statistics
    suc = 0
    fail = 0
    total = len(options) * len(Glob.benchmarks)
    cur = 0
    # start performance testing.
    for j in range(0, len(Glob.benchmarks)):
        result_file = open('[Result]%s.txt' % Glob.benchmarks[j].split('/')[-1].split('.')[0], 'w')
        for i in range(0, len(options)):
            cur += 1

            compile_times = []
...
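This standalone compiler-benchmarking script reuses the prepare_benchmarks name for something unrelated to pytest-benchmark: a recursive walk that appends every file whose extension appears in its extensions set to Glob.benchmarks. The same collection step can be written without explicit recursion; the sketch below uses os.walk and returns a list instead of mutating a global, and the EXTENSIONS value is chosen here purely for illustration.

# A self-contained sketch of the same collection step using os.walk.
# EXTENSIONS is an assumption; the original script defines its own
# `extensions` set and `Glob` container.
import os

EXTENSIONS = {'.c', '.cpp'}


def prepare_benchmarks(root):
    benchmarks = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if os.path.splitext(name)[1] in EXTENSIONS:
                benchmarks.append(os.path.join(dirpath, name))
    return benchmarks


if __name__ == '__main__':
    for path in prepare_benchmarks('.'):
        print(path)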

data.py

Source: data.py (GitHub)

...
        with io.open(filename, 'rU') as fh:
            trialmap[trial_name] = json.load(fh)
        yield mode, trialmap

@gen_dict  # {'mode': [{'test': min}...]}
def prepare_benchmarks(raw_benchmarks, trial_names):
    for mode, trialmap in raw_benchmarks.items():
        envlist = []
        for trial_name in trial_names:
            trial = trialmap.get(trial_name, {}).get('benchmarks', [])
            benchenv = dict((bench['fullname'], bench['stats'].get('min'))
                            for bench in trial)
            envlist.append(benchenv)
        yield mode, envlist

def load_benchmarks(bench_storage, modes):
    trial_names, benchmark_files, _ = ls_bench_storage(bench_storage, modes)
    return load_benchmarks_from_files(benchmark_files, trial_names)

def load_benchmarks_from_files(benchmark_files, trial_names):
    raw_benchmarks = load_raw_benchmarks(benchmark_files)
    benchmarks = prepare_benchmarks(raw_benchmarks, trial_names)
...
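Here prepare_benchmarks reshapes raw pytest-benchmark JSON into one dict per trial, keyed by benchmark fullname and holding the minimum timing, grouped under each mode. The gen_dict decorator it relies on is defined elsewhere in that codebase; a plausible sketch, assuming it simply collects the yielded key/value pairs into a dict, is shown below together with a toy usage. The real implementation may differ.

# Hedged sketch of a gen_dict-style decorator: wrap a generator of
# (key, value) pairs and return the collected dict.
import functools


def gen_dict(genfunc):
    @functools.wraps(genfunc)
    def wrapper(*args, **kwargs):
        return dict(genfunc(*args, **kwargs))
    return wrapper


@gen_dict
def demo():
    # toy data in the {'mode': [{'fullname': min}, ...]} shape described above
    yield 'py36', [{'tests/test_x.py::test_sort': 0.0012}]
    yield 'py37', [{'tests/test_x.py::test_sort': 0.0011}]


print(demo())  # {'py36': [...], 'py37': [...]}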

