How to use the load_benchmarks method in pytest-benchmark

Best Python code snippets using pytest-benchmark

Note that load_benchmarks is not part of pytest-benchmark's public API. The snippets below come from open-source projects that each define their own load_benchmarks helper; the last one consumes the JSON reports that pytest-benchmark writes.

benchmarking.py

Source: benchmarking.py (GitHub)

# -*- coding: utf-8 -*-

#--------------------------------------------------------------------#
# This file is part of Py-notify.                                    #
#                                                                    #
# Copyright (C) 2007, 2008 Paul Pogonyshev.                          #
#                                                                    #
# This library is free software; you can redistribute it and/or      #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1   #
# of the License, or (at your option) any later version.             #
#                                                                    #
# This library is distributed in the hope that it will be useful,    #
# but WITHOUT ANY WARRANTY; without even the implied warranty of     #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU  #
# Lesser General Public License for more details.                    #
#                                                                    #
# You should have received a copy of the GNU Lesser General Public   #
# License along with this library; if not, write to the Free         #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,        #
# Boston, MA 02110-1301 USA                                          #
#--------------------------------------------------------------------#

import gc
import optparse
import sys
import time

from benchmark.configobj import ConfigObj
from types import FunctionType, ModuleType

import notify
from notify.utils import raise_not_implemented_exception, ClassTypes, StringType

__all__ = ('main', 'load_benchmarks', 'Benchmark', 'BenchmarkSuite', 'BenchmarkProgram')

_NUM_RUNS = 5

def load_benchmarks (source, *benchmark_names):
    toplevel_names = {}

    for name in benchmark_names:
        parts = name.split ('.', 2)
        if len (parts) == 1:
            toplevel_names[parts[0]] = ()
        else:
            if parts[0] in toplevel_names:
                if toplevel_names[parts[0]]:
                    toplevel_names[parts[0]].append (parts[1])
            else:
                toplevel_names[parts[0]] = [parts[1]]

    suite = BenchmarkSuite ()

    if isinstance (source, ModuleType) or toplevel_names:
        if toplevel_names:
            subobjects = toplevel_names.keys ()
        else:
            subobjects = dir (source)

        for name in subobjects:
            object = getattr (source, name)
            if isinstance (object, FunctionType):
                object = object ()

            if isinstance (object, ClassTypes) and issubclass (object, Benchmark):
                suite.append (object ())
            elif isinstance (object, BenchmarkSuite):
                suite.append (object)
            elif isinstance (object, ModuleType):
                if toplevel_names:
                    suite.append (load_benchmarks (object, *toplevel_names[name]))
    else:
        raise TypeError ("unsupported 'source' type (%s)" % type (source))

    return suite

class Benchmark (object):

    def initialize (self):
        pass

    def get_description (self, scale):
        return ("Benchmark '%s.%s', with scale %s"
                % (self.__module__, self.__class__.__name__, scale))

    def get_version (self):
        return notify.__version__

    def execute (self, scale):
        raise_not_implemented_exception (self)

    def finalize (self):
        pass

    def run (self, scale, num_runs = _NUM_RUNS, silent = False):
        if not silent:
            sys.stdout.write ('%s\n' % self.get_description (scale))

        times = []

        for k in range (0, num_runs):
            self.initialize ()

            gc.disable ()
            start = time.clock ()
            self.execute (scale)
            finish = time.clock ()
            gc.enable ()

            self.finalize ()
            times.append (finish - start)

        self.__time = min (times)

        if not silent:
            sys.stdout.write ('Executed in %s s\n\n' % self.__time)

    def has_been_run (self):
        try:
            self.get_time ()
            return True
        except:
            return False

    def get_time (self):
        return self.__time

    def get_full_name (benchmark):
        if isinstance (benchmark, Benchmark):
            return ('%s.%s-%s' % (benchmark.__module__,
                                  benchmark.__class__.__name__,
                                  benchmark.get_version ()))
        else:
            return None

    get_full_name = staticmethod (get_full_name)

class BenchmarkSuite (object):

    def __init__(self):
        self.__children = []

    def append (self, child):
        assert isinstance (child, (Benchmark, BenchmarkSuite))
        self.__children.append (child)

    def run (self, scale, num_runs = _NUM_RUNS, silent = False):
        for child in self.__children:
            child.run (scale, num_runs, silent)

    def __iter__(self):
        return iter (self.__children)

class BenchmarkProgram (object):

    def __init__(self, object = '__main__', default_benchmark_name = None):
        if isinstance (object, StringType):
            self.__object = __import__(object)
            for name_part in object.split ('.') [1:]:
                self.__object = getattr (self.__object, name_part)
        else:
            self.__object = object

        self.__default_benchmark_name = default_benchmark_name
        self.__options = None

        self.load_benchmarks ()
        self.run ()

    def load_benchmarks (self):
        self.__options, benchmark_names = self.__build_parser ().parse_args ()

        if not benchmark_names and self.__default_benchmark_name:
            benchmark_names = (self.__default_benchmark_name,)

        self.__suite = BenchmarkSuite ()
        self.__suite.append (load_benchmarks (self.__object, *benchmark_names))

    def run (self):
        should_run_test = lambda test_or_suite: True

        if self.__options.output is None:
            silent = False
        else:
            silent = True
            results = ConfigObj (self.__options.output)

            if not self.__options.force:
                should_run_test = (lambda test_or_suite:
                                       BenchmarkProgram.__test_is_new (test_or_suite, results))

        num_runs = _NUM_RUNS
        if self.__options.num_runs is not None:
            num_runs = self.__options.num_runs

        if num_runs > 1 and not silent:
            sys.stdout.write (('Each benchmark is executed %d times '
                               'and the best performance is reported\n\n')
                              % _NUM_RUNS)

        self.__do_run (self.__suite, self.__options.scale, num_runs, silent, should_run_test)

        if silent:
            self.__store_results (self.__suite, results)
            results.write ()

    def __build_parser (self):
        parser = optparse.OptionParser ()
        parser.add_option ('-o', '--output')
        parser.add_option ('-f', '--force', action = 'store_true', default = False)
        parser.add_option ('-r', '--num-runs', type = 'int')
        parser.add_option ('-s', '--scale', type = 'float', default = 1.0)
        return parser

    def __do_run (self, suite, scale, num_runs, silent, should_run_test):
        if not should_run_test (suite):
            return

        if isinstance (suite, BenchmarkSuite):
            for benchmark in suite:
                self.__do_run (benchmark, scale, num_runs, silent, should_run_test)
        elif isinstance (suite, Benchmark):
            suite.run (scale, num_runs)

    def __store_results (self, suite, results):
        if isinstance (suite, BenchmarkSuite):
            for benchmark in suite:
                self.__store_results (benchmark, results)
        elif isinstance (suite, Benchmark):
            if not suite.has_been_run ():
                return

            benchmark_name = Benchmark.get_full_name (suite)
            is_new_result = True

            for section in results:
                for name in results[section]:
                    if name == benchmark_name:
                        results[section][name] = suite.get_time ()
                        is_new_result = False

            if is_new_result:
                if 'NEW RESULTS' in results:
                    results['NEW RESULTS'][benchmark_name] = suite.get_time ()
                else:
                    results['NEW RESULTS'] = { benchmark_name: suite.get_time () }

    def __test_is_new (test_or_suite, results):
        if isinstance (test_or_suite, Benchmark):
            benchmark_name = Benchmark.get_full_name (test_or_suite)
            for section in results:
                if benchmark_name in results[section]:
                    return False
        return True

    __test_is_new = staticmethod (__test_is_new)

main = BenchmarkProgram

# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
...
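As a usage sketch, here is a minimal, hypothetical benchmark module written against the API above. The file name my_benchmarks.py, the ListAppend class, and the workload are illustrative assumptions, not part of the Py-notify source, and the sketch presumes the benchmarking module and the notify package are importable.

# my_benchmarks.py -- hypothetical example, not from the Py-notify source.
import benchmarking   # the Py-notify module shown above

class ListAppend (benchmarking.Benchmark):
    def execute (self, scale):
        # The timed workload; `scale` stretches the problem size.
        items = []
        for k in range (int (100000 * scale)):
            items.append (k)

if __name__ == '__main__':
    # main is an alias for BenchmarkProgram: it parses the command line,
    # collects every Benchmark subclass from this module through
    # load_benchmarks (), and runs the resulting suite.  For example
    #     python my_benchmarks.py -r 10 -s 2.0 -o results.ini
    # runs each benchmark ten times at scale 2.0 and stores the best
    # times in results.ini via ConfigObj.
    benchmarking.main ()

Note that Benchmark.run () disables the garbage collector around the timed region and keeps the minimum over num_runs repetitions, so the reported figure is the best observed time rather than an average.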

visualize_benchmarks.py

Source: visualize_benchmarks.py (GitHub)

...
    os.path.dirname(__file__), "..", "data", "benchmarks.jsonl"
)
FIGURE_PATH = os.path.join(os.path.dirname(__file__), "..", "data", "benchmarks.jpg")

@lru_cache()
def load_benchmarks() -> List[Dict]:
    with open(BENCHMARKS_PATH, "r") as f:
        return [json.loads(line.strip()) for line in f.readlines() if line.strip()]

def load_recommended_words() -> List[Dict]:
    benchmarks = load_benchmarks()
    recommended = WordleSolver().recommend()
    words = [recommended.recommended, *recommended.alternatives]
    return [b for b in benchmarks if b["first_guess"] in words]

if __name__ == "__main__":
    plt.figure(figsize=(10, 10), dpi=250)
    benchmarks = load_benchmarks()
    words = [b["first_guess"] for b in benchmarks]
    win_percentages = [b["win_percentage"] for b in benchmarks]
    average_turns = [b["average_turns"] for b in benchmarks]
    plt.plot(win_percentages, average_turns, "b.", markersize=5)

    recommended = load_recommended_words()
    recommended_words = [b["first_guess"] for b in recommended]
    recommended_win_percentages = [b["win_percentage"] for b in recommended]
    recommended_average_turns = [b["average_turns"] for b in recommended]
    plt.plot(recommended_win_percentages, recommended_average_turns, "r.", markersize=5)

    plt.legend(["all words", "recommended"])
    plt.xlabel("Win Percentage")
    plt.ylabel("Avg. Turns to Win")
    for word, pct, turns in zip(words, win_percentages, average_turns):
        plt.annotate(...
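The loader above expects a JSON Lines file: one benchmark record per line, carrying at least the first_guess, win_percentage, and average_turns keys that the plotting code reads. A minimal sketch of that round trip, with invented values:

# Sketch of the benchmarks.jsonl format assumed by load_benchmarks();
# the records below are invented for illustration.
import json

records = [
    {"first_guess": "crane", "win_percentage": 99.1, "average_turns": 3.62},
    {"first_guess": "slate", "win_percentage": 98.8, "average_turns": 3.58},
]

with open("benchmarks.jsonl", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")

# Reading mirrors load_benchmarks(): parse every non-empty line.
with open("benchmarks.jsonl") as f:
    loaded = [json.loads(line) for line in f if line.strip()]

assert loaded == records

Because load_benchmarks() is wrapped in @lru_cache(), the file is parsed once per process; load_recommended_words() then filters the cached records down to the solver's recommended first guesses.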

benchmark_plot.py

Source: benchmark_plot.py (GitHub)

...
        return len(entries) > 0

    @classmethod
    def get_data_rows(cls, benchmark_data):
        if not isinstance(benchmark_data, pandas.DataFrame):
            benchmark_data = cls.load_benchmarks(benchmark_data)
        return benchmark_data.query(cls.query)

    @classmethod
    def load_benchmarks(cls, benchmark_datafiles):
        if isinstance(benchmark_datafiles, (str, bytes)):
            d = json.load(open(benchmark_datafiles))
            rows = cls.extract_benchmark_rows(d)
            rows["benchmark_run"] = benchmark_datafiles
            return rows
        else:
            return pandas.concat([cls.load_benchmarks(f) for f in benchmark_datafiles])

    @staticmethod
    def extract_benchmark_rows(benchmark_data):
        return pandas.DataFrame.from_dict(
            merge(
                dict(
                    group=r["group"],
                    name=r["name"],
                    basename=r["name"].split("[")[0],
                    walltime=t,
                ),
                r["params"] if r["params"] else {},
                dissoc(benchmark_data["commit_info"], "time"),
            )
            for r in benchmark_data["benchmarks"]...
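This loader is the one that actually consumes pytest-benchmark output: the files passed to load_benchmarks are JSON reports such as those written by pytest --benchmark-json=run.json. The sketch below shows the report shape the code above relies on; the values are invented, only the keys actually read (commit_info, benchmarks, group, name, params) are included, and the walltime values t presumably come from each entry's raw stats data (saved with --benchmark-save-data), though that part of the snippet is truncated.

# Illustrative shape of a pytest-benchmark JSON report; values invented.
report = {
    "commit_info": {"id": "abc1234", "branch": "main", "time": "2023-01-01T00:00:00"},
    "benchmarks": [
        {
            "group": None,
            "name": "test_sort[1000]",
            "params": {"n": 1000},
            "stats": {"data": [0.0012, 0.0011, 0.0013]},  # raw timings
        }
    ],
}

Given files of that shape on disk, load_benchmarks accepts either a single path or an iterable of paths: each file becomes a DataFrame tagged with its source path in the benchmark_run column, and multiple files are concatenated with pandas.concat.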
