How to use the check_regressions method in pytest-benchmark

Best Python code snippets using pytest-benchmark
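In pytest-benchmark itself, regression checking is normally driven from the command line: you save a baseline run, then compare later runs against it with --benchmark-compare and --benchmark-compare-fail, and the benchmark session fails the run when a compared statistic exceeds the allowed threshold (internally, this is the step the session-level check_regressions check enforces). The snippets further down the page come from projects that implement their own regression checks under the same name. Below is a minimal sketch of the pytest-benchmark workflow; the file name, function names, and workload are illustrative, not taken from those snippets.

# test_fib_bench.py -- hypothetical example; the `benchmark` fixture is
# provided by the pytest-benchmark plugin.

def fib(n: int) -> int:
    # Deliberately naive recursion so there is something measurable to time.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def test_fib_benchmark(benchmark):
    # benchmark() calls fib(15) repeatedly and records timing statistics
    # that can be saved and compared across runs.
    result = benchmark(fib, 15)
    assert result == 610

Typical usage, assuming the commands are run from the project root:

pytest --benchmark-autosave
    # saves the run, e.g. under .benchmarks/ as 0001_...
pytest --benchmark-compare=0001 --benchmark-compare-fail=mean:10%
    # fails the session if mean time regresses by more than 10% vs. run 0001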

regression.py

Source: regression.py (GitHub)

#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import json
from typing import Dict, List, Optional

from onefuzztypes.enums import ContainerType, TaskDebugFlag, TaskType
from onefuzztypes.models import NotificationConfig, RegressionReport
from onefuzztypes.primitives import Container, Directory, File, PoolName

from onefuzz.api import Command

from . import JobHelper


class Regression(Command):
    """Regression job"""

    def _check_regression(self, container: Container, file: File) -> bool:
        content = self.onefuzz.containers.files.get(Container(container), file)
        as_str = content.decode()
        as_obj = json.loads(as_str)
        report = RegressionReport.parse_obj(as_obj)
        if report.crash_test_result.crash_report is not None:
            return True
        if report.crash_test_result.no_repro is not None:
            return False
        raise Exception("invalid crash report")

    def generic(
        self,
        project: str,
        name: str,
        build: str,
        pool_name: PoolName,
        *,
        reports: Optional[List[str]] = None,
        crashes: Optional[List[File]] = None,
        target_exe: File = File("fuzz.exe"),
        tags: Optional[Dict[str, str]] = None,
        notification_config: Optional[NotificationConfig] = None,
        target_env: Optional[Dict[str, str]] = None,
        setup_dir: Optional[Directory] = None,
        reboot_after_setup: bool = False,
        target_options: Optional[List[str]] = None,
        dryrun: bool = False,
        duration: int = 24,
        crash_report_timeout: Optional[int] = None,
        debug: Optional[List[TaskDebugFlag]] = None,
        check_retry_count: Optional[int] = None,
        check_fuzzer_help: bool = True,
        delete_input_container: bool = True,
        check_regressions: bool = False,
    ) -> None:
        """
        generic regression task
        :param File crashes: Specify crashing input files to check in the regression task
        :param str reports: Specify specific report names to verify in the regression task
        :param bool check_regressions: Specify if exceptions should be thrown on finding crash regressions
        :param bool delete_input_container: Specify whether or not to delete the input container
        """
        self._create_job(
            TaskType.generic_regression,
            project,
            name,
            build,
            pool_name,
            crashes=crashes,
            reports=reports,
            target_exe=target_exe,
            tags=tags,
            notification_config=notification_config,
            target_env=target_env,
            setup_dir=setup_dir,
            reboot_after_setup=reboot_after_setup,
            target_options=target_options,
            dryrun=dryrun,
            duration=duration,
            crash_report_timeout=crash_report_timeout,
            debug=debug,
            check_retry_count=check_retry_count,
            check_fuzzer_help=check_fuzzer_help,
            delete_input_container=delete_input_container,
            check_regressions=check_regressions,
        )

    def libfuzzer(
        self,
        project: str,
        name: str,
        build: str,
        pool_name: PoolName,
        *,
        reports: Optional[List[str]] = None,
        crashes: Optional[List[File]] = None,
        target_exe: File = File("fuzz.exe"),
        tags: Optional[Dict[str, str]] = None,
        notification_config: Optional[NotificationConfig] = None,
        target_env: Optional[Dict[str, str]] = None,
        setup_dir: Optional[Directory] = None,
        reboot_after_setup: bool = False,
        target_options: Optional[List[str]] = None,
        dryrun: bool = False,
        duration: int = 24,
        crash_report_timeout: Optional[int] = None,
        debug: Optional[List[TaskDebugFlag]] = None,
        check_retry_count: Optional[int] = None,
        check_fuzzer_help: bool = True,
        delete_input_container: bool = True,
        check_regressions: bool = False,
    ) -> None:
        """
        libfuzzer regression task
        :param File crashes: Specify crashing input files to check in the regression task
        :param str reports: Specify specific report names to verify in the regression task
        :param bool check_regressions: Specify if exceptions should be thrown on finding crash regressions
        :param bool delete_input_container: Specify whether or not to delete the input container
        """
        self._create_job(
            TaskType.libfuzzer_regression,
            project,
            name,
            build,
            pool_name,
            crashes=crashes,
            reports=reports,
            target_exe=target_exe,
            tags=tags,
            notification_config=notification_config,
            target_env=target_env,
            setup_dir=setup_dir,
            reboot_after_setup=reboot_after_setup,
            target_options=target_options,
            dryrun=dryrun,
            duration=duration,
            crash_report_timeout=crash_report_timeout,
            debug=debug,
            check_retry_count=check_retry_count,
            check_fuzzer_help=check_fuzzer_help,
            delete_input_container=delete_input_container,
            check_regressions=check_regressions,
        )

    def _create_job(
        self,
        task_type: TaskType,
        project: str,
        name: str,
        build: str,
        pool_name: PoolName,
        *,
        crashes: Optional[List[File]] = None,
        reports: Optional[List[str]] = None,
        target_exe: File = File("fuzz.exe"),
        tags: Optional[Dict[str, str]] = None,
        notification_config: Optional[NotificationConfig] = None,
        target_env: Optional[Dict[str, str]] = None,
        setup_dir: Optional[Directory] = None,
        reboot_after_setup: bool = False,
        target_options: Optional[List[str]] = None,
        dryrun: bool = False,
        duration: int = 24,
        crash_report_timeout: Optional[int] = None,
        debug: Optional[List[TaskDebugFlag]] = None,
        check_retry_count: Optional[int] = None,
        check_fuzzer_help: bool = True,
        delete_input_container: bool = True,
        check_regressions: bool = False,
    ) -> None:
        if dryrun:
            return None

        self.logger.info("creating regression task from template")

        helper = JobHelper(
            self.onefuzz,
            self.logger,
            project,
            name,
            build,
            duration,
            pool_name=pool_name,
            target_exe=target_exe,
        )
        helper.define_containers(
            ContainerType.setup,
            ContainerType.crashes,
            ContainerType.reports,
            ContainerType.no_repro,
            ContainerType.unique_reports,
            ContainerType.regression_reports,
        )

        containers = [
            (ContainerType.setup, helper.containers[ContainerType.setup]),
            (ContainerType.crashes, helper.containers[ContainerType.crashes]),
            (ContainerType.reports, helper.containers[ContainerType.reports]),
            (ContainerType.no_repro, helper.containers[ContainerType.no_repro]),
            (
                ContainerType.unique_reports,
                helper.containers[ContainerType.unique_reports],
            ),
            (
                ContainerType.regression_reports,
                helper.containers[ContainerType.regression_reports],
            ),
        ]

        if crashes:
            helper.containers[
                ContainerType.readonly_inputs
            ] = helper.get_unique_container_name(ContainerType.readonly_inputs)
            containers.append(
                (
                    ContainerType.readonly_inputs,
                    helper.containers[ContainerType.readonly_inputs],
                )
            )

        helper.create_containers()
        if crashes:
            for file in crashes:
                self.onefuzz.containers.files.upload_file(
                    helper.containers[ContainerType.readonly_inputs], file
                )

        helper.setup_notifications(notification_config)
        helper.upload_setup(setup_dir, target_exe)
        target_exe_blob_name = helper.setup_relative_blob_name(target_exe, setup_dir)

        self.logger.info("creating regression task")
        task = self.onefuzz.tasks.create(
            helper.job.job_id,
            task_type,
            target_exe_blob_name,
            containers,
            pool_name=pool_name,
            duration=duration,
            vm_count=1,
            reboot_after_setup=reboot_after_setup,
            target_options=target_options,
            target_env=target_env,
            tags=tags,
            target_timeout=crash_report_timeout,
            check_retry_count=check_retry_count,
            debug=debug,
            check_fuzzer_help=check_fuzzer_help,
            report_list=reports,
        )

        helper.wait_for_stopped = check_regressions

        self.logger.info("done creating tasks")
        helper.wait()

        if check_regressions:
            task = self.onefuzz.tasks.get(task.task_id)
            if task.error:
                raise Exception("task failed: %s", task.error)
            container = helper.containers[ContainerType.regression_reports]
            for filename in self.onefuzz.containers.files.list(container).files:
                self.logger.info("checking file: %s", filename)
                if self._check_regression(container, File(filename)):
                    raise Exception(f"regression identified: {filename}")
            self.logger.info("no regressions")

        if (
            delete_input_container
            and ContainerType.readonly_inputs in helper.containers
        ):
            ...
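In the listing above, the decision behind check_regressions is made in _check_regression: a RegressionReport whose crash_test_result still carries a crash_report means the previously fixed crash reproduces again (a regression), while a populated no_repro means it no longer reproduces. The sketch below mirrors that decision on local JSON files instead of blobs fetched from an Azure container; the directory name and function name are hypothetical.

import json
import pathlib

def is_regression(report_path: str) -> bool:
    # Mirrors _check_regression above, but reads a local regression report file.
    with open(report_path) as handle:
        crash_test_result = json.load(handle)["crash_test_result"]
    if crash_test_result.get("crash_report") is not None:
        return True   # the old crash reproduces again
    if crash_test_result.get("no_repro") is not None:
        return False  # the crash no longer reproduces
    raise Exception("invalid crash report")

# Same loop as the check_regressions branch of _create_job, over a local folder:
for path in pathlib.Path("regression_reports").glob("*.json"):
    if is_regression(str(path)):
        raise Exception(f"regression identified: {path.name}")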

hooks.py

Source: hooks.py (GitHub)

...
from .lib.regression.regressions import check_regressions
from .lib.util import get_dict_keys
from .settings import AUTH_FIELD

def check_and_notify_regressions(project, user):
    regressions = check_regressions(user, project)
    if regressions:
        notify_regressions(user, project, regressions)

def add_upload_token_to_projects(projects):
    repo = UploadTokenRepo(app)
    for project in projects:
        project['uploadToken'] = repo.find_token_by_project(
            project[ID_FIELD])['token']

def before_insert_users(users):
    for user in users:
        user['password'] = hash_password(user['password'])
        if 'email' not in user:
            user['email'] = ''
    return users

def after_insert_users(users):
    ...

regressions.py

Source: regressions.py (GitHub)

...
        subset,
        min(m['timestamp'] for m in subset)
    )
    return averages

def check_regressions(user, project):
    view_repo = ViewRepo(app)
    regressions = []
    for view in view_repo.get_views_with_watches(user, project):
        regressions += get_regressions(user, project, view, 1.10)
    ...
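The 1.10 passed to get_regressions above is the threshold ratio: roughly, a metric counts as regressed when its new average is more than 10% worse than the baseline. Since get_regressions itself is not shown on this page, the sketch below is a hypothetical, self-contained version of that kind of ratio check rather than the project's actual implementation.

from typing import Dict, List

def find_regressions(
    baseline: Dict[str, float],
    current: Dict[str, float],
    ratio: float = 1.10,
) -> List[str]:
    # A metric regresses when its new average exceeds baseline * ratio.
    regressed = []
    for metric, old_avg in baseline.items():
        new_avg = current.get(metric)
        if new_avg is not None and new_avg > old_avg * ratio:
            regressed.append(metric)
    return regressed

# Example: a 15% slowdown on "render_ms" trips the 10% threshold.
print(find_regressions({"render_ms": 20.0, "parse_ms": 5.0},
                       {"render_ms": 23.0, "parse_ms": 5.1}))  # ['render_ms']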

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytest-benchmark automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

