How to use the bad_test method in hypothesis

Best Python code snippet using hypothesis
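hypothesis itself is a property-based testing library for Python; in the snippet below, bad_test is a plain variable name inside report-flakies.py, not a hypothesis API call. For orientation, here is a minimal, illustrative sketch of typical hypothesis usage (the test function and the property it checks are hypothetical and not taken from the snippet):

from hypothesis import given, strategies as st

# Hypothetical property-based test: sorting an already-sorted list changes nothing.
@given(st.lists(st.integers()))
def test_sorting_is_idempotent(xs):
    once = sorted(xs)
    assert sorted(once) == once

Run it with pytest; hypothesis generates many random integer lists and reports a minimal counterexample if the assertion ever fails.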

report-flakies.py

Source: report-flakies.py (GitHub)
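report-flakies.py is an Apache-licensed Jenkins flaky-test reporter: it walks the builds of one or more Jenkins jobs, uses the companion findHangingTests module to classify failed, timed-out and hanging tests, and renders a flaky-test dashboard. Below is a minimal sketch of the Jenkins REST pattern the script relies on; the build URL is hypothetical.

import requests

# Hypothetical Jenkins build URL; report-flakies.py receives real job URLs via --urls.
build_url = "https://jenkins.example.org/job/SomeJob/123"
info = requests.get(build_url + "/api/json").json()
if not info["building"]:  # skip builds that are still running
    console_text = requests.get(build_url + "/consoleText").text
    # report-flakies.py hands the consoleText URL to findHangingTests.get_bad_tests()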



#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
# To disable 'invalid constant name' warnings.
# pylint: disable=import-error
# Testing environment may not have all dependencies.
"""
This script uses Jenkins REST api to collect test result(s) of given build/builds and generates
flakyness data about unittests.
Print help: report-flakies.py -h
"""
import argparse
import logging
import os
import time
from collections import OrderedDict
from jinja2 import Template
import requests
import findHangingTests

parser = argparse.ArgumentParser()
parser.add_argument(
    '--urls', metavar='URL', action='append', required=True,
    help='Urls to analyze, which can refer to simple projects, multi-configuration projects or '
         'individual build run.')
parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
                    help='List of build numbers to exclude (or "None"). Not required, '
                         'but if specified, number of uses should be same as that of --urls '
                         'since the values are matched.')
parser.add_argument('--max-builds', metavar='n', action='append', type=int,
                    help='The maximum number of builds to use (if available on jenkins). Specify '
                         '0 to analyze all builds. Not required, but if specified, number of uses '
                         'should be same as that of --urls since the values are matched.')
parser.add_argument(
    "--mvn", action="store_true",
    help="Writes two strings for including/excluding these flaky tests using maven flags. These "
         "strings are written to files so they can be saved as artifacts and easily imported in "
         "other projects. Also writes timeout and failing tests in separate files for "
         "reference.")
parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
args = parser.parse_args()

logging.basicConfig()
logger = logging.getLogger(__name__)
if args.verbose:
    logger.setLevel(logging.INFO)


def get_bad_tests(build_url):
    """
    Given url of an executed build, analyzes its console text, and returns
    [list of all tests, list of timeout tests, list of failed tests].
    Returns None if can't get console text or if there is any other error.
    """
    logger.info("Analyzing %s", build_url)
    response = requests.get(build_url + "/api/json").json()
    if response["building"]:
        logger.info("Skipping this build since it is in progress.")
        return {}
    console_url = build_url + "/consoleText"
    build_result = findHangingTests.get_bad_tests(console_url)
    if not build_result:
        logger.info("Ignoring build %s", build_url)
        return
    return build_result


def expand_multi_config_projects(cli_args):
    """
    If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
    get urls for individual jobs.
    """
    job_urls = cli_args.urls
    excluded_builds_arg = cli_args.excluded_builds
    max_builds_arg = cli_args.max_builds
    if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
        raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
                        "since values are matched.")
    if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
        raise Exception("Number of --max-builds arguments should be same as that of --urls "
                        "since values are matched.")
    final_expanded_urls = []
    for (i, job_url) in enumerate(job_urls):
        max_builds = 10000  # Some high number
        if max_builds_arg is not None and max_builds_arg[i] != 0:
            max_builds = int(max_builds_arg[i])
        excluded_builds = []
        if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
            excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
        response = requests.get(job_url + "/api/json").json()
        if response.has_key("activeConfigurations"):
            for config in response["activeConfigurations"]:
                final_expanded_urls.append({'url': config["url"], 'max_builds': max_builds,
                                            'excludes': excluded_builds})
        else:
            final_expanded_urls.append({'url': job_url, 'max_builds': max_builds,
                                        'excludes': excluded_builds})
    return final_expanded_urls


# Set of timeout/failed tests across all given urls.
all_timeout_tests = set()
all_failed_tests = set()
all_hanging_tests = set()
# Contains { <url> : { <bad_test> : { 'all': [<build ids>], 'failed': [<build ids>],
#                                     'timeout': [<build ids>], 'hanging': [<builds ids>] } } }
url_to_bad_test_results = OrderedDict()
# Contains { <url> : [run_ids] }
# Used for common min/max build ids when generating sparklines.
url_to_build_ids = OrderedDict()

# Iterates over each url, gets test results and prints flaky tests.
expanded_urls = expand_multi_config_projects(args)
for url_max_build in expanded_urls:
    url = url_max_build["url"]
    excludes = url_max_build["excludes"]
    json_response = requests.get(url + "/api/json").json()
    if json_response.has_key("builds"):
        builds = json_response["builds"]
        logger.info("Analyzing job: %s", url)
    else:
        builds = [{'number': json_response["id"], 'url': url}]
        logger.info("Analyzing build : %s", url)
    build_id_to_results = {}
    num_builds = 0
    url_to_build_ids[url] = []
    build_ids_without_tests_run = []
    for build in builds:
        build_id = build["number"]
        if build_id in excludes:
            continue
        result = get_bad_tests(build["url"])
        if not result:
            continue
        if len(result[0]) > 0:
            build_id_to_results[build_id] = result
        else:
            build_ids_without_tests_run.append(build_id)
        num_builds += 1
        url_to_build_ids[url].append(build_id)
        if num_builds == url_max_build["max_builds"]:
            break
    url_to_build_ids[url].sort()

    # Collect list of bad tests.
    bad_tests = set()
    for build in build_id_to_results:
        [_, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
        all_timeout_tests.update(timeout_tests)
        all_failed_tests.update(failed_tests)
        all_hanging_tests.update(hanging_tests)
        # Note that timedout tests are already included in failed tests.
        bad_tests.update(failed_tests.union(hanging_tests))

    # For each bad test, get build ids where it ran, timed out, failed or hanged.
    test_to_build_ids = {key: {'all': set(), 'timeout': set(), 'failed': set(),
                               'hanging': set(), 'bad_count': 0}
                         for key in bad_tests}
    for build in build_id_to_results:
        [all_tests, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
        for bad_test in test_to_build_ids:
            is_bad = False
            if all_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]["all"].add(build)
            if timeout_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['timeout'].add(build)
                is_bad = True
            if failed_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['failed'].add(build)
                is_bad = True
            if hanging_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['hanging'].add(build)
                is_bad = True
            if is_bad:
                test_to_build_ids[bad_test]['bad_count'] += 1

    # Calculate flakyness % and successful builds for each test. Also sort build ids.
    for bad_test in test_to_build_ids:
        test_result = test_to_build_ids[bad_test]
        test_result['flakyness'] = test_result['bad_count'] * 100.0 / len(test_result['all'])
        test_result['success'] = (test_result['all'].difference(
            test_result['failed'].union(test_result['hanging'])))
        for key in ['all', 'timeout', 'failed', 'hanging', 'success']:
            test_result[key] = sorted(test_result[key])

    # Sort tests in descending order by flakyness.
    sorted_test_to_build_ids = OrderedDict(
        sorted(test_to_build_ids.iteritems(), key=lambda x: x[1]['flakyness'], reverse=True))
    url_to_bad_test_results[url] = sorted_test_to_build_ids

    if len(sorted_test_to_build_ids) > 0:
        print "URL: {}".format(url)
        print "{:>60} {:10} {:25} {}".format(
            "Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness")
        for bad_test in sorted_test_to_build_ids:
            test_status = sorted_test_to_build_ids[bad_test]
            print "{:>60} {:10} {:7} ( {:4} / {:5} / {:5} ) {:2.0f}%".format(
                bad_test, len(test_status['all']), test_status['bad_count'],
                len(test_status['failed']), len(test_status['timeout']),
                len(test_status['hanging']), test_status['flakyness'])
    else:
        print "No flaky tests founds."
        if len(url_to_build_ids[url]) == len(build_ids_without_tests_run):
            print "None of the analyzed builds have test result."
    print "Builds analyzed: {}".format(url_to_build_ids[url])
    print "Builds without any test runs: {}".format(build_ids_without_tests_run)
    print ""

all_bad_tests = all_hanging_tests.union(all_failed_tests)
if args.mvn:
    includes = ",".join(all_bad_tests)
    with open("./includes", "w") as inc_file:
        inc_file.write(includes)
    excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests]
    with open("./excludes", "w") as exc_file:
        exc_file.write(",".join(excludes))
    with open("./timeout", "w") as timeout_file:
        timeout_file.write(",".join(all_timeout_tests))
    with open("./failed", "w") as failed_file:
        failed_file.write(",".join(all_failed_tests))

dev_support_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f:
    template = Template(f.read())
with open("dashboard.html", "w") as f:
    datetime = time.strftime("%m/%d/%Y %H:%M:%S")
    f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests),...
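For each bad_test the script keeps a small per-test record of build ids and derives a flakiness percentage from it. The sketch below mirrors that bookkeeping with hypothetical build ids and a made-up test; it is not part of report-flakies.py:

# Hypothetical per-test record in the shape report-flakies.py builds up.
test_status = {
    'all': {101, 102, 103, 104, 105},   # builds in which the test ran
    'failed': {102},                    # builds in which it failed (timeouts are included here)
    'timeout': {102},                   # builds in which it timed out
    'hanging': {104},                   # builds in which it hung
    'bad_count': 2,                     # builds with any failure, timeout or hang
}
flakyness = test_status['bad_count'] * 100.0 / len(test_status['all'])
print("Flakyness: {:.0f}%".format(flakyness))  # 40%

A typical invocation passes one --urls value per Jenkins job, optionally positionally matched --max-builds and --excluded-builds values, and --mvn to also write the includes/excludes files consumed by Maven.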


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

