How to use get_test_attributes method in autotest

Best Python code snippet using autotest_python

nightly.py

Source:nightly.py Github

copy

Full Screen

# NOTE(review): reconstructed from a garbled scraped listing (source line numbers
# were fused into the text and all newlines/indentation lost). The leading `def`
# line of is_filtered_platform and the tail of overview_thumb were truncated in
# the scrape; the def line below is reconstructed from its call site.
# Print statements were ported Python 2 -> 3 (2to3-equivalent, same output).


def is_filtered_platform(platform, platforms_filter):
    """Return True if `platform` matches any prefix in `platforms_filter`."""
    for p in platforms_filter:
        if platform.startswith(p):
            return True
    return False


def get_test_attributes(testrunx):
    """Fetch all tko_test_attributes rows for one test run as a dict.

    testrunx: integer test_idx in the TKO database.
    Uses a parameterized query (consistent with collect_testruns) instead of
    the original string interpolation, which was SQL-injection prone.
    """
    cmd = ("select attribute, value from tko_test_attributes"
           " where test_idx = %s")
    nrows = perf.db_cur.execute(cmd, [testrunx])
    return dict(perf.db_cur.fetchall())


def get_antag(testrunx):
    """Return the 'antag' attribute of a test run, or None if absent."""
    attrs = get_test_attributes(testrunx)
    return attrs.get('antag', None)


def matching_test_attributes(attrs, required_test_attributes):
    """True when every key present in both dicts has an equal value.

    An empty requirement dict matches everything; keys that exist in only
    one of the two dicts are ignored.
    """
    if not required_test_attributes:
        return True
    matches = [attrs[key] == required_test_attributes[key]
               for key in attrs if key in required_test_attributes]
    return min(matches + [True])  # True if all jointly-existing keys matched


def collect_testruns(jobs, test, test_attributes,
                     platforms_filter, by_hosts, no_antag):
    """Get test-run indexes for one test on one kernel and some platforms.

    Returns a dict: platform -> list of test_idx values.
    """
    # TODO: Is jobs list short enough to use directly in 1 sql cmd?
    # TODO: add filtering on test series?
    runs = {}  # platform --> list of test runs
    for jobx in jobs:
        cmd = ("select test_idx, machine_idx from tko_tests"
               " where job_idx = %s and test = %s")
        args = [jobx, test]
        nrows = perf.db_cur.execute(cmd, args)
        for testrunx, machx in perf.db_cur.fetchall():
            platform, host = perf.machine_idx_to_platform_host(machx)
            if by_hosts:
                platform += '.' + host
            if (is_filtered_platform(platform, platforms_filter) and
                    matching_test_attributes(get_test_attributes(testrunx),
                                             test_attributes) and
                    (not no_antag or get_antag(testrunx) == '')):
                runs.setdefault(platform, []).append(testrunx)
    return runs


def all_tested_platforms(test_runs):
    """Extract the sorted list of all tested platforms from test_runs.

    test_runs: kernel -> (platform -> runs) mapping.
    """
    platforms = set()
    for kernel in test_runs:
        platforms.update(set(test_runs[kernel].keys()))
    return sorted(platforms)


def divide_twoway_testruns(test_runs, platform):
    """Partition all twoway runs by the name of the antagonist program.

    Returns (kernel -> (antag -> runs) mapping, sorted list of antagonists).
    """
    twoway_runs = {}
    antagonists = set()
    for kernel in test_runs:
        runs = {}
        for testrunx in test_runs[kernel].get(platform, []):
            antag = get_antag(testrunx)
            if antag is not None:
                runs.setdefault(antag, []).append(testrunx)
                antagonists.add(antag)
        twoway_runs[kernel] = runs
    return twoway_runs, sorted(antagonists)


def collect_raw_scores(runs, metric):
    """Get unscaled scores of test runs for 1 test, arranged by platform."""
    platform_scores = {}  # platform --> list of perf scores
    for platform in runs:
        vals = perf.get_metric_at_point(runs[platform], metric)
        if vals:
            platform_scores[platform] = vals
    return platform_scores


def collect_scaled_scores(metric, test_runs, regressed_platforms, relative):
    """Get scores of test runs for 1 test over kernels and platforms.

    Optionally scale each platform's values relative to its first-seen
    (oldest) kernel baseline. Result is arranged per plotline (platform)
    for gnuplot: platform -> (kernel -> list of perf scores).
    """
    plot_data = {}
    baseline = {}
    for kernel in sorted(test_runs.keys()):
        for platform in test_runs[kernel]:
            if not (regressed_platforms is None or
                    platform in regressed_platforms):
                continue  # skip results for uninteresting platforms
            vals = perf.get_metric_at_point(test_runs[kernel][platform],
                                            metric)
            if vals:
                if relative:
                    if platform not in baseline:
                        baseline[platform], std = plotgraph.avg_dev(vals)
                    vals = [v / baseline[platform] for v in vals]
                pdp = plot_data.setdefault(platform, {})
                pdp.setdefault(kernel, []).extend(vals)
    return plot_data


def collect_twoway_scores(metric, antagonists, twoway_runs, relative):
    """Arrange twoway scores: antagonist -> (kernel -> values).

    When `relative`, values are divided by the average of the no-antagonist
    ('' key) runs for that kernel; kernels with no baseline are dropped.
    """
    alone = ''
    plot_data = {}
    for kernel in twoway_runs:
        for test2 in antagonists:
            runs = twoway_runs[kernel].get(test2, [])
            vals = perf.get_metric_at_point(runs, metric)
            plot_data.setdefault(test2, {})
            if vals:
                plot_data[test2][kernel] = vals
        if relative:
            vals = plot_data[alone].get(kernel, [])
            if vals:
                baseline = perf.average(vals)
                for test2 in antagonists:
                    vals = plot_data[test2].get(kernel, [])
                    vals = [val / baseline for val in vals]
                    if vals:
                        plot_data[test2][kernel] = vals
            else:
                for test2 in antagonists:
                    if kernel in plot_data[test2]:
                        del plot_data[test2][kernel]
    return plot_data


def find_regressions(kernels, test_runs, metric):
    """Return sorted platforms regressed vs. the reference kernel.

    A test is regressed on some platform if its latest results are
    definitely lower than on the reference kernel. Runs for the latest
    kernel may be underway and incomplete; in that case, selectively use
    the next-latest kernel.
    """
    # TODO: the next-latest method hurts if latest run is not sorted last,
    #       or if there are several dev threads
    ref = kernels[0]
    latest = kernels[-1]
    prev = kernels[-2:][0]
    scores = {}  # kernel --> (platform --> list of perf scores)
    for k in [ref, prev, latest]:
        if k in test_runs:
            scores[k] = collect_raw_scores(test_runs[k], metric)
    regressed_platforms = []
    for platform in scores[ref]:
        if latest in scores and platform in scores[latest]:
            k = latest
        elif prev in scores and platform in scores[prev]:
            k = prev
        else:  # perhaps due to decay of test machines
            k = ref  # no regression info avail
        ref_avg, ref_std = plotgraph.avg_dev(scores[ref][platform])
        avg, std = plotgraph.avg_dev(scores[k][platform])
        if avg + std < ref_avg - ref_std:
            regressed_platforms.append(platform)
    return sorted(regressed_platforms)


def get_testrun_context(testrun):
    """Return [job label, job tag, test subdir, formatted start time].

    Parameterized query replaces the original %d interpolation, matching
    the style used in collect_testruns.
    """
    cmd = ('select tko_jobs.label, tko_jobs.tag, tko_tests.subdir,'
           ' tko_tests.started_time'
           ' from tko_jobs, tko_tests'
           ' where tko_jobs.job_idx = tko_tests.job_idx'
           ' and tko_tests.test_idx = %s')
    nrows = perf.db_cur.execute(cmd, [testrun])
    assert nrows == 1
    row = perf.db_cur.fetchone()
    row = [row[0], row[1], row[2], row[3].strftime('%m/%d/%y %H:%M')]
    return row


def html_top():
    """Emit the CGI content-type header and opening HTML tags."""
    print("Content-Type: text/html\n\n<html><body>")


def abs_rel_link(myurl, passthru):
    """Link that redraws the current page with the opposite
    absolute/relative choice."""
    mod_passthru = passthru[:]
    if 'absolute' in passthru:
        mod_passthru.remove('absolute')
        opposite = 'relative'
    else:
        mod_passthru.append('absolute')
        opposite = 'absolute'
    url = '%s?%s' % (myurl, '&'.join(mod_passthru))
    return "<a href='%s'> %s </a>" % (url, opposite)


def table_1_metric_all_kernels(plot_data, columns, column_argname,
                               kernels, kernel_dates,
                               myurl, filtered_passthru):
    """Generate an HTML table of the graph's numbers for one benchmark
    metric over all kernels (rows), over various platforms or various
    antagonists etc (cols)."""
    ref_thresholds = {}
    print("<table border=1 cellpadding=3 cellspacing=0>")
    print("<tr> <td><b> Kernel </b></td>", end=' ')
    for label in columns:
        if not label and column_argname == 'antag':
            label = 'no antag'
        print("<td><b>", label.replace('_', '<br>_'), "</b></td>")
    print("</tr>")
    for kernel in kernels:
        print("<tr> <td><b>", kernel, "</b>", end=' ')
        if kernel in kernel_dates:
            print("<br><small>", kernel_dates[kernel], "</small>")
        print("</td>")
        for col in columns:
            print("<td", end=' ')
            vals = plot_data[col].get(kernel, [])
            if not vals:
                print("> ?", end=' ')
            else:
                (avg, std_dev) = plotgraph.avg_dev(vals)
                if col not in ref_thresholds:
                    ref_thresholds[col] = avg - std_dev
                if avg + std_dev < ref_thresholds[col]:
                    print("bgcolor=pink", end=' ')
                print("> ", end=' ')
                args = filtered_passthru[:]
                perf.append_cgi_args(args,
                                     {column_argname: col, 'kernel': kernel})
                print("<a href='%s?%s&runs&attrs'>" % (myurl,
                                                       '&'.join(args)))
                print("<b>%.4g</b>" % avg, "</a><br>", end=' ')
                print("&nbsp; <small> %dr </small>" % len(vals), end=' ')
                print("&nbsp; <small> %.3g </small>" % std_dev, end=' ')
            print("</td>")
        print("</tr>\n")
    print("</table>")
    print("<p> <b>Bold value:</b> Average of this metric, then <br>")
    print("number of good test runs, then standard deviation of those runs")
    print("<br> Pink if regressed from reference kernel")


def table_all_metrics_1_platform(test_runs, platform, relative):
    """Generate an HTML table of all metrics for one platform.

    Cannot mark regressions, since some metrics improve downwards.
    """
    # TODO: show std dev in cells
    kernels = perf.sort_kernels(test_runs.keys())
    scores = {}
    attrs = set()
    for kernel in kernels:
        testruns = test_runs[kernel].get(platform, [])
        if testruns:
            d = perf.collect_all_metrics_scores(testruns)
            scores[kernel] = d
            attrs.update(set(d.keys()))
        else:
            print("No runs completed on", kernel, "<br>")
    attrs = sorted(list(attrs))[:100]  # cap table at 100 metrics
    print("<table border=1 cellpadding=4 cellspacing=0>")
    print("<tr><td> Metric </td>")
    for kernel in kernels:
        kernel = kernel.replace("_", "_<br>")
        print("<td>", kernel, "</td>")
    print("</tr>")
    for attr in attrs:
        print("<tr>")
        print("<td>", attr, "</td>")
        baseline = None
        for kernel in kernels:
            print("<td>", end=' ')
            if kernel in scores and attr in scores[kernel]:
                (avg, dev) = plotgraph.avg_dev(scores[kernel][attr])
                if baseline and relative:
                    percent = (avg / baseline - 1) * 100
                    print("%+.1f%%" % percent, end=' ')
                else:
                    baseline = avg
                    print("%.4g" % avg, end=' ')
            else:
                print("?")
            print("</td>")
        print("</tr>")
    print("</table>")


def table_variants_all_tests(plot_data, columns, colkeys, benchmarks,
                             myurl, filtered_passthru):
    """Generate an HTML table of the graph's numbers for the primary
    metric over all benchmarks (rows), on one platform and one kernel,
    over various combos of test run attribute constraints (cols)."""
    ref_thresholds = {}
    print("<table border=1 cellpadding=3 cellspacing=0>")
    print("<tr> <td><b> Benchmark </b></td>", end=' ')
    for col in columns:
        print("<td><b>", colkeys[col].replace(',', ',<br>'), "</b></td>")
    print("</tr>")
    for benchmark in benchmarks:
        print("<tr> <td><b>", benchmark, "</b></td>")
        for col in columns:
            print("<td>", end=' ')
            vals = plot_data[col].get(benchmark, [])
            if not vals:
                print("?", end=' ')
            else:
                (avg, std_dev) = plotgraph.avg_dev(vals)
                args = filtered_passthru[:]
                perf.append_cgi_args(args, {'test': benchmark})
                for keyval in colkeys[col].split(','):
                    key, val = keyval.split('=', 1)
                    perf.append_cgi_args(args, {key: val})
                print("<a href='%s?%s&runs&attrs'>" % (myurl,
                                                       '&'.join(args)))
                print("<b>%.4g</b>" % avg, "</a><br>", end=' ')
                print("&nbsp; <small> %dr </small>" % len(vals), end=' ')
                print("&nbsp; <small> %.3g </small>" % std_dev, end=' ')
            print("</td>")
        print("</tr>\n")
    print("</table>")
    print("<p> <b>Bold value:</b> Average of this metric, then <br>")
    print("number of good test runs, then standard deviation of those runs")


def table_testrun_details(runs, metric, tko_server, show_attrs):
    """Generate an HTML table with one row of details per test run."""
    print("<table border=1 cellpadding=4 cellspacing=0>")
    print("<tr><td> %s metric </td>" % metric)
    print("<td> Job label </td> <td> Job tag </td> <td> Run results </td>")
    print("<td> Started_time </td>")
    if show_attrs:
        print("<td> Test attributes </td>")
    print("</tr>\n")
    for testrunx in runs:
        print("<tr> <td>", end=' ')
        vals = perf.get_metric_at_point([testrunx], metric)
        for v in vals:
            print("%.4g&nbsp;" % v, end=' ')
        print("</td>")
        row = get_testrun_context(testrunx)
        row[2] = ("<a href='//%s/results/%s/%s/results/keyval'> %s </a>"
                  % (tko_server, row[1], row[2], row[2]))
        for v in row:
            print("<td> %s </td>" % v)
        if show_attrs:
            attrs = get_test_attributes(testrunx)
            print("<td>", end=' ')
            for attr in sorted(attrs.keys()):
                # skip bulky, uninteresting attributes
                if attr == "sysinfo-cmdline":
                    continue
                if attr[:4] == "svs-":
                    continue
                val = attrs[attr]
                if len(val) > 40:
                    val = val[:40 - 3] + "..."
                print("%s=%s &nbsp; &nbsp; " % (attr, val), end=' ')
            print("</td>")
        print("</tr>\n")
    print("</table>")


def overview_thumb(test, metric, myurl, passthru):
    """Emit a thumbnail link/graph for one test (body truncated in source)."""
    pass_ = passthru + ['test=%s' % test]
    if metric:
        pass  # NOTE(review): remainder truncated in the scraped listing

Full Screen

Full Screen

integ_tests_junit.py

Source:integ_tests_junit.py Github

copy

Full Screen

# NOTE(review): reconstructed from a garbled scraped listing. Imports above
# the visible fragment (os, pytest, requests) are inferred from usage in the
# bodies below — TODO confirm against the original file.
import os

import pytest
import requests

from functions.lib.manifest import JasmineManifest
from functions.lib.junit_formatter import JunitHelper


def test_junit_passed(report_path):
    """A known-passing case is reported as passed in the JUnit report."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    contains_pass = False
    for case in case_list:
        if case.is_match(
                "should pass if an element is not supposed to exist and is missing #integrationSuite") and case.is_passed == "true":
            contains_pass = True
    assert contains_pass


def test_junit_failed(report_path):
    """A known-failing case is reported as failed."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    contains_fail = False
    for case in case_list:
        if case.is_match(
                "should fail if an element is not supposed to exist but is found #integrationSuite") and case.is_failed == "true":
            contains_fail = True
    assert contains_fail


def test_junit_skipped(report_path):
    """A known-skipped case is reported as skipped."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    contains_skip = False
    for case in case_list:
        if case.is_match("can be skipped if we want them to #integrationSuite") and case.is_skipped == "true":
            contains_skip = True
    assert contains_skip


def test_junit_expired(report_path):
    """An expired case is reported as both expired and failed."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    contains_expired = False
    for case in case_list:
        if case.is_match(
                "Expired test is considered failed") and case.is_expired == "true" and case.is_failed == "true":
            contains_expired = True
    assert contains_expired


def test_junit_disabled(report_path):
    """A disabled case is skipped and hidden under the E2E.Disabled suite."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    contains_disabled = False
    for case in case_list:
        if case.is_match(
                "can be disabled and hidden from the Jasmine reporter #integrationSuite") and case.is_skipped == "true" and case.suite_name == "E2E.Disabled":
            contains_disabled = True
    assert contains_disabled


@pytest.mark.xfail(run=False, reason="Artifacts are no longer exposed this way, these need to be downloaded instead")
def test_junit_video_artifact(report_path):
    """Every passing case links to a downloadable MP4 video artifact."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    has_video = False
    for case in case_list:
        if case.is_passed == "true":
            vid = requests.get(case.video_link)
            assert vid.status_code == 200
            assert vid.headers['content-type'] == "video/mp4"
            has_video = True
    assert has_video


@pytest.mark.xfail(run=False, reason="Artifacts are no longer exposed this way, these need to be downloaded instead")
def test_junit_chrome_log_artifact(report_path):
    """Every passing case links to a downloadable Chrome log artifact."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    has_log = False
    for case in case_list:
        if case.is_passed == "true":
            log = requests.get(case.chrome_log_link)
            assert log.status_code == 200
            assert log.headers['content-type'] == "text/plain; charset=UTF-8"
            has_log = True
    assert has_log


@pytest.mark.xfail(run=False, reason="Artifacts are no longer exposed this way, these need to be downloaded instead")
def test_junit_chromedriver_log_artifact(report_path):
    """Every passing case links to a downloadable chromedriver log artifact."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    has_log = False
    for case in case_list:
        if case.is_passed == "true":
            log = requests.get(case.chromedriver_log_link)
            assert log.status_code == 200
            assert log.headers['content-type'] == "text/plain; charset=UTF-8"
            has_log = True
    assert has_log


@pytest.mark.xfail(run=False, reason="Artifacts are no longer exposed this way, these need to be downloaded instead")
def test_junit_console_log_artifact(report_path):
    """Every passing case links to a downloadable console log artifact."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    has_log = False
    for case in case_list:
        if case.is_passed == "true":
            log = requests.get(case.console_log_link)
            assert log.status_code == 200
            assert log.headers['content-type'] == "text/plain; charset=UTF-8"
            has_log = True
    assert has_log


def test_junit_retry(report_path):
    """At least one case in the report was retried."""
    report = JunitHelper(report_path)
    case_list = report.get_test_attributes()
    has_retry = False
    for case in case_list:
        if case.is_retried == "true":
            has_retry = True
    assert has_retry


def test_junit_manifest(report_path):
    """Cross-check the JUnit report against the Jasmine manifest totals."""
    setup_tests_abs_path = os.path.abspath('pipeline/integration_tests/webdriver_tests/jasmine_reporter_test.ts')
    jasmine_manifest = JasmineManifest([setup_tests_abs_path],
                                       ['#integrationSuite'], ['#quarantine'])
    jasmine_manifest_skipped = JasmineManifest([setup_tests_abs_path],
                                               ['#quarantine'], [])
    report = JunitHelper(report_path)
    runnable_case_list = report.get_runnable_test_elements(jasmine_manifest)
    not_runnable_case_list = jasmine_manifest.get_all_non_runnable_tests()
    complete_case_list = jasmine_manifest.jasmine_tests
    total_junit_cases = len(report.get_test_attributes()) - 1  # one record is a fake for expired test
    total_tests = jasmine_manifest.get_total_number_tests()
    total_runnable = jasmine_manifest.get_total_number_runnable()
    total_not_runnable = jasmine_manifest.get_total_number_not_runnable()
    does_total_match = False
    for item in complete_case_list:
        print(item.test_name)
    if total_runnable + total_not_runnable == total_tests:
        does_total_match = True
    assert does_total_match
    assert total_tests == total_junit_cases
    assert jasmine_manifest.get_total_number_runnable() == jasmine_manifest_skipped.get_total_number_not_runnable()
    assert jasmine_manifest_skipped.get_total_number_runnable() == jasmine_manifest.get_total_number_not_runnable()
    assert jasmine_manifest.get_total_number_tests() == jasmine_manifest_skipped.get_total_number_tests()
    # runnable cases in junit are found within its own inventory
    for item in runnable_case_list:
        is_found = False
        for case in report.get_test_attributes():
            if case.is_match(item.get('name')):
                is_found = True
                break
        assert is_found
    for item in not_runnable_case_list:
        is_found = False
        for case in report.get_test_attributes():
            if case.is_match(item.test_name):
                is_found = True
                break
        assert is_found
    for item in complete_case_list:
        is_found = False
        for case in report.get_test_attributes():
            if case.is_match(item.test_name):
                is_found = True
                break
        # NOTE(review): remainder truncated in the scraped listing

Full Screen

Full Screen

main_cnn.py

Source:main_cnn.py Github

copy

Full Screen

# NOTE(review): reconstructed from a garbled scraped listing; the imports of
# json and numpy occur above the visible fragment and are inferred from the
# json.load / np.array usage below — TODO confirm. French comments translated.
import json

import numpy as np
from PIL import Image
import glob
import cv2


def get_test_attributes(index, test_train):
    """Return the normalized attribute vector for the image with this index.

    index: image index string from the filename.
    test_train: 'test' or 'train' — which split of the market dict to use.
    'age' is divided by 4 and other attributes shifted by -1 so that all
    values land in [0, 1].
    """
    i = market[test_train]['image_index'].index(index)
    tmp_list = []
    attri_list = market[test_train]
    for attribute in attri_list:
        if attribute != "image_index":
            if attribute == "age":
                tmp_list.append(market[test_train][attribute][i] / 4)  # normalize to [0, 1]
            else:
                tmp_list.append(market[test_train][attribute][i] - 1)  # normalize to [0, 1]
    return tmp_list


def horizontal_flip(image_array):
    """Mirror an image array left-to-right."""
    return image_array[:, ::-1]


# load market_attribute.json
with open('market_attribute.json') as json_data:
    market = json.load(json_data)
# load gallery.json
with open('gallery.json') as json_data:
    gallery = json.load(json_data)
gallery = np.array(gallery)

# image containers
images = []
train_images = []
test_images = []
attributs_market = []
attributs_train = []
attributs_test = []
# index lists for the identification model
image_index_train = []
image_index_test = []
# split the images between the test and train sets
for filename in glob.glob('Market-1501\*.jpg'):
    image = cv2.imread(filename, cv2.IMREAD_COLOR)
    images.append(image)
    # NOTE(review): assumes a fixed 12-char path prefix before the 4-digit
    # index — fragile if the directory name changes; verify.
    str_index = filename[12:16]
    if str_index in market['test']['image_index']:
        test_images.append(image)
        attributs_test.append(get_test_attributes(str_index, "test"))
        image_index_test.append(str_index)
    else:
        train_images.append(image)
        attributs_train.append(get_test_attributes(str_index, "train"))
        image_index_train.append(str_index)
print(len(image_index_test))
print(len(image_index_train))

# data augmentation (question 3): horizontal mirror of the images
augmented_train_images = []
augmentes_train_attributs = []
augmented_test_images = []
augmentes_test_attributs = []
# BUG FIX: the original wrote "for images in train_images" but appended the
# stale variable "image" left over from the split loop above, so every
# iteration duplicated the last-read image instead of augmenting each
# training image. Loop variable corrected to "image".
for image in train_images:
    augmented_train_images.append(image)
    augmented_train_images.append(horizontal_flip(image))
    # NOTE(review): remainder truncated in the scraped listing

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful