How to use test_details method in Kiwi

Best Python code snippet using Kiwi_python

manifest.py

Source:manifest.py Github

copy

Full Screen

1# This Source Code Form is subject to the terms of the Mozilla Public2# License, v. 2.0. If a copy of the MPL was not distributed with this3# file, You can obtain one at http://mozilla.org/MPL/2.0/.4from __future__ import absolute_import5import json6import os7import re8from six.moves.urllib.parse import parse_qs, urlsplit, urlunsplit, urlencode, unquote9from logger.logger import RaptorLogger10from manifestparser import TestManifest11from utils import transform_platform, transform_subtest12from constants.raptor_tests_constants import YOUTUBE_PLAYBACK_MEASURE13here = os.path.abspath(os.path.dirname(__file__))14raptor_ini = os.path.join(here, 'raptor.ini')15tests_dir = os.path.join(here, 'tests')16LOG = RaptorLogger(component='raptor-manifest')17LIVE_SITE_TIMEOUT_MULTIPLIER = 1.218required_settings = [19 'alert_threshold',20 'apps',21 'lower_is_better',22 'measure',23 'page_cycles',24 'test_url',25 'scenario_time',26 'type',27 'unit',28]29playback_settings = [30 'playback_pageset_manifest',31 'playback_recordings',32]33whitelist_live_site_tests = [34 "raptor-youtube-playback",35]36def filter_app(tests, values):37 for test in tests:38 if values["app"] in test['apps']:39 yield test40def filter_live_sites(tests, values):41 # if a test uses live sites only allow it to run if running locally or on try42 # this prevents inadvertently submitting live site data to perfherder43 for test in tests:44 if test.get("use_live_sites", "false") == "true":45 # can run with live sites when running locally46 if values["run_local"] is True:47 yield test48 # can run with live sites if running on try49 elif "hg.mozilla.org/try" in os.environ.get('GECKO_HEAD_REPOSITORY', 'n/a'):50 yield test51 # can run with live sites when white-listed52 elif filter(lambda name: test['name'].startswith(name), whitelist_live_site_tests):53 yield test54 else:55 LOG.warning('%s is not allowed to run with use_live_sites' % test['name'])56 else:57 # not using live-sites so go ahead58 yield test59def 
get_browser_test_list(browser_app, run_local):60 LOG.info(raptor_ini)61 test_manifest = TestManifest([raptor_ini], strict=False)62 info = {"app": browser_app, "run_local": run_local}63 return test_manifest.active_tests(exists=False,64 disabled=False,65 filters=[filter_app, filter_live_sites],66 **info)67def validate_test_ini(test_details):68 # validate all required test details were found in the test INI69 valid_settings = True70 for setting in required_settings:71 # measure setting not required for benchmark type tests72 if setting == 'measure' and test_details['type'] == 'benchmark':73 continue74 if setting == 'scenario_time' and test_details['type'] != 'scenario':75 continue76 if test_details.get(setting) is None:77 # if page-cycles is not specified, it's ok as long as browser-cycles is there78 if setting == "page-cycles" and test_details.get('browser_cycles') is not None:79 continue80 valid_settings = False81 LOG.error("setting '%s' is required but not found in %s"82 % (setting, test_details['manifest']))83 test_details.setdefault("page_timeout", 30000)84 # if playback is specified, we need more playback settings85 if test_details.get('playback') is not None:86 for setting in playback_settings:87 if test_details.get(setting) is None:88 valid_settings = False89 LOG.error("setting '%s' is required but not found in %s"90 % (setting, test_details['manifest']))91 # if 'alert-on' is specified, we need to make sure that the value given is valid92 # i.e. any 'alert_on' values must be values that exist in the 'measure' ini setting93 if test_details.get('alert_on') is not None:94 # support with or without spaces, i.e. 
'measure = fcp, loadtime' or '= fcp,loadtime'95 # convert to a list; and remove any spaces96 # this can also have regexes inside97 test_details['alert_on'] = [_item.strip() for _item in test_details['alert_on'].split(',')]98 # this variable will store all the concrete values for alert_on elements99 # that have a match in "measure" list100 valid_alerts = []101 # if test is raptor-youtube-playback and measure is empty, use all the tests102 if test_details.get('measure') is None \103 and 'youtube-playback' in test_details.get('name', ''):104 test_details['measure'] = YOUTUBE_PLAYBACK_MEASURE105 # convert "measure" to string, so we can use it inside a regex106 measure_as_string = ' '.join(test_details['measure'])107 # now make sure each alert_on value provided is valid108 for alert_on_value in test_details['alert_on']:109 # replace the '*' with a valid regex pattern110 alert_on_value_pattern = alert_on_value.replace('*', '[a-zA-Z0-9.@_%]*')111 # store all elements that have been found in "measure_as_string"112 matches = re.findall(alert_on_value_pattern, measure_as_string)113 if len(matches) == 0:114 LOG.error("The 'alert_on' value of '%s' is not valid because "115 "it doesn't exist in the 'measure' test setting!"116 % alert_on_value)117 valid_settings = False118 else:119 # add the matched elements to valid_alerts120 valid_alerts.extend(matches)121 # replace old alert_on values with valid elements (no more regexes inside)122 # and also remove duplicates if any, by converting valid_alerts to a 'set' first123 test_details['alert_on'] = sorted(set(valid_alerts))124 return valid_settings125def add_test_url_params(url, extra_params):126 # add extra parameters to the test_url query string127 # the values that already exist are re-written128 # urlsplit returns a result as a tuple like (scheme, netloc, path, query, fragment)129 parsed_url = urlsplit(url)130 parsed_query_params = parse_qs(parsed_url.query)131 parsed_extra_params = parse_qs(extra_params)132 for name, value in 
parsed_extra_params.items():133 # overwrite the old value134 parsed_query_params[name] = value135 final_query_string = unquote(urlencode(parsed_query_params, doseq=True))136 # reconstruct test_url with the changed query string137 return urlunsplit((138 parsed_url.scheme,139 parsed_url.netloc,140 parsed_url.path,141 final_query_string,142 parsed_url.fragment143 ))144def write_test_settings_json(args, test_details, oskey):145 # write test settings json file with test details that the control146 # server will provide for the web ext147 test_url = transform_platform(test_details['test_url'], oskey)148 test_settings = {149 "raptor-options": {150 "type": test_details['type'],151 "cold": test_details['cold'],152 "test_url": test_url,153 "expected_browser_cycles": test_details['expected_browser_cycles'],154 "page_cycles": int(test_details['page_cycles']),155 "host": args.host,156 }157 }158 if test_details['type'] == "pageload":159 test_settings['raptor-options']['measure'] = {}160 # test_details['measure'] was already converted to a list in get_raptor_test_list below161 # the 'hero=' line is still a raw string from the test INI162 for m in test_details['measure']:163 test_settings['raptor-options']['measure'][m] = True164 if m == 'hero':165 test_settings['raptor-options']['measure'][m] = [h.strip() for h in166 test_details['hero'].split(',')]167 if test_details.get("alert_on", None) is not None:168 # alert_on was already converted to list above169 test_settings['raptor-options']['alert_on'] = test_details['alert_on']170 if test_details.get("page_timeout", None) is not None:171 test_settings['raptor-options']['page_timeout'] = int(test_details['page_timeout'])172 test_settings['raptor-options']['unit'] = test_details.get("unit", "ms")173 test_settings['raptor-options']['lower_is_better'] = test_details.get("lower_is_better", True)174 # support optional subtest unit/lower_is_better fields175 val = test_details.get('subtest_unit', test_settings['raptor-options']['unit'])176 
test_settings['raptor-options']['subtest_unit'] = val177 subtest_lower_is_better = test_details.get('subtest_lower_is_better')178 if subtest_lower_is_better is None:179 # default to main test values if not set180 test_settings['raptor-options']['subtest_lower_is_better'] = (181 test_settings['raptor-options']['lower_is_better'])182 else:183 test_settings['raptor-options']['subtest_lower_is_better'] = subtest_lower_is_better184 if test_details.get("alert_change_type", None) is not None:185 test_settings['raptor-options']['alert_change_type'] = test_details['alert_change_type']186 if test_details.get("alert_threshold", None) is not None:187 test_settings['raptor-options']['alert_threshold'] = float(test_details['alert_threshold'])188 if test_details.get("screen_capture", None) is not None:189 test_settings['raptor-options']['screen_capture'] = test_details.get("screen_capture")190 # if Gecko profiling is enabled, write profiling settings for webext191 if test_details.get("gecko_profile", False):192 threads = ['GeckoMain', 'Compositor']193 # With WebRender enabled profile some extra threads194 if os.getenv('MOZ_WEBRENDER') == '1':195 threads.extend(['Renderer', 'WR'])196 if test_details.get('gecko_profile_threads'):197 test_threads = filter(None, test_details['gecko_profile_threads'].split(','))198 threads.extend(test_threads)199 test_settings['raptor-options'].update({200 'gecko_profile': True,201 'gecko_profile_entries': int(test_details.get('gecko_profile_entries', 1000000)),202 'gecko_profile_interval': int(test_details.get('gecko_profile_interval', 1)),203 'gecko_profile_threads': ','.join(set(threads)),204 })205 if test_details.get("newtab_per_cycle", None) is not None:206 test_settings['raptor-options']['newtab_per_cycle'] = \207 bool(test_details['newtab_per_cycle'])208 if test_details['type'] == "scenario":209 test_settings['raptor-options']['scenario_time'] = test_details['scenario_time']210 if 'background_test' in test_details:211 
test_settings['raptor-options']['background_test'] = \212 bool(test_details['background_test'])213 else:214 test_settings['raptor-options']['background_test'] = False215 jsons_dir = os.path.join(tests_dir, 'json')216 if not os.path.exists(jsons_dir):217 os.mkdir(os.path.join(tests_dir, 'json'))218 settings_file = os.path.join(jsons_dir, test_details['name'] + '.json')219 try:220 with open(settings_file, 'w') as out_file:221 json.dump(test_settings, out_file, indent=4, ensure_ascii=False)222 out_file.close()223 except IOError:224 LOG.info("abort: exception writing test settings json!")225def get_raptor_test_list(args, oskey):226 '''227 A test ini (i.e. raptor-firefox-tp6.ini) will have one or more subtests inside,228 each with it's own name ([the-ini-file-test-section]).229 We want the ability to eiter:230 - run * all * of the subtests listed inside the test ini; - or -231 - just run a single one of those subtests that are inside the ini232 A test name is received on the command line. This will either match the name233 of a single subtest (within an ini) - or - if there's no matching single234 subtest with that name, then the test name provided might be the name of a235 test ini itself (i.e. raptor-firefox-tp6) that contains multiple subtests.236 First look for a single matching subtest name in the list of all availble tests,237 and if it's found we will just run that single subtest.238 Then look at the list of all available tests - each available test has a manifest239 name associated to it - and pull out all subtests whose manifest name matches240 the test name provided on the command line i.e. 
run all subtests in a specified ini.241 If no tests are found at all then the test name is invalid.242 '''243 tests_to_run = []244 # get list of all available tests for the browser we are testing against245 available_tests = get_browser_test_list(args.app, args.run_local)246 # look for single subtest that matches test name provided on cmd line247 for next_test in available_tests:248 if next_test['name'] == args.test:249 tests_to_run.append(next_test)250 break251 # no matches, so now look for all subtests that come from a test ini252 # manifest that matches the test name provided on the commmand line253 if len(tests_to_run) == 0:254 _ini = args.test + ".ini"255 for next_test in available_tests:256 head, tail = os.path.split(next_test['manifest'])257 if tail == _ini:258 # subtest comes from matching test ini file name, so add it259 tests_to_run.append(next_test)260 # go through each test and set the page-cycles and page-timeout, and some config flags261 # the page-cycles value in the INI can be overriden when debug-mode enabled, when262 # gecko-profiling enabled, or when --page-cycles cmd line arg was used (that overrides all)263 for next_test in tests_to_run:264 LOG.info("configuring settings for test %s" % next_test['name'])265 max_page_cycles = next_test.get('page_cycles', 1)266 max_browser_cycles = next_test.get('browser_cycles', 1)267 # if using playback, the playback recording info may need to be transformed268 if next_test.get('playback') is not None:269 next_test['playback_pageset_manifest'] = \270 transform_subtest(next_test['playback_pageset_manifest'],271 next_test['name'])272 next_test['playback_recordings'] = \273 transform_subtest(next_test['playback_recordings'],274 next_test['name'])275 if args.gecko_profile is True:276 next_test['gecko_profile'] = True277 LOG.info('gecko-profiling enabled')278 max_page_cycles = 3279 max_browser_cycles = 3280 if 'gecko_profile_entries' in args and args.gecko_profile_entries is not None:281 
next_test['gecko_profile_entries'] = str(args.gecko_profile_entries)282 LOG.info('gecko-profiling entries set to %s' % args.gecko_profile_entries)283 if 'gecko_profile_interval' in args and args.gecko_profile_interval is not None:284 next_test['gecko_profile_interval'] = str(args.gecko_profile_interval)285 LOG.info('gecko-profiling interval set to %s' % args.gecko_profile_interval)286 if 'gecko_profile_threads' in args and args.gecko_profile_threads is not None:287 threads = filter(None, next_test.get('gecko_profile_threads', '').split(','))288 threads.extend(args.gecko_profile_threads)289 next_test['gecko_profile_threads'] = ','.join(threads)290 LOG.info('gecko-profiling extra threads %s' % args.gecko_profile_threads)291 else:292 # if the gecko profiler is not enabled, ignore all of it's settings293 next_test.pop('gecko_profile_entries', None)294 next_test.pop('gecko_profile_interval', None)295 next_test.pop('gecko_profile_threads', None)296 if args.debug_mode is True:297 next_test['debug_mode'] = True298 LOG.info("debug-mode enabled")299 max_page_cycles = 2300 # if --page-cycles was provided on the command line, use that instead of INI301 # if just provided in the INI use that but cap at 3 if gecko-profiling is enabled302 if args.page_cycles is not None:303 next_test['page_cycles'] = args.page_cycles304 LOG.info("setting page-cycles to %d as specified on cmd line" % args.page_cycles)305 else:306 if int(next_test.get('page_cycles', 1)) > max_page_cycles:307 next_test['page_cycles'] = max_page_cycles308 LOG.info("setting page-cycles to %d because gecko-profling is enabled"309 % next_test['page_cycles'])310 # if --browser-cycles was provided on the command line, use that instead of INI311 # if just provided in the INI use that but cap at 3 if gecko-profiling is enabled312 if args.browser_cycles is not None:313 next_test['browser_cycles'] = args.browser_cycles314 LOG.info("setting browser-cycles to %d as specified on cmd line" % args.browser_cycles)315 else:316 if 
int(next_test.get('browser_cycles', 1)) > max_browser_cycles:317 next_test['browser_cycles'] = max_browser_cycles318 LOG.info("setting browser-cycles to %d because gecko-profilng is enabled"319 % next_test['browser_cycles'])320 # if --page-timeout was provided on the command line, use that instead of INI321 if args.page_timeout is not None:322 LOG.info("setting page-timeout to %d as specified on cmd line" % args.page_timeout)323 next_test['page_timeout'] = args.page_timeout324 # for browsertime jobs, cold page-load mode is determined by command line argument; for325 # raptor-webext jobs cold page-load is determined by the 'cold' key in test manifest INI326 _running_cold = False327 if args.browsertime is True:328 if args.cold is True:329 _running_cold = True330 else:331 # running warm page-load so ignore browser-cycles if it was provided (set to 1)332 next_test['browser_cycles'] = 1333 else:334 if next_test.get("cold", "false") == "true":335 _running_cold = True336 if _running_cold:337 # when running in cold mode, set browser-cycles to the page-cycles value; as we want338 # the browser to restart between page-cycles; and set page-cycles to 1 as we only339 # want 1 single page-load for every browser-cycle340 next_test['cold'] = True341 next_test['expected_browser_cycles'] = int(next_test['browser_cycles'])342 next_test['page_cycles'] = 1343 # also ensure '-cold' is in test name so perfherder results indicate warm cold-load344 if "-cold" not in next_test['name']:345 next_test['name'] += "-cold"346 else:347 # when running in warm mode, just set test-cycles to 1 and leave page-cycles as/is348 next_test['cold'] = False349 next_test['expected_browser_cycles'] = 1350 # either warm or cold-mode, initialize the starting current 'browser-cycle'351 next_test['browser_cycle'] = 1352 # if --test-url-params was provided on the command line, add the params to the test_url353 # provided in the INI354 if args.test_url_params is not None:355 initial_test_url = 
next_test['test_url']356 next_test['test_url'] = add_test_url_params(initial_test_url, args.test_url_params)357 LOG.info("adding extra test_url params (%s) as specified on cmd line "358 "to the current test_url (%s), resulting: %s" %359 (args.test_url_params, initial_test_url, next_test['test_url']))360 if next_test.get('use_live_sites', "false") == "true":361 # when using live sites we want to turn off playback362 LOG.info("using live sites so turning playback off!")363 next_test['playback'] = None364 LOG.info("using live sites so appending '-live' to the test name")365 next_test['name'] = next_test['name'] + "-live"366 # allow a slightly higher page timeout due to remote page loads367 next_test['page_timeout'] = int(368 next_test['page_timeout']) * LIVE_SITE_TIMEOUT_MULTIPLIER369 LOG.info("using live sites so using page timeout of %dms" % next_test['page_timeout'])370 # browsertime doesn't use the 'measure' test ini setting; however just for the sake371 # of supporting both webext and browsertime, just provide a dummy 'measure' setting372 # here to prevent having to check in multiple places; it has no effect on what373 # browsertime actually measures; remove this when eventually we remove webext support374 if args.browsertime and next_test.get('measure') is None:375 next_test['measure'] = "fnbpaint, fcp, dcf, loadtime"376 # convert 'measure =' test INI line to list377 if next_test.get('measure') is not None:378 _measures = []379 for m in [m.strip() for m in next_test['measure'].split(',')]:380 # build the 'measures =' list381 _measures.append(m)382 next_test['measure'] = _measures383 # if using live sites, don't measure hero element as it only exists in recordings384 if 'hero' in next_test['measure'] and \385 next_test.get('use_live_sites', "false") == "true":386 # remove 'hero' from the 'measures =' list387 next_test['measure'].remove('hero')388 # remove the 'hero =' line since no longer measuring hero389 del next_test['hero']390 if 
next_test.get('lower_is_better') is not None:391 next_test['lower_is_better'] = bool_from_str(next_test.get('lower_is_better'))392 if next_test.get('subtest_lower_is_better') is not None:393 next_test['subtest_lower_is_better'] = bool_from_str(394 next_test.get('subtest_lower_is_better')395 )396 # write out .json test setting files for the control server to read and send to web ext397 if len(tests_to_run) != 0:398 for test in tests_to_run:399 if validate_test_ini(test):400 write_test_settings_json(args, test, oskey)401 else:402 # test doesn't have valid settings, remove it from available list403 LOG.info("test %s is not valid due to missing settings" % test['name'])404 tests_to_run.remove(test)405 else:406 LOG.critical("abort: specified test name doesn't exist")407 return tests_to_run408def bool_from_str(boolean_string):409 lower_boolean_str = boolean_string.lower()410 if lower_boolean_str == 'true':411 return True412 elif lower_boolean_str == 'false':413 return False414 else:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Kiwi automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful