How to use the task_output_dir method in avocado

Best Python code snippets using avocado_python
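
Note: the three snippets below are aggregated from projects that manage per-task output directories by hand; none of them imports avocado itself. In avocado's own Test API, the closest equivalent is the test's outputdir attribute, a per-test directory whose contents are preserved with the job results. A minimal sketch, assuming a current avocado install (the class name and file contents here are illustrative, not from the snippets):

import os

from avocado import Test


class TaskOutputExample(Test):

    def test(self):
        # self.outputdir is the per-test directory avocado provides for
        # arbitrary files the test wants preserved alongside its logs
        artifact = os.path.join(self.outputdir, "artifact.txt")
        with open(artifact, "w") as f:
            f.write("hello from the test\n")
        self.assertTrue(os.path.isfile(artifact))

Running this with avocado run should leave artifact.txt under the job's results tree.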

process_perf_results.py

Source: process_perf_results.py (GitHub)

#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import json
import shutil
import sys
import tempfile

from core import oauth_api
from core import upload_results_to_perf_dashboard
from core import results_merger
from os import listdir
from os.path import isfile, join

RESULTS_URL = 'https://chromeperf.appspot.com'


def _upload_perf_results(json_to_upload, name, configuration_name,
                         build_properties, oauth_file, tmp_dir):
    """Upload the contents of result JSON(s) to the perf dashboard."""
    build_properties = json.loads(build_properties)
    args = [
        '--tmp-dir', tmp_dir,
        '--buildername', build_properties['buildername'],
        '--buildnumber', build_properties['buildnumber'],
        '--name', name,
        '--configuration-name', configuration_name,
        '--results-file', json_to_upload,
        '--results-url', RESULTS_URL,
        '--got-revision-cp', build_properties['got_revision_cp'],
        '--got-v8-revision', build_properties['got_v8_revision'],
        '--got-webrtc-revision', build_properties['got_webrtc_revision'],
        '--oauth-token-file', oauth_file,
    ]
    if _is_histogram(json_to_upload):
        args.append('--send-as-histograms')
    upload_results_to_perf_dashboard.main(args)


def _is_histogram(json_file):
    with open(json_file) as f:
        data = json.load(f)
        return isinstance(data, list)
    return False


def _merge_json_output(output_json, jsons_to_merge):
    """Merges the contents of one or more results JSONs.

    Args:
      output_json: A path to a JSON file to which the merged results should be
        written.
      jsons_to_merge: A list of JSON files that should be merged.
    """
    merged_results = results_merger.merge_test_results(jsons_to_merge)
    with open(output_json, 'w') as f:
        json.dump(merged_results, f)
    return 0


def _process_perf_results(output_json, configuration_name,
                          service_account_file,
                          build_properties, task_output_dir):
    """Process one or more perf JSON results.

    Consists of merging the json-test-format output and uploading the perf
    test output (chartjson and histogram).

    Each directory in the task_output_dir represents one benchmark that was
    run. Within this directory, there is a subdirectory with the name of the
    benchmark that was run. In that subdirectory, there is a
    perftest-output.json file containing the performance results in histogram
    or dashboard json format and an output.json file containing the json test
    results for the benchmark.
    """
    directory_list = [
        f for f in listdir(task_output_dir)
        if not isfile(join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        benchmark_directory_list += [
            join(task_output_dir, directory, f)
            for f in listdir(join(task_output_dir, directory))
        ]
    test_results_list = []
    tmpfile_dir = tempfile.mkdtemp('resultscache')
    try:
        for directory in benchmark_directory_list:
            if '.reference' in directory:
                # We don't need to upload reference build data to the
                # flakiness dashboard since we don't monitor the ref build
                continue
            with open(join(directory, 'test_results.json')) as json_data:
                test_results_list.append(json.load(json_data))
        _merge_json_output(output_json, test_results_list)
        with oauth_api.with_access_token(service_account_file) as oauth_file:
            for directory in benchmark_directory_list:
                _upload_perf_results(join(directory, 'perf_results.json'),
                                     directory, configuration_name,
                                     build_properties, oauth_file, tmpfile_dir)
    finally:
        shutil.rmtree(tmpfile_dir)
    return 0


def main():
    """See collect_task.collect_task for more on the merge script API."""
    parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of bot the tests run on
    # For example, buildbot-test is the name of the obbs_fyi bot
    # configuration-name and results-url are set in the json file which is going
    # away tools/perf/core/chromium.perf.fyi.extras.json
    parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
    parser.add_argument('--service-account-file', help=argparse.SUPPRESS)
    parser.add_argument('--build-properties', help=argparse.SUPPRESS)
    parser.add_argument('--summary-json', help=argparse.SUPPRESS)
    parser.add_argument('--task-output-dir', help=argparse.SUPPRESS)
    parser.add_argument('-o', '--output-json', required=True,
                        help=argparse.SUPPRESS)
    parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
    args = parser.parse_args()
    return _process_perf_results(
        args.output_json, args.configuration_name,
        args.service_account_file,
        args.build_properties, args.task_output_dir)


if __name__ == '__main__':
    ...  # truncated in the source
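
In this merge script, task_output_dir is the root a task wrote its results into: the code collects every directory two levels below it as a benchmark directory, then merges each one's test_results.json and uploads each one's perf_results.json. That traversal, pulled out on its own (a sketch; the helper name and sample path are hypothetical, not part of the script):

import os

def find_benchmark_dirs(task_output_dir):
    # Collect every directory two levels below task_output_dir, the same
    # list _process_perf_results builds in benchmark_directory_list.
    for top in os.listdir(task_output_dir):
        top_path = os.path.join(task_output_dir, top)
        if os.path.isfile(top_path):  # mirrors the isfile() filter above
            continue
        for sub in os.listdir(top_path):
            yield os.path.join(top_path, sub)

# Hypothetical usage -- '.reference' runs are skipped when merging test
# results, exactly as the script above does:
#
#   for d in find_benchmark_dirs("/tmp/task_output"):
#       if ".reference" not in d:
#           print(os.path.join(d, "test_results.json"))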

run_harness.py

Source: run_harness.py (GitHub)

import sys
import os
import subprocess
import shutil
import shlex
import collections

from train_exit_status import TrainExitStatus
from graceful_interrupt import GracefulInterruptHandler
from termcolor import colored

TaskSpec = collections.namedtuple("TaskSpec", ["task_name", "variant_name", "run_params"])


def run(tasks_dir, output_dir, base_params, specs, stop_on_error=False, skip_complete=False):
    base_params_split = shlex.split(base_params)
    for spec in specs:
        print(colored("### Task {} ({}) ###".format(spec.task_name, spec.variant_name), "yellow"))
        run_params_split = shlex.split(spec.run_params)
        task_folder_train = os.path.join(tasks_dir, "{}_train".format(spec.task_name))
        if not os.path.isdir(task_folder_train):
            print(colored("Train directory doesn't exist. Parsing text file...", attrs=["dark"]))
            textfile = task_folder_train + ".txt"
            subprocess.run(["python3", "ggtnn_graph_parse.py", textfile], check=True)
        task_folder_valid = os.path.join(tasks_dir, "{}_valid".format(spec.task_name))
        if not os.path.isdir(task_folder_valid):
            print(colored("Validation directory doesn't exist. Parsing text file...", attrs=["dark"]))
            textfile = task_folder_valid + ".txt"
            try:
                subprocess.run(["python3", "ggtnn_graph_parse.py", textfile,
                                "--metadata-file", os.path.join(task_folder_train, "metadata.p")],
                               check=True)
            except subprocess.CalledProcessError:
                print(colored("Could not parse validation set! Skipping. You may need to regenerate the training set.", "magenta"))
                continue
        task_output_dir = os.path.join(output_dir, spec.task_name, spec.variant_name)
        if not os.path.isdir(task_output_dir):
            os.makedirs(task_output_dir)
        completed_file = os.path.join(task_output_dir, "completed.txt")
        if os.path.exists(completed_file):
            with open(completed_file, 'r') as f:
                reason = f.readline().strip()
                reason = colored(reason, "green" if (reason == "SUCCESS") else "red" if ("FAIL" in reason) else "magenta")
            print("Task is already completed, with result {}. Skipping...".format(reason))
            continue
        stdout_fn = os.path.join(task_output_dir, "stdout.txt")
        all_params = ["python3", "-u", "main.py", task_folder_train] + run_params_split + base_params_split
        all_params.extend(["--outputdir", task_output_dir])
        all_params.extend(["--validation", task_folder_valid])
        all_params.extend(["--set-exit-status"])
        all_params.extend(["--resume-auto"])
        all_params.extend(["--autopickle", os.path.join(output_dir, "model_cache")])
        print("Running command: " + " ".join(all_params))
        with open(stdout_fn, 'a', 1) as stdout_file:
            proc = subprocess.Popen(all_params, bufsize=1, universal_newlines=True,
                                    stdout=stdout_file, stderr=subprocess.STDOUT)
            with GracefulInterruptHandler() as handler:
                returncode = proc.wait()
                interrupted = handler.interrupted
        task_status = None
        was_error = False
        if returncode < 0:
            print(colored("Process was killed by a signal!", "magenta"))
            was_error = True
        elif skip_complete:
            print(colored("Skipping saving the result (skip_complete=True)"))
        else:
            task_status = TrainExitStatus(returncode)
            if task_status == TrainExitStatus.success:
                print(colored("SUCCESS! Reached desired correctness.", "green"))
                with open(completed_file, 'w') as f:
                    f.write("SUCCESS\n")
            elif task_status == TrainExitStatus.reached_update_limit:
                print(colored("FAIL! Reached update limit without attaining desired correctness.", "red"))
                with open(completed_file, 'w') as f:
                    f.write("FAIL_UPDATE_LIMIT\n")
            elif task_status == TrainExitStatus.overfitting:
                print(colored("FAIL! Detected overfitting.", "red"))
                with open(completed_file, 'w') as f:
                    f.write("FAIL_OVERFITTING\n")
            elif task_status in (TrainExitStatus.error, TrainExitStatus.malformed_command):
                print(colored("Got an error; skipping for now. See {} for details.".format(stdout_fn), "magenta"))
                was_error = True
            elif task_status == TrainExitStatus.nan_loss:
                print(colored("NaN loss detected; skipping for now.", "magenta"))
                was_error = True

        if task_status == TrainExitStatus.interrupted or interrupted:
            print(colored("Process was interrupted! Stopping...", "cyan"))
            break
        if was_error and stop_on_error:
            print(colored("Got an error. Exiting...", "cyan"))
            ...  # truncated in the source
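
Here task_output_dir is simply output_dir/task_name/variant_name, created on first use, and a completed.txt sentinel makes reruns idempotent: a task that already recorded a result is skipped. That pattern in isolation (a sketch; the helper name is mine, the layout and sentinel follow the harness above):

import os

def prepare_task_output_dir(output_dir, task_name, variant_name):
    # Same layout as run() above: one directory per task/variant pair.
    task_output_dir = os.path.join(output_dir, task_name, variant_name)
    os.makedirs(task_output_dir, exist_ok=True)
    # completed.txt holds a one-line result such as "SUCCESS" or
    # "FAIL_OVERFITTING"; its presence means the task already ran.
    completed_file = os.path.join(task_output_dir, "completed.txt")
    if os.path.exists(completed_file):
        with open(completed_file) as f:
            return task_output_dir, f.readline().strip()
    return task_output_dir, None

A caller can continue past any task whose second return value is not None, exactly as the harness does.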

read_tf_events.py

Source: read_tf_events.py (GitHub)

# coding=utf-8
#
# Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tensorflow as tf
import numpy as np
import argparse

acc_list = []
acc_dict = {}

parser = argparse.ArgumentParser()
parser.add_argument('--task_name', help='task_name')
parser.add_argument('--task_data_dir', help='task_data_dir')
parser.add_argument('--max_seq_length', help='max_seq_length')
parser.add_argument('--predict_batch_size', help='predict_batch_size')
parser.add_argument('--pretrained_model_dir', help='pretrained_model_dir')
parser.add_argument('--task_output_dir', help='task_output_dir')
args = parser.parse_args()

# get events files dir
events_name_list = os.listdir(os.path.join(args.task_output_dir, "eval"))
for e in tf.train.summary_iterator(os.path.join(args.task_output_dir, "eval", events_name_list[0])):
    for v in e.summary.value:
        if v.tag == 'eval_accuracy' or v.tag == 'eval_f':
            acc_list.append(v.simple_value)
            acc_dict[v.simple_value] = e.step

# save evaluation results of each checkpoint
results_file = os.path.join(args.task_output_dir, "eval", "all_eval_points_results.txt")
with open(results_file, "w") as writer:
    for ele in acc_list:
        writer.write('step ' + str(acc_dict[ele]) + ': ')
        writer.write(str(ele))
        writer.write('\n')
    writer.write('----best_result: ')
    best_re = np.max(np.array(acc_list))
    writer.write(str(best_re))

# do predict
if 'ner' in args.task_name:
    predict_script = "run_seq_labelling_predict.sh"
    predict_cmd = ["bash", predict_script, 'ner', args.task_data_dir, args.pretrained_model_dir,
                   args.task_output_dir + 'model.ckpt-' + str(acc_dict[best_re]),
                   args.max_seq_length, args.predict_batch_size, args.task_output_dir]
else:
    predict_script = "run_clf_predict.sh"
    predict_cmd = ["bash", predict_script, args.task_name, args.task_data_dir, args.pretrained_model_dir,
                   args.task_output_dir + 'model.ckpt-' + str(acc_dict[best_re]),
                   args.max_seq_length, args.predict_batch_size, args.task_output_dir]

try:
    os.system(" ".join(predict_cmd))
except:
    ...  # truncated in the source
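
The core move here is iterating a TensorFlow events file with tf.train.summary_iterator (a TF 1.x API; TF 2.x keeps it as tf.compat.v1.train.summary_iterator) and indexing scalar summaries by tag. Reduced to one function (a sketch; the function name is mine, the tags and eval/ layout mirror the script above):

import os
import tensorflow as tf

def best_eval_step(task_output_dir, tags=("eval_accuracy", "eval_f")):
    # Read the first events file under <task_output_dir>/eval, as above.
    eval_dir = os.path.join(task_output_dir, "eval")
    events_file = os.path.join(eval_dir, os.listdir(eval_dir)[0])
    best = None  # (value, step) of the best evaluation seen so far
    for event in tf.train.summary_iterator(events_file):
        for value in event.summary.value:
            if value.tag in tags and (best is None or value.simple_value > best[0]):
                best = (value.simple_value, event.step)
    return best

The returned step is what the script splices into the model.ckpt-<step> path handed to the predict script.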

