How to use the results_dir_basename variable in avocado

Best Python code snippet using avocado_python

archive_layout_test_results.py

Source:archive_layout_test_results.py Github

copy

Full Screen

#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A tool to archive layout test results.

To archive files on Google Storage, pass a GS bucket name via --gs-bucket.
To control access to archives, pass a value for --gs-acl (e.g. 'public-read',
see https://developers.google.com/storage/docs/accesscontrol#extension
for other supported canned-acl values). If no gs_acl key is given,
then the bucket's default object ACL will be applied (see
https://developers.google.com/storage/docs/accesscontrol#defaultobjects).

When this is run, the current directory (cwd) should be the outer build
directory (e.g., chrome-release/build/).

For a list of command-line options, call this script with '--help'.
"""

import argparse
import logging
import os
import re
import socket
import sys

from common import archive_utils
from common import chromium_utils
from slave import build_directory
from slave import slave_utils


def _CollectZipArchiveFiles(output_dir):
  """Returns a list of layout test result files to archive in a zip file.

  Args:
    output_dir: directory produced by the layout test run.

  Returns:
    A list of paths relative to output_dir.
  """
  file_list = []
  for path, _, files in os.walk(output_dir):
    # Strip the output_dir prefix plus the path separator so zip entries are
    # relative.  (The original hard-coded '\\' here, which computed the wrong
    # prefix length on non-Windows hosts; len + 1 is separator-agnostic.)
    rel_path = path[len(output_dir) + 1:]
    for name in files:
      if _IsIncludedInZipArchive(name):
        file_list.append(os.path.join(rel_path, name))

  # The top-level summary page is always worth keeping when present.
  if os.path.exists(os.path.join(output_dir, 'results.html')):
    file_list.append('results.html')

  # Windows bots additionally produce Apache-style server logs.
  if sys.platform == 'win32':
    if os.path.exists(os.path.join(output_dir, 'access_log.txt')):
      file_list.append('access_log.txt')
    if os.path.exists(os.path.join(output_dir, 'error_log.txt')):
      file_list.append('error_log.txt')

  return file_list


def _IsIncludedInZipArchive(name):
  """Returns True if a file should be included in the zip, False otherwise."""
  if '-stack.' in name or '-crash-log.' in name:
    return True
  extension = os.path.splitext(name)[1]
  if '-actual.' in name and extension in ('.txt', '.png', '.checksum', '.wav'):
    return True
  if '-expected.' in name:
    return True
  if '-wdiff.' in name:
    return True
  if name.endswith('-diff.txt') or name.endswith('-diff.png'):
    return True
  if name.endswith('.json'):
    return True
  return False


def archive_layout(args):
  """Zips layout test results and uploads them to Google Storage.

  Uploads the zip, the raw results directory, and a LAST_CHANGE marker file
  both to a per-build-number location (cached for a year) and to a 'results'
  'latest' location (never cached).

  Args:
    args: parsed argparse namespace from _ParseArgs().

  Returns:
    0 on success (upload helpers raise on failure).
  """
  chrome_dir = os.path.abspath(args.build_dir)
  results_dir_basename = os.path.basename(args.results_dir)
  args.results_dir = os.path.abspath(args.results_dir)
  print('Archiving results from %s' % args.results_dir)
  staging_dir = args.staging_dir or slave_utils.GetStagingDir(chrome_dir)
  print('Staging in %s' % staging_dir)
  if not os.path.exists(staging_dir):
    os.makedirs(staging_dir)

  file_list = _CollectZipArchiveFiles(args.results_dir)
  zip_file = chromium_utils.MakeZip(staging_dir,
                                    results_dir_basename,
                                    file_list,
                                    args.results_dir)[1]

  wc_dir = os.path.dirname(chrome_dir)
  last_change = slave_utils.GetHashOrRevision(wc_dir)

  # Builder names may contain characters that are awkward in GS paths.
  builder_name = re.sub('[ .()]', '_', args.builder_name)
  build_number = str(args.build_number)

  print('last change: %s' % last_change)
  print('build name: %s' % builder_name)
  print('build number: %s' % build_number)
  print('host name: %s' % socket.gethostname())

  # Create a file containing last_change revision. This file will be uploaded
  # after all layout test results are uploaded so the client can check this
  # file to see if the upload for the revision is complete.
  # See crbug.com/574272 for more details.
  last_change_file = os.path.join(staging_dir, 'LAST_CHANGE')
  with open(last_change_file, 'w') as f:
    f.write(last_change)

  # Copy the results to a directory archived by build number.
  gs_base = '/'.join([args.gs_bucket, builder_name, build_number])
  gs_acl = args.gs_acl
  # These files never change, cache for a year.
  cache_control = "public, max-age=31556926"
  slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl,
                             cache_control=cache_control,
                             add_quiet_flag=True)
  slave_utils.GSUtilCopyDir(args.results_dir, gs_base, gs_acl=gs_acl,
                            cache_control=cache_control,
                            add_quiet_flag=True)
  slave_utils.GSUtilCopyFile(last_change_file,
                             gs_base + '/' + results_dir_basename,
                             gs_acl=gs_acl,
                             cache_control=cache_control,
                             add_quiet_flag=True)

  # And also to the 'results' directory to provide the 'latest' results
  # and make sure they are not cached at all (Cloud Storage defaults to
  # caching w/ a max-age=3600).
  gs_base = '/'.join([args.gs_bucket, builder_name, 'results'])
  cache_control = 'no-cache'
  slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl,
                             cache_control=cache_control,
                             add_quiet_flag=True)
  slave_utils.GSUtilCopyDir(args.results_dir, gs_base, gs_acl=gs_acl,
                            cache_control=cache_control,
                            add_quiet_flag=True)
  slave_utils.GSUtilCopyFile(last_change_file,
                             gs_base + '/' + results_dir_basename,
                             gs_acl=gs_acl,
                             cache_control=cache_control,
                             add_quiet_flag=True)
  return 0


def _ParseArgs():
  """Builds the argument parser and returns the parsed namespace."""
  parser = argparse.ArgumentParser()
  # TODO(crbug.com/655798): Make --build-dir not ignored.
  parser.add_argument('--build-dir', help='ignored')
  parser.add_argument('--results-dir', required=True,
                      help='path to layout test results')
  parser.add_argument('--builder-name', required=True,
                      help='The name of the builder running this script.')
  parser.add_argument('--build-number', type=int, required=True,
                      help='Build number of the builder running this script.')
  parser.add_argument('--gs-bucket', required=True,
                      help='The Google Storage bucket to upload to.')
  parser.add_argument('--gs-acl',
                      help='The access policy for Google Storage files.')
  parser.add_argument('--staging-dir',
                      help='Directory to use for staging the archives. '
                           'Default behavior is to automatically detect '
                           'slave\'s build directory.')
  slave_utils_callback = slave_utils.AddArgs(parser)
  args = parser.parse_args()
  # --build-dir is ignored; the real output directory is auto-detected.
  args.build_dir = build_directory.GetBuildOutputDirectory()
  slave_utils_callback(args)
  return args


def main():
  args = _ParseArgs()
  logging.basicConfig(level=logging.INFO,
                      format='%(asctime)s %(filename)s:%(lineno)-3d'
                             ' %(levelname)s %(message)s',
                      datefmt='%y%m%d %H:%M:%S')
  return archive_layout(args)


if '__main__' == __name__:
  # NOTE(review): the scraped source was truncated here; the conventional
  # entry-point call is reconstructed below — confirm against the upstream file.
  sys.exit(main())

Full Screen

Full Screen

2_calculate_success_rate_2block.py

Source:2_calculate_success_rate_2block.py Github

copy

Full Screen

# load URDFs and get the canonical height for each
# load directory
# for each episode, if the stable height is higher than the canonical height,
# we will treat it as a success; otherwise, it will be a failure
# this corresponds with royal-brook-1975_checkpoint50
import glob
import json
import os
import pprint
import sys
from pathlib import Path

import numpy as np
import pybullet as p

from bandu.config import TABLE_HEIGHT
from utils import bandu_util
from utils.pb_util import get_object_height


def stddev(binary_list_of_success_failures):
    """Sample standard deviation (ddof=1) of a binary success/failure list.

    NOTE(review): a leftover `pdb.set_trace()` breakpoint was removed from
    this function; it halted every call.
    """
    values = np.array(binary_list_of_success_failures)
    mean = np.sum(values) / len(values)
    # Bessel's correction (n - 1) -> unbiased sample variance.
    var = np.sum((values - mean) ** 2) / (len(values) - 1)
    return np.sqrt(var)


urdf_dir = sys.argv[1]
results_dir = sys.argv[2]
results_dir_basename = os.path.basename(os.path.normpath(results_dir))
max_trials = int(sys.argv[3])  # maximum number of trials before trials get ignored

# Collect every URDF under urdf_dir (both lowercase and uppercase extensions).
urdfs = [f for f in glob.glob(str(Path(urdf_dir) / "**/*.urdf"), recursive=True)]
urdfs += [f for f in glob.glob(str(Path(urdf_dir) / "**/*.URDF"), recursive=True)]

# mapping name to height
height_dict = dict()
best_theta_list_dict = dict()
results_save_dir = "../out/bandu_results"

# Headless physics server: each object is loaded only to measure its height.
p.connect(p.DIRECT)
for urdf_path in urdfs:
    print("ln34 urdf_path")
    print(urdf_path)
    obj_id = p.loadURDF(urdf_path, globalScaling=1.5)
    obj_height = get_object_height(obj_id)
    object_name = bandu_util.parse_urdf_xml_for_object_name(
        os.path.dirname(urdf_path) + "/model.config")
    height_dict[object_name] = obj_height

with open(str(Path(results_save_dir) / "height_dict.json"), "w") as fp:
    json.dump(height_dict, fp, indent=4)

height_success_dict = dict()
num_stacked_success_dict = dict()

# Only the "step 1" episode JSONs are scored.
json_paths = [f for f in glob.glob(str(Path(results_dir) / "**/*.json"), recursive=True)]
step_1_json_paths = [f for f in json_paths if "_1_" in f]

height_object_successes_dict = dict()
total_trials_dict = dict()
num_stacked_successes_dict = dict()
for k in height_dict.keys():
    height_object_successes_dict[k] = 0
    total_trials_dict[k] = 0
    best_theta_list_dict[k] = []
    num_stacked_successes_dict[k] = 0

for path in sorted(step_1_json_paths):
    # extract object names: the episode folder is "foundation_<object>"
    objects_folder_name = os.path.basename(os.path.dirname(path))
    non_foundation_obj_name = objects_folder_name.split("foundation_")[1]
    obj_height = height_dict[non_foundation_obj_name]
    with open(path, "r") as fp:
        dic = json.load(fp)
    urdf_path = Path(urdf_dir) / non_foundation_obj_name
    if "best_theta" not in dic.keys():
        # Sentinel value for episodes that never produced a best_theta.
        best_theta_list_dict[non_foundation_obj_name].append(99)
    else:
        if np.isnan(dic['best_theta']):
            continue
        best_theta_list_dict[non_foundation_obj_name].append(dic['best_theta'])
    recorded_tower_height = dic['tower_height'] - TABLE_HEIGHT
    gt_tower_height = obj_height + height_dict['foundation']
    if total_trials_dict[non_foundation_obj_name] < max_trials:
        # 2% tolerance on the canonical stacked height.
        if recorded_tower_height > gt_tower_height * .98:
            height_object_successes_dict[non_foundation_obj_name] += 1
        assert dic['num_stacked'] <= 2, dic['num_stacked']
        if dic['num_stacked'] == 2:
            num_stacked_successes_dict[non_foundation_obj_name] += 1
        total_trials_dict[non_foundation_obj_name] += 1

best_theta_means_dict = dict()
best_theta_stddevs_dict = dict()

for obj_name in total_trials_dict.keys():
    # each obj name should have exactly max trials
    assert total_trials_dict[obj_name] == max_trials, total_trials_dict[obj_name]

for obj_name in height_dict.keys():
    height_success_dict[obj_name] = (
        height_object_successes_dict[obj_name] / total_trials_dict[obj_name])
    num_stacked_success_dict[obj_name] = (
        num_stacked_successes_dict[obj_name] / total_trials_dict[obj_name])
    best_theta_means_dict[obj_name] = np.mean(best_theta_list_dict[obj_name])
    best_theta_stddevs_dict[obj_name] = np.std(best_theta_list_dict[obj_name])
    # NOTE(review): this compares a raw success COUNT against a success RATE
    # (fraction <= 1); it was probably meant to compare the two rates.
    # Behavior preserved — confirm intent before changing.
    assert num_stacked_successes_dict[obj_name] >= height_success_dict[obj_name]

rdb = Path(results_save_dir) / results_dir_basename
rdb.mkdir(exist_ok=True, parents=True)

with open(str(Path(results_save_dir) / results_dir_basename / f"maxtrials{max_trials}_height_success_means_dict.json"), "w") as fp:
    json.dump(height_success_dict, fp, indent=4)
with open(str(Path(results_save_dir) / results_dir_basename / f"maxtrials{max_trials}_num_stacked_success_dict.json"), "w") as fp:
    json.dump(num_stacked_success_dict, fp, indent=4)
with open(str(Path(results_save_dir) / results_dir_basename / f"maxtrials{max_trials}_best_theta_means_dict.json"), "w") as fp:
    json.dump(best_theta_means_dict, fp, indent=4)
with open(str(Path(results_save_dir) / results_dir_basename / f"maxtrials{max_trials}_best_theta_stddevs_dict.json"), "w") as fp:
    json.dump(best_theta_stddevs_dict, fp, indent=4)

print("Height success dict")
pprint.pprint(height_success_dict)
print("Best theta means dict")
pprint.pprint(best_theta_means_dict)
print("Best theta stddevs dict")
pprint.pprint(best_theta_stddevs_dict)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful