How to use the create_suites method in Avocado

Best Python code snippet using avocado_python

testing_evolution.py

Source:testing_evolution.py Github

copy

Full Screen

#!/usr/bin/python3
"""Federated-clustering evolution experiments.

Runs suites of federated K-Means tests (optionally multiprocessed),
records per-round and final adjusted-Rand accuracies, and dumps the
results to JSON for later analysis/plotting.

Reconstructed from a line-number-mangled extraction; logic is unchanged
except for the reviewed fixes flagged with ``FIX:`` comments inline.
"""
import threading
import multiprocessing
import random
import time
import math
import copy
import json
from collections import OrderedDict

import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics

from Testing.testing_evolution_create import create_suites, create_tests
from Testing.config import layers
import Testing.analysis as analysis
from Testing.devices import Device, Server
from Algorithms.k_means import KMeans_Server

random.seed(242)
# FIX: the original called np.random.rand(242), which *draws* 242 random
# numbers and discards them instead of seeding NumPy's RNG. Seed properly
# so numpy-based sampling is reproducible, matching random.seed above.
np.random.seed(242)

NON_FED_KEY = "Traditional K-Means"
ENABLE_ROUND_PROGRESS_PLOT = True
PLOT = False
RUN_NON_FED = True
MULTIPROCESSED = True
MAX_PROC = 40


class MultiProcessing:
    """Coordinates running DeviceSuite tests, optionally in parallel processes."""

    def __init__(self, MULTIPROCESSED=True):
        self.MULTIPROCESSED = MULTIPROCESSED
        # Manager-backed lists let child processes append round accuracies.
        self.manager = multiprocessing.Manager()

    def convert(self, res):
        """Convert shared-memory result objects into plain JSON-safe dicts."""
        o = {}
        for k, v in res.items():
            o[k] = {}
            for k2, v2 in v.items():
                o[k][k2] = {
                    "end": v2["end"].value,
                    "rounds": list(v2["rounds"]),
                }
        return o

    def run_single(self, *, result_dict, suite, test, progress_lock,
                   number_of_tests, number_of_tests_finished, res):
        """Run one suite/test combination and record its accuracies.

        Designed to run in a child process: results land in the shared
        ``result_dict`` and an intermediate checkpoint file.
        """
        name = suite["name"] + ": " + test["name"]
        d = DeviceSuite(suite, test, name=name)
        if ENABLE_ROUND_PROGRESS_PLOT:
            result_dict["rounds"].extend(d.run_rounds_with_accuracy())
        else:
            d.run_rounds_with_accuracy()
        result_dict["end"].value = d.accuracy()
        d.complete()
        with progress_lock:
            number_of_tests_finished.value += 1
            # Checkpoint intermediate results so a crash loses little work.
            # NOTE(review): assumed to sit inside the lock in the original
            # (indentation was lost in extraction); keeping it locked is the
            # safe choice for concurrent writers to one file.
            o = self.convert(res)
            with open('results-inter2.json', 'w') as outfile:
                json.dump(o, outfile)
            # FIX: the original printed this identical line three times in a
            # row (copy-paste duplication); once is enough.
            print('\tProgress: {}/{} Complete \t {} \t {}'.format(
                number_of_tests_finished.value, number_of_tests,
                result_dict["end"].value, name))

    def run(self, construction, **kwargs):
        """Take a (count, specs, results) construction and run it in parallel."""
        (number_of_tests, specs, results_dict) = construction
        processes = []
        number_of_tests_finished = multiprocessing.Value('i', 0)
        print("\tRunning {} Tests".format(number_of_tests))
        for spec in specs:
            # FIX: the original rebound the ``kwargs`` parameter here,
            # shadowing it; use a distinct local name for the job arguments.
            job = spec
            job["progress_lock"] = multiprocessing.Lock()
            job["number_of_tests"] = number_of_tests
            job["number_of_tests_finished"] = number_of_tests_finished
            job["res"] = results_dict
            if not self.MULTIPROCESSED:
                self.run_single(**job)
            else:
                p = multiprocessing.Process(target=self.run_single, kwargs=job)
                processes.append(p)
                p.start()
        # self.run_non_fed(results_dict)
        if self.MULTIPROCESSED:
            for process in processes:
                process.join()
        return results_dict

    def createResultObjItem(self):
        """One shared result slot: final accuracy plus per-round accuracies."""
        return {
            "end": multiprocessing.Value("d", 0.0, lock=False),
            "rounds": self.manager.list(),
        }

    def constructAndRun(self, *args, **kwargs):
        """Build the process specs, then run them."""
        res = analysis.calculate_time(self.constructProcessTests)(*args, **kwargs)
        return self.run(res, **kwargs)

    def non_fed_k_means(self, suite):
        """Baseline: plain (non-federated) K-Means accuracy on the suite's dataset."""
        _, kmeans = KMeans_Server.find_optimal_k_silhouette(suite["dataset"].data)
        pred_labels = kmeans.predict(suite["dataset"].data)
        labels = suite["dataset"].true_labels
        return metrics.adjusted_rand_score(labels, pred_labels)

    def run_non_fed(self, results_dict, suites=None, cond=False, create=False):
        """Record the traditional K-Means baseline for each eligible suite."""
        if suites is None:
            suites = self.suites
        for i, suite in enumerate(suites):
            key = suite["name"]
            if create:
                results_dict[key] = {}
            if suite["non_fed"] and (RUN_NON_FED or cond):
                results_dict[key][NON_FED_KEY] = self.createResultObjItem()
                a = self.non_fed_k_means(suite)
                results_dict[key][NON_FED_KEY]["end"].value = a
                # Repeat the single baseline accuracy across all rounds so it
                # plots as a flat reference line.
                results_dict[key][NON_FED_KEY]["rounds"].extend([a] * suite["rounds"])
                print("Finished Traditional K Means on Suite {}".format(i))
        return results_dict

    def constructProcessTests(self, suites, tests, current=None, **kwargs):
        """Create a list of process specs, skipping tests already complete.

        ``current`` (optional) holds previously saved results; a suite/test
        pair is re-run only if it is absent there or has no recorded rounds.
        Returns ``(number_of_tests, specs, results_dict)``.
        """
        number_of_tests = 0
        specs = []
        results_dict = OrderedDict()
        for suite in suites:
            key = suite["name"]
            results_dict[key] = OrderedDict()
            added = 0
            for test in tests:
                if current is None or key not in current or len(current[key][test["name"]]["rounds"]) == 0:
                    if current is not None and key not in current:
                        results_dict[key] = {}
                    added += 1
                    results_dict[key][test["name"]] = self.createResultObjItem()
                    specs.append(dict(
                        result_dict=results_dict[key][test["name"]],
                        suite=suite,
                        test=test,
                    ))
            if added == 0:
                del results_dict[key]
            number_of_tests += added
        self.suites = suites
        return number_of_tests, specs, results_dict


class DeviceSuite:
    """A server plus grouped devices executing one suite/test timeline."""

    # Most recent prediction labels, cached for plotting.
    last_pred = None

    def __init__(self, suite, test, name=None):
        # FIX: ``server``/``devices``/``groups`` were mutable *class*
        # attributes in the original, so every DeviceSuite instance shared
        # (and polluted) one ``groups`` dict. Make them per-instance.
        self.server = None
        self.devices = []
        self.groups = {}
        self.suite = copy.deepcopy(suite)
        self.test = test
        self.name = name
        for group, f in self.suite["groups"].items():
            self.groups[group] = [
                Device(self.test["device"], f()["sample"](), id_num=(i))
                for i in range(self.suite["devices"])
            ]
        self.server = Server(self.test["server"], self.groups)
        rounds = len(self.suite["timeline"])
        max_devices = 0

        # Optionally scale each round's device counts by a test multiplier.
        if "device_multi" in self.test:
            for t, groups in self.suite["timeline"].items():
                for group, num_devices in groups.items():
                    self.suite["timeline"][t][group] = num_devices * self.test["device_multi"]

        for x in self.suite["timeline"].values():
            max_devices = max(sum(x.values()), max_devices)
        # print("timeline", name, max_devices, self.suite["timeline"])

        if PLOT:
            self.server.define_plotter(rounds=rounds, devices=max_devices)

    def run_rounds(self):
        """Run every round of the timeline without accuracy tracking."""
        for _round, groups in sorted(self.suite["timeline"].items()):
            self.server.run_round(groups)
            print("\t\tCompleted round {}/{}".format(_round + 1, len(self.suite["timeline"])))

    def get_population_of_round(self, target_round):
        """Data/labels of every group active in any round <= target_round."""
        indicies = None
        group_set = set()
        for _round, groups in sorted(self.suite["timeline"].items()):
            if _round <= target_round:
                for g, val in groups.items():
                    if val > 0 and g not in group_set:
                        group_set.add(g)
                        f = self.suite["groups"][g]
                        _indicies = f()["population"]()
                        if indicies is None:
                            indicies = _indicies
                        else:
                            indicies = np.concatenate((indicies, _indicies), 0)
        indicies = np.unique(indicies)
        data, labels = self.suite["dataset"].get_data_for_indicies(indicies), self.suite["dataset"].get_labels_for_indicies(indicies)
        return data, labels

    def plot_round(self, target_round):
        """Accuracy on the population seen so far; also plot it if enabled."""
        data, labels = self.get_population_of_round(target_round)
        acc = self.sub_accuracy(data, labels)
        if self.server.PLOT:
            self.server.plotter.plot_a(int(target_round), data, self.last_pred)
        return acc

    def run_rounds_with_accuracy(self, return_rounds=True):
        """Run all rounds; return the per-round accuracy list."""
        # data = self.suite["dataset"].data
        round_accs = []
        for _round, groups in sorted(self.suite["timeline"].items()):
            self.server.run_round(groups, int(_round))
            if return_rounds:
                round_accs.append(self.plot_round(_round))
            print("\t\tCompleted round {}/{}".format(_round + 1, len(self.suite["timeline"])))
        return round_accs

    def complete(self):
        """Finalize the run; persist the plot when plotting is enabled."""
        if self.server.PLOT:
            self.server.plotter.save(self.name)

    def accuracy(self):
        """Adjusted-Rand accuracy of the server over the full dataset."""
        data = self.suite["dataset"].data
        labels = self.suite["dataset"].true_labels
        pred_labels = self.server.classify(data)
        self.last_pred = pred_labels
        return metrics.adjusted_rand_score(labels, pred_labels)

    def sub_accuracy(self, data, labels):
        """Adjusted-Rand accuracy over a subset of the data."""
        pred_labels = self.server.classify(data)
        self.last_pred = pred_labels
        return metrics.adjusted_rand_score(labels, pred_labels)


def u(obj1, obj2):
    """Two-level dict merge: obj2's leaf entries overlaid onto a copy of obj1."""
    o = copy.deepcopy(obj1)
    for k, v in obj2.items():
        if k not in o:
            o[k] = {}
        for k2, v2 in v.items():
            o[k][k2] = v2
    return o


def evaluate_accuracy_evolution():
    """Run all suite/test combinations in bounded partitions and save results."""
    suites = analysis.calculate_time(create_suites)(layers)
    tests = analysis.calculate_time(create_tests)(layers)
    l = len(suites) * len(tests)
    max_proc = MAX_PROC
    # Partition the suites so each batch stays within roughly MAX_PROC
    # concurrent processes (each suite spawns len(tests) processes).
    split_n = math.floor(l / (l / max_proc))
    split_n = math.ceil(split_n / len(tests))
    partitions = [suites[i:i + split_n] for i in range(0, len(suites), split_n)]
    sets = len(partitions)
    print("Running {} Sets of Tests".format(sets))
    current = None
    # with open('results-updated.json') as f:
    #     current = json.load(f)
    m = MultiProcessing(MULTIPROCESSED)
    results = {}
    for i, part in enumerate(partitions):
        res = analysis.calculate_time(m.constructAndRun)(part, tests, current=current)
        results.update(res)
        print("Progress: {} of {} Complete".format(i + 1, sets))
    o = m.convert(results)
    with open('results-new.json', 'w') as outfile:
        json.dump(o, outfile)
    with open('results-updated.json', 'w') as outfile:
        if current is None:
            current = {}
        json.dump(u(current, o), outfile)
    analysis.save_test_results(results)
    if ENABLE_ROUND_PROGRESS_PLOT:
        analysis.calculate_time(analysis.plot_rounds)(results)


def run_non_fed_and_save():
    """Run only the traditional (non-federated) K-Means baselines and save."""
    suites = analysis.calculate_time(create_suites)(layers)
    tests = analysis.calculate_time(create_tests)(layers)
    current = None
    # with open('results.json') as f:
    #     current = json.load(f)
    m = MultiProcessing(MULTIPROCESSED)
    results = {}
    results = m.run_non_fed(results, suites, True, True)
    o = m.convert(results)
    with open('results-traditional-new.json', 'w') as outfile:
        json.dump(o, outfile)
    with open('results-traditional-updated.json', 'w') as outfile:
        # FIX: with the load of ``current`` commented out it stays None and
        # u(None, o) raised TypeError; default to {} exactly as
        # evaluate_accuracy_evolution does.
        if current is None:
            current = {}
        json.dump(u(current, o), outfile)


if __name__ == "__main__":
    analysis.calculate_time(evaluate_accuracy_evolution)()
    # Commented-out result-merging scratch code from the original, retained:
    # with open('results-updated.json') as f:
    #     current = json.load(f)
    # with open('results-new.json') as f:
    #     o = json.load(f)
    # with open('results-updated.json', 'w') as outfile:
    #     json.dump(u(current, o), outfile)

Full Screen

Full Screen

all_tests.py

Source:all_tests.py Github

copy

Full Screen

...34 smtp.connect(send_server)35 smtp.login(username,passwd)36 smtp.sendmail(sender,receiver,msg.as_string())37 smtp.quit()38def create_suites():39 base_dir = r"D:\Users\Administrator\workspace\UnitTest\test_case"40 thread_dir = [] 41 case_dir = []42 thread_suite = []43 44 all_files = os.listdir(base_dir)45 for file_name in all_files:46 if "thread" in file_name:47 thread_dir.append(file_name)48 #print "thread_dir = ",thread_dir,len(thread_dir)49 50 for thread in thread_dir:51 test_dir = []52 thread_path = os.path.join(base_dir,thread)53 if os.path.isdir(thread_path):54 tests = os.listdir(thread_path)55 if len(tests) > 0:56 for file in tests:57 if "test_" in file:58 test_dir.append(file)59 #print test_dir60 61 if len(test_dir) > 0:62 test_suite = unittest.TestSuite()63 for dir in test_dir: 64 test_dir_path = os.path.join(thread_path,dir) 65 #print test_dir_path 66 discover = unittest.defaultTestLoader.discover(str(test_dir_path), pattern = "test_*.py", top_level_dir = None)67 for suite in discover:68 for test_case in suite:69 test_suite.addTests(test_case)70 unittest.defaultTestLoader._top_level_dir = None71 #print "test_suite",test_suite72 73 '''74 test_case_path = os.path.join(thread_path,dir)75 test_files = os.listdir(test_case_path)76 for case in test_files:77 if re.match("^test_[A-Za-z0-9]*.py", case):78 test_suite.addTest(unittest.makeSuite(case)))79 '''80 thread_suite.append(test_suite) 81 #for suite in thread_suite:print type(suite),suite82 return thread_suite,thread_dir83def mutilty_process_run(suite,process_dir):84 now = time.strftime("%Y%m%d%H%M%S",time.localtime(time.time())) 85 report_name = r"D:\Users\Administrator\workspace\UnitTest\report\\" + "result_" + now + ".html"86 fp = file(report_name,"wb")87 #fp.write("xiaolong1")88 process_list = []89 for i in suite: 90 #print i 91 runner = HTMLTestRunner.HTMLTestRunner(stream=fp,92 title=u"测试报告",93 description=u"用例执行情况")94 runner.run(i)95 #process = 
multiprocessing.Process(target=runner.run,args=(suite[i],))96 #process_list.append(process)97 #fp.write("xiaolong2")98 for pro in process_list:pro.start()99 for pro in process_list:pro.join()100 101 102if __name__ == "__main__":103 suite,process_dir = create_suites()104 mutilty_process_run(suite,process_dir)105 ''' 106 test_suites = unittest.TestSuite()107 test_case_lists = all_test_case_lists.all_test_case_list()108 for test_case in test_case_lists:109 test_suites.addTest(unittest.makeSuite(test_case))110 print "test_suites",type(test_suites),test_suites111 112 #test_suites.addTest(unittest.makeSuite(test_baidu.TestBaidu))113 #test_suites.addTest(unittest.makeSuite(test_xiaolong1.Test_Xiaolong1))114 115 now = time.strftime("%Y%m%d%H%M%S",time.localtime())116 filename = r"D:\Users\Administrator\workspace\UnitTest\report\result_" + now + ".html"117 fp = file(filename,"wb")...

Full Screen

Full Screen

test_metadata.py

Source:test_metadata.py Github

copy

Full Screen

...3"""4import pytest5from ltp.metadata import RuntestMetadata6@pytest.fixture(autouse=True)7def create_suites(tmpdir):8 """9 Create testing suites.10 """11 root = tmpdir.mkdir("runtest")12 suitefile = root.join("suite01")13 suitefile.write("mytest01 mybin -a\n"14 "mytest02 mybin -b\n"15 "mytest03 mybin -c\n"16 "mytest04 mybin -d\n")17 suitefile = root.join("suite02")18 suitefile.write("mytest05 mybin -a\n"19 "mytest06 mybin -b\n"20 "mytest07 mybin -c\n"21 "mytest08 mybin -d\n")...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful