Best Python code snippet using locust
overlapped_count_episode_time_constraint.py
Source:overlapped_count_episode_time_constraint.py  
...139        time_cost = time.time() - time_start140        # print("countIns cost:", (time.time() - time_start))141        print(l_last, file=open("%s_%d_%.3f_%d.%s" % (filename, 33, min_sup, len(l_last), 'exp1'), 'w'))142        return l_last, time_cost143def test_avg(s, file, ck):144    time_constraint = 1000000145    sum_time = 0146    REPEAT = 3147    for i in range(REPEAT):148        ap = count_episode_time_constraint(time_constraint, 0, s)149        cnt, time_cost = ap.do(file, ck)150        sum_time += time_cost151    print('target episdoe size: %d' % len(list(ck)[0]))152    print(time_constraint, ck, cnt, ', avg_cost_time=', '%.7f' % (sum_time / REPEAT))153def test():154    folder = 'exp_settings&data/real-data/'155    names = list(os.walk(folder))[0][2]156    files = [folder + i for i in names if i.endswith('.txt')]157    files = [u'exp_settings&data/syn-data/æ±æ».txt']158    for file in files:159        print('\n', file)160        f = open(file)161        s = f.readline()162        f.close()163        s = eval(s)164        # 2_real165        # test_avg(s, file, {(67,)})166        # test_avg(s, file, {(68, 67, 68)})167        # test_avg(s, file, {(68, 68, 67, 67, 68)})168        # test_avg(s, file, {(67, 68, 67, 68, 67, 68, 68)})169        # 3_real170        # test_avg(s, file, {(68, 22, 68)})171        # test_avg(s, file, {(68, 68, 67, 67, 22)})172        # test_avg(s, file, {(67, 68, 67, 68, 67, 68, 68)})173        # 1_syn174        # test_avg(s, file, {('AI', 'AI', 'S')})175        # test_avg(s, file, {('P', 'Q', 'P')})176        # test_avg(s, file, {('B', 'Q', 'J')})177        # test_avg(s, file, {('AA', 'AA', 'J')})178        #179        # test_avg(s, file, {('V', 'P', 'Q', 'V', 'Q')})180        # test_avg(s, file, {('B', 'AA', 'P', 'B', 'AA')})181        # test_avg(s, file, {('J', 'AA', 'AA', 'J', 'J')})182        # test_avg(s, file, {('J', 'J', 'J', 'J', 'AA')})183        #184        # test_avg(s, file, {('V', 'B', 'B', 'AA', 'V', 'B', 'V')})185   
     # test_avg(s, file, {('J', 'J', 'AA', 'P', 'B', 'B', 'J')})186        # 2_syn187        # test_avg(s, file, {('G',)})188        # test_avg(s, file, {('AH', 'J', 'AA')})189        # test_avg(s, file, {('AA', 'J', 'B', 'J', 'J')})190        # test_avg(s, file, {('J', 'AA', 'J', 'C', 'B', 'Y', 'J')})191        # 3_syn192        test_avg(s, file, {('AH', 'J', 'AA')})193        test_avg(s, file, {('J', 'AA', 'J', 'C', 'V')})194        test_avg(s, file, {('J', 'AA', 'J', 'C', 'V', 'AA', 'J')})195        #(2, 68, 65, 44, 22, 12, 44, 22, 8),(2, 68, 65, 44, 22, 12, 44),(2, 68,196        #65, 44, 59),(2, 68, 65)197if __name__ == '__main__':...summary.py
Source:summary.py  
1# -*- coding: utf-8 -*-2'''3print summary4'''5from __future__ import print_function6from collections import Counter, OrderedDict7import string8import re9import argparse10import json11import sys12reload(sys)13sys.setdefaultencoding('utf-8')14import pdb15import os16import math17import numpy as np18import collections19from prettytable import PrettyTable20def print_summary():21	lscmd = os.popen('ls '+sys.argv[1]+'/result.*').read()22	result_list = lscmd.split()23	num_args = len(result_list)24	assert num_args==2 or num_args==325	dev_input_file = open(sys.argv[1]+'/result.dev', 'rb')26	test_input_file = open(sys.argv[1]+'/result.test', 'rb')27	if num_args==2:28		print_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','FILE'])29	elif num_args==3:30		chl_input_file = open(sys.argv[1]+'/result.challenge', 'rb')31		print_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','CHL-AVG','CHL-EM','CHL-F1','FILE'])32	# style set33	print_table.align['FILE'] = 'l'34	print_table.float_format = '2.3'35	# data fill36	dev_avg = []37	dev_em = []38	dev_f1 = []39	dev_file = []40	for dline in dev_input_file.readlines():41		dline = dline.strip()42		if re.search('^{', dline):43			ddict = json.loads(dline)44			dev_avg.append(float(ddict['AVERAGE']))45			dev_em.append(float(ddict['EM']))46			dev_f1.append(float(ddict['F1']))47			dev_file.append(ddict['FILE'])48	test_avg = []49	test_em = []50	test_f1 = []51	test_file = []52	for dline in test_input_file.readlines():53		dline = dline.strip()54		if re.search('^{', dline):55			ddict = json.loads(dline)56			test_avg.append(float(ddict['AVERAGE']))57			test_em.append(float(ddict['EM']))58			test_f1.append(float(ddict['F1']))59			test_file.append(ddict['FILE'])60	if num_args==3:61		chl_avg = []62		chl_em = []63		chl_f1 = []64		chl_file = []65		for dline in chl_input_file.readlines():66			dline = dline.strip()67			if re.search('^{', dline):68				ddict = json.loads(dline)69				
chl_avg.append(float(ddict['AVERAGE']))70				chl_em.append(float(ddict['EM']))71				chl_f1.append(float(ddict['F1']))72				chl_file.append(ddict['FILE'])73	# print74	if num_args == 2:75		min_len = min(len(dev_avg),len(test_avg))76		for k in range(min_len):77			print_table.add_row([k+1, dev_avg[k], dev_em[k], dev_f1[k], test_avg[k], test_em[k], test_f1[k], dev_file[k]])78	elif num_args == 3:79		min_len = min(len(dev_avg),len(test_avg),len(chl_avg))80		for k in range(min_len):81			print_table.add_row([k+1, dev_avg[k], dev_em[k], dev_f1[k], test_avg[k], test_em[k], test_f1[k], chl_avg[k], chl_em[k], chl_f1[k], dev_file[k]])82	if len(sys.argv)==3:83		sk = sys.argv[2].upper()84		print('sort key detected: {}'.format(sk))85		print(print_table.get_string(sortby=sk, reversesort=True))86	else:87		print(print_table)88	89	if num_args == 2:90		summary_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','FILE'])91		summary_table.add_row(["M", np.max(dev_avg), np.max(dev_em), np.max(dev_f1), 92								  np.max(test_avg), np.max(test_em), np.max(test_f1),"-"])93		summary_table.add_row(["A", np.mean(dev_avg), np.mean(dev_em), np.mean(dev_f1), 94								  np.mean(test_avg), np.mean(test_em), np.mean(test_f1),"-"])95		summary_table.add_row(["D", np.std(dev_avg), np.std(dev_em), np.std(dev_f1), 96								  np.std(test_avg), np.std(test_em), np.std(test_f1),"-"])97	elif num_args == 3:98		summary_table = PrettyTable(['#','DEV-AVG','DEV-EM','DEV-F1','TEST-AVG','TEST-EM','TEST-F1','CHL-AVG','CHL-EM','CHL-F1','FILE'])99		summary_table.add_row(["M", np.max(dev_avg), np.max(dev_em), np.max(dev_f1), 100								  np.max(test_avg), np.max(test_em), np.max(test_f1),101								  np.max(chl_avg), np.max(chl_em), np.max(chl_f1), "-"])102		summary_table.add_row(["A", np.mean(dev_avg), np.mean(dev_em), np.mean(dev_f1), 103								  np.mean(test_avg), np.mean(test_em), np.mean(test_f1),104								  np.mean(chl_avg), np.mean(chl_em), np.mean(chl_f1), "-"])105		
summary_table.add_row(["D", np.std(dev_avg), np.std(dev_em), np.std(dev_f1), 106								  np.std(test_avg), np.std(test_em), np.std(test_f1),107								  np.std(chl_avg), np.std(chl_em), np.std(chl_f1), "-"])108	# style set109	summary_table.align['FILE'] = 'l'110	summary_table.float_format = '2.3'111	print(summary_table)112	return 0113if __name__ == '__main__':...naive_bayes_tester.py
Source:naive_bayes_tester.py  
# Hyper-parameter sweep over the Laplace-smoothing alpha for the
# naive Bayes weather classifier.
import naive_bayes_classifier
import pandas as pd
import numpy as np

df = pd.read_csv('../data/weather_data.csv')

# Split up X and y
y = df['RainTomorrow']
X = df.drop(columns=['RainTomorrow'])  # Get rid of prediction column

# containers for per-alpha averaged scores
testing_outputs = []
train_outputs = []

# 'test number' N selects the alpha window ((N*0.1)-0.1, N*0.1).
# (The old code reused one variable for both the int and the array.)
test_number = int(input("Enter test number: "))
num_test = int(input("Enter number of tests per Hyperparameter: "))
step = float(input("Enter step value: "))
print(f'Hyperparameter Range being Run: ({(test_number * 0.1) - 0.1}, {test_number * 0.1}), step={step}.')

alphas = np.arange((0.1 * test_number) - 0.1, (test_number * 0.1), step)

# Testing for alpha: average train/test scores over num_test runs each.
for alpha in alphas:
    test_avg = 0
    train_avg = 0
    for _ in range(num_test):
        y_pred, test_score, train_score = naive_bayes_classifier.run(X, y, alph=alpha)
        test_avg += test_score
        train_avg += train_score
    testing_outputs.append(test_avg / num_test)
    train_outputs.append(train_avg / num_test)

# NOTE(review): this reassignment shadows the raw-data frame above; kept
# as-is because downstream code (outside this view) may reference `df`.
df = pd.DataFrame(list(zip(alphas, train_outputs, testing_outputs)))
print(df)
compression_opts = dict(method='zip', archive_name='bayes_output_data.csv')
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!!
