How to use output_summary method in stestr

Best Python code snippets using stestr
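None of the snippets collected below call a stestr API named output_summary directly; each one defines its own output_summary file or dictionary. If all you want is to keep the summary that the stestr CLI prints at the end of a run, a minimal sketch along the following lines is enough (the helper name and the output file name are illustrative, not part of stestr):

import subprocess

def write_run_summary(path="output_summary.txt"):
    # "stestr run" executes the test suite; its stdout ends with the pass/fail summary.
    result = subprocess.run(["stestr", "run"], capture_output=True, text=True)
    with open(path, "w") as output_summary:
        output_summary.write(result.stdout)
    return result.returncode

if __name__ == "__main__":
    raise SystemExit(write_run_summary())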

revolver_analysis.py

Source: revolver_analysis.py (GitHub)


# importing all essential packages
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import csv, pymysql, graphviz, os, sys, pprint
import tkinter as tk
from matplotlib import pyplot as plt  # module used to plot the decision tree once trained.
from tkinter import *
from tkinter import filedialog
from sqlalchemy import create_engine
from sklearn.metrics import plot_confusion_matrix
from sklearn.tree import DecisionTreeClassifier  # importing the decision tree classification module.
from sklearn import tree
from sklearn.metrics import classification_report, confusion_matrix  # metrics used to validate the accuracy of the decision tree classifier
from sklearn.model_selection import train_test_split
from pick import pick

# DEFINING FUNCTIONS
# //////////////////////////////////////////////////////////////////////////////////
def input_file():
    # Generate a GUI for the user to pick an input file from the current working directory.
    root = tk.Tk()
    filename = filedialog.askopenfilename(initialdir=cwd,
                                          title="Select a File",
                                          filetypes=(("all files", "*.*"),
                                                     ("CSV files", "*.csv*"),
                                                     ("Text files", "*.txt*")))
    root.withdraw()
    return filename

def create_table(csv_file):
    # Create a connection to a MySQL database and parse the input data into a new MySQL table.
    df = pd.read_csv(csv_file)
    data = pd.DataFrame(df)
    engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}"
                           .format(user="arap2",
                                   pw="pa55wd",
                                   db="MEDUSA"))
    df.to_sql("hetmap_data2", con=engine, if_exists='replace')

def let_user_pick(options):
    # Let the user choose which sections of the pipeline they want to run.
    print("Please choose (or enter 0 to exit):")
    decision = False
    for idx, element in enumerate(options):
        print("{}) {}".format(idx + 1, element))
    while decision is False:
        i = input("Enter number: ")
        try:
            if int(i) == 0:
                break
            if 0 < int(i) <= len(options):
                return int(i)
            else:
                print("number out of range")
        except:
            print("Please enter a corresponding number")
    return exit()

def name_output(directory):
    # Check the current working directory for any directories named analysis_output and
    # create an indexed version of the directory name for each run of the program.
    name = "analysis_output"
    counter = 0
    file_exists = True
    while file_exists == True:
        if os.path.isdir(name) == True and counter >= 1:
            name = name.replace(f"{str(counter)}", f"{str(counter + 1)}")
            counter += 1
        elif os.path.isdir(name) == True and counter < 1:
            counter += 1
            name = f"{name}({str(counter)})"
        elif os.path.isdir(name) == False:
            file_exists = False
    return name

def sort_predictions(data_frame, predictions):
    # Sort the unlabelled array returned from sk-learn's predict() method and return
    # a dictionary of predictions matched to the original sample IDs.
    predictions_dict = {}
    for index, row in data_frame.iterrows():
        entry = {row[0]: predictions[index]}
        predictions_dict.update(entry)
    return predictions_dict

def filter_alterations(data_frame, cutoff):
    recurrent_alterations = []
    for key, value in data_frame.iteritems():
        if sum(value) >= int(cutoff):
            recurrent_alterations.append(key)
    return recurrent_alterations

# //////////////////////////////////////////////////////////////////////////////////
# pipeline choice
options = ["Produce ETs only", "Train a decision tree", "Run the full pipeline"]
process = let_user_pick(options)
    # setting the pipeline for this run of the program.
cwd = os.getcwd()
name = name_output(cwd)
new_dir = name
path = os.path.join(cwd, new_dir)
try:
    os.mkdir(path)
except OSError as error:
    print(str(error) + "\n" + """files will be overwritten, do you still wish
    to continue?""")
    # creating a new folder to store the output of the program in;
    # this folder is nested in the current working directory.
# add option to cancel script (y/n) here in the future.

# PRODUCING EVOLUTIONARY TRAJECTORIES //////////////////////////////////////////////
# user input
print("Choose a file to produce ETs")
filename = input_file()
create_table(filename)  # parsing the csv file into a MySQL table.
    # The user inputs a csv file used to map the evolutionary trajectories.
    # The input must be a csv with column names delimited by '__'.
    # The csv file is converted to a table in a MySQL database.
# parsing the data.
df = pd.read_csv(filename)
df_features = df.drop(columns=['sample', 'cluster'])
clusters = df.cluster.unique()  # Extracting each unique cluster from the input data.
print("""Enter a cutoff value for recurrent alterations
    (whole integers only):""")
ET_cutoff = int(input())  # user inputs the desired cutoff value.
recurrent_alterations = filter_alterations(df_features, ET_cutoff)
    # The csv file is converted into a pandas dataframe and split into two:
    # 1) a dataframe of features, 2) a list of cluster labels.
    # The cutoff is used to find all alterations that occur >= the threshold.
# establishing a connection to the pymysql database.
con = pymysql.connect(
    host='localhost',
    user='arap2',
    password='pa55wd',
    database='MEDUSA'
)
cur = con.cursor()
# sorting the transitional data into clusters.
labels = []  # labels is a list of lists in which each nested list corresponds to
    # a cluster and each value corresponds to an alteration (order is conserved).
ETs = {}  # a dictionary where the clusters are the keys and the values are a list
    # of transitions that pass the cutoff.
with con.cursor() as cur:  # iterating the MySQL table.
    for cluster in clusters:
        c_labels = []  # holds the sum of occurrences for each transition that passes the cutoff
            # for the current cluster; used later as edge labels in ET mapping.
        value = []  # holds all the transitions that will be placed in the ET for the current cluster.
        for alteration in recurrent_alterations:
            sql_query = """select sum(%s) from hetmap_data2 where
            cluster = '%s'"""  # sql query returning how many times a transition occurs per cluster;
                # if this sum is >= the cutoff, it is added to the ET for that cluster.
            cur.execute(sql_query % (alteration, cluster))  # executing sql query
            result = cur.fetchall()
            count = result[0][0]
            if count >= ET_cutoff:  # assessing whether the transition passes the threshold set by the user.
                value.append(alteration)
                c_labels.append(count)
        ET_entry = {cluster: value}
        ETs.update(ET_entry)
        labels.append(c_labels)
        # Within each nested list is the count for each alteration that passed the cutoff,
        # e.g. labels[0][0] contains the count for the first alteration in the cluster 1 ET.

# graphing the transitional data.
input_features = []  # a list containing the features (alterations) to be passed on to decision tree training.
colour_map = []  # contains the schema for colouring the nodes in each ET graph.
label_count = 0
for key, value in ETs.items():  # iterating the dictionary of clusters and transitions.
    g = nx.DiGraph()  # initialising a new networkx graph for each cluster.
    for feature in value:
        fsplit = feature.split("__")  # separating the transition into its two component parts.
        g.add_edge(fsplit[0], fsplit[1], length=2)  # adding an edge to the graph between
            # the two components of a transition.
    for node in g.nodes():  # defining the colour schema for the current cluster.
        if node == 'GL':
            colour_map.append('red')
        else:
            colour_map.append('blue')
        input_features.append(node)  # appending to a list containing all the alterations
            # excluding the transitional information.
    pos = nx.circular_layout(g)  # setting the layout of the figure.
    fig = plt.figure(figsize=(12, 12))
    nx.draw_networkx_nodes(g, pos, node_size=4500, node_color=colour_map, alpha=0.5)  # settings for plotting nodes
    nx.draw_networkx_edges(g, pos, width=2, alpha=0.5, edge_color='black')  # settings for plotting edges
    nx.draw_networkx_labels(g, pos, font_size=16, font_family='sans-serif')  # settings for plotting node labels
        # setting parameters for the graphing of the nodes, edges and labels.
    label_dictionary = {}
    for index, edge in enumerate(g.edges()):
        entry = {edge: labels[label_count][index]}
        label_dictionary.update(entry)
        # putting the transition counts for each cluster into a dictionary for use with networkx.
    nx.draw_networkx_edge_labels(g, pos, edge_labels=label_dictionary,
                                 font_family='sans-serif', font_size=16)  # adding the count of each transition to the appropriate edge for the current ET (cluster).

    file = f"ET{key}.png"  # naming the figure
    fig.savefig(os.path.join(path, file))  # save the figure using this name
    plt.clf()
    colour_map = []  # resetting the colour_map for the next cluster.
    label_count += 1  # moving on to the next nested list in the labels list.

if process == 1:  # generate ETs only, then exit the program - pipeline choice 1.
    exit()

# TRAINING THE DECISION TREE ///////////////////////////////////////////////////////
# user input
unique_features = set(input_features)  # the recurrent alterations to feed into decision tree classification.
title = 'Choose features to train the algorithm with'
selection = pick(list(unique_features), title, multiselect=True, min_selection_count=4)
    # The user selects from the recurrent alterations derived from the transitional data
    # to use as candidates in node selection for the decision tree classifier.
selection = [x[0] for x in selection]  # unpacking the output from the user selection.

# parsing the data
feature_labels = []
filename_2 = input_file()
df2 = pd.read_csv(filename_2, delimiter='\t')
df2_features = pd.DataFrame(df2, columns=selection)
classes = pd.DataFrame(df2, columns=['CLUSTER'])
    # The user inputs a csv file, which is converted into a pandas dataframe and split into
    # two: 1) a dataframe of features, 2) a dataframe of cluster data.

# training the decision tree
clf = DecisionTreeClassifier(random_state=0, max_features=len(selection))
    # setting the parameters for the classification decision tree model.
clf.fit(df2_features, classes)
    # building the classification model (decision tree) on the training dataset (the features and classes).

# plotting the decision tree below
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 4), dpi=300)  # parameters for plotting the decision tree.
file = "tree.dot"
    # generating DOT data for the graphviz plot (method of plotting the decision tree)
tree.export_graphviz(clf, out_file=os.path.join(path, file), feature_names=selection, class_names=clusters,
                     filled=True)  # outputting the figure to the output directory generated at the start of the script and assigning figure labels.
os.system("dot -Tpng tree.dot -o full_script_tree_med.png")
    # terminal command to convert the .dot file to a .png file
output_summary = open("output_summary.txt", "w")
output_summary.close()
os.chdir(path)
os.system("dot -Tpng tree.dot -o full_script_tree_med.png")
    # terminal command to convert the .dot file to a .png file; only supported on linux for now.
output_summary = open('output_summary.txt', 'a')
    # creating a new file to hold information on this run of the program.

# EVALUATING THE DECISION TREE /////////////////////////////////////////////////////
predictions = clf.predict(df2_features)
sorted_predictions = sort_predictions(df2, predictions)
    # predictions on the training dataset are used to validate the model below;
    # class predictions are sorted for easier display.
# printing parameter choices and predictions to the outfile.
print(f"""Evolutionary trajectories mapped using: {filename} as input""", "\n", file=output_summary)
print(f"cutoff value: {ET_cutoff}", "\n", file=output_summary)
print(f"chosen feature set is {selection}", "\n", file=output_summary)
print(f"Decision tree trained on: {filename_2}", "\n", file=output_summary)
print("Predictions made during self validation: ", sorted_predictions, "\n", file=output_summary)
# performing statistical analyses of the decision tree classifier
c_matrix = confusion_matrix(classes, predictions, labels=clusters)
classification_report = classification_report(classes, predictions)
# printing the statistical analyses to the outfile.
print("\n", "Model Evaluation", file=output_summary)
print("\n", "classification matrix: ", "\n", c_matrix, file=output_summary)
print("\n", "classification_report: ", "\n", classification_report,
      file=output_summary)
    # writing the predictions and their evaluations into output_summary.

if process == 2:  # generate the decision tree, evaluate it, then exit the program.
    exit()

# BINNING INDEPENDENT DATA /////////////////////////////////////////////////////////
# user input and data parsing.
print("Select a file for stratification")
filename = input_file()
assignment_data = pd.read_csv(filename)
assignment_data2 = pd.DataFrame(assignment_data, columns=selection)
assignment_clusters = pd.DataFrame(assignment_data, columns=['CLUSTER'])
    # The user inputs independent data, parsed as previously done for the training dataset.
# assignment of classes to the new dataset.
assignments = clf.predict(assignment_data2)
sorted_assignments = sort_predictions(assignment_data, assignments)
# evaluating assignments
matrix = confusion_matrix(assignment_clusters, assignments)
# writing the predictions and evaluations into output_summary.
print("\n", f"""predictions made on independent dataset: {filename} """, file=output_summary)
print("\n", sorted_assignments, file=output_summary)
print("\n", "classification matrix: ", "\n", matrix,
      "\n", file=output_summary)
print("classification accuracy on independent dataset:",
      clf.score(assignment_data2, assignment_clusters), file=output_summary)

# OVERFITTING ANALYSIS /////////////////////////////////////////////////////////////
# testing the effect of pruning on the accuracy of test predictions
pruning_path = clf.cost_complexity_pruning_path(df2_features, classes)  # determining effective pruning values (ccp_alphas) for the current training data.
ccp_alphas, impurities = pruning_path.ccp_alphas, pruning_path.impurities  # extracting node impurities for each pruning value.
clfs = []  # holds each tree trained using a different ccp_alpha value.
for ccp_alpha in ccp_alphas:  # training a new decision tree for each value of ccp_alpha.
    p_clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
    p_clf.fit(df2_features, classes)
    clfs.append(p_clf)
clfs = clfs[:-1]
ccp_alphas = ccp_alphas[:-1]
train_scores = [clf.score(df2_features, classes) for clf in clfs]
test_scores = [clf.score(assignment_data2, assignment_clusters) for clf in clfs]
    # plotting the classification accuracy of each decision tree in clfs for both the
    # training and test datasets.
fig, ax = plt.subplots()  # setting figure parameters and labels
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas, train_scores, marker='o', label="train",
        drawstyle="steps-post")  # plotting the classification accuracy of each tree on the training dataset.
ax.plot(ccp_alphas, test_scores, marker='o', label="test",
        drawstyle="steps-post")  # plotting the classification accuracy of each tree on the test dataset.
ax.legend()
fig_title = "accuracy vs alpha plot.png"  # figure title
fig.savefig(os.path.join(path, fig_title))  # saving the figure to the current output directory.
output_summary.close()
...
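In the script above, output_summary is nothing stestr-specific: it is an ordinary text-file handle, and each print(..., file=output_summary) call appends one line of the run report. A small, self-contained sketch of the same pattern (the helper name and example values are illustrative only):

import os

def write_report(out_dir, lines, name="output_summary.txt"):
    # Append each report line to <out_dir>/output_summary.txt so successive
    # pipeline stages keep adding to the same run summary.
    report_path = os.path.join(out_dir, name)
    with open(report_path, "a") as output_summary:
        for line in lines:
            print(line, file=output_summary)
    return report_path

# Example usage with values like those written above:
write_report(".", ["cutoff value: 3", "chosen feature set is ['GL__TP53', 'GL__KRAS']"])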


adversarial_loss.py

Source: adversarial_loss.py (GitHub)


import tensorflow as tf
import numpy as np
#from ops import GDSummaryWriter
# Reference progressive_growing_of_gans
# https://github.com/tkarras/progressive_growing_of_gans
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
    if len(values) == 1 and isinstance(values[0], tuple):
        values = values[0]
    values = tuple(tf.cast(v, tf.float32) for v in values)
    return values if len(values) >= 2 else values[0]

def lerp(a, b, t):
    with tf.name_scope('Lerp'):
        return a + (b - a) * t

#----------------------------------------------------------------------------
# cross entropy
def G_gan(model, params_fake, patch_gan=True):
    fake_scores_out = model(**params_fake)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_scores_out), logits=fake_scores_out)
    if patch_gan:  # -> inf
        loss = tf.reduce_mean(loss, axis=[1, 2, 3])
    return loss

def D_gan(model, params_real, params_fake, patch_gan=True):
    real_scores_out = model(**params_real)
    fake_scores_out = model(**params_fake)
    real_input = params_real["inputs"]
    fake_input = params_fake["inputs"]
    real_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_scores_out), logits=real_scores_out)
    fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_scores_out), logits=fake_scores_out)
    output_summary = {}
    output_summary_image = {}
    if patch_gan:
        rd_real_loss = tf.reduce_mean(real_loss, axis=[1, 2, 3])
        rd_fake_loss = tf.reduce_mean(fake_loss, axis=[1, 2, 3])
        output_summary["real_scores_out"] = tf.reduce_mean(rd_real_loss)
        output_summary["fake_scores_out"] = tf.reduce_mean(rd_fake_loss)
        output_summary_image["real_scores_out"] = real_scores_out * 0.5 + 0.5  # notice here
        output_summary_image["fake_scores_out"] = fake_scores_out * 0.5 + 0.5
        loss = rd_fake_loss + rd_real_loss
    else:
        output_summary["real_scores_out"] = tf.reduce_mean(real_loss)
        output_summary["fake_scores_out"] = tf.reduce_mean(fake_loss)
        loss = real_loss + fake_loss
    if True:
        # add gradients info
        output_summary['gradients'] = tf.reduce_mean(
            #tf.abs(tf.gradients(loss, fake_input))
            tf.abs(tf.gradients(fake_scores_out, fake_input))
        )
    return loss, output_summary, output_summary_image

#----------------------------------------------------------------------------
# WGAN-GP
def G_wgan(model, params_fake, patch_gan=True):
    fake_scores_out = model(**params_fake)
    if patch_gan:  # -> inf
        loss = -tf.reduce_mean(fake_scores_out, axis=[1, 2, 3])
    else:
        loss = -fake_scores_out
    return loss

def D_wgan(model, params_real, params_fake, patch_gan=True,
           use_reduce_mean=False,
           wgan_lambda=10.0,    # Weight for the gradient penalty term.
           wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
           wgan_target=1.0):    # Target value for gradient magnitudes.
    real_scores_out = model(**params_real)
    fake_scores_out = model(**params_fake)
    real_input = params_real["inputs"]
    fake_input = params_fake["inputs"]
    batch_size = tf.shape(real_input)[0]
    output_summary = {}
    output_summary_image = {}
    if patch_gan:
        rd_real_scores_out = tf.reduce_mean(real_scores_out, axis=[1, 2, 3])
        rd_fake_scores_out = tf.reduce_mean(fake_scores_out, axis=[1, 2, 3])
        output_summary["real_scores_out"] = tf.reduce_mean(rd_real_scores_out)
        output_summary["fake_scores_out"] = tf.reduce_mean(rd_fake_scores_out)
        output_summary_image["real_scores_out"] = real_scores_out * 0.5 + 0.5
        output_summary_image["fake_scores_out"] = fake_scores_out * 0.5 + 0.5
        loss = rd_fake_scores_out - rd_real_scores_out  # fake -> -inf, real -> inf
    else:
        output_summary["real_scores_out"] = tf.reduce_mean(real_scores_out)
        output_summary["fake_scores_out"] = tf.reduce_mean(fake_scores_out)
        loss = fake_scores_out - real_scores_out
    with tf.name_scope("GradientPenalty"):
        params_mixed = params_fake.copy()
        mixing_factors = tf.random_uniform([batch_size, 1, 1, 1], 0.0, 1.0, dtype=real_input.dtype)
        mixed_images_out = lerp(real_input, fake_input, mixing_factors)  # random sample
        params_mixed["inputs"] = mixed_images_out
        mixed_scores_out = model(**params_mixed)  # run mixed result
        if patch_gan:
            rd_mixed_scores_out = tf.reduce_mean(mixed_scores_out, axis=[1, 2, 3])
            output_summary["mixed_scores"] = tf.reduce_mean(rd_mixed_scores_out)
            output_summary_image["mixed_scores"] = mixed_scores_out * 0.5 + 0.5
        else:
            output_summary["mixed_scores"] = tf.reduce_mean(mixed_scores_out)
        #mixed_loss = rd_mixed_scores_out
        mixed_loss = mixed_scores_out
        mixed_grads = tf.gradients(mixed_loss, [mixed_images_out])[0]  # compute gradients
        if use_reduce_mean:
            mixed_norms = tf.sqrt(tf.reduce_mean(tf.square(mixed_grads), axis=[1, 2, 3]))  # remain batch dim
        else:
            mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))  # remain batch dim
        gradient_penalty = tf.square(mixed_norms - wgan_target)
        output_summary["gradient_penalty"] = tf.reduce_mean(gradient_penalty)
        loss += gradient_penalty * (wgan_lambda / (wgan_target ** 2))
    with tf.name_scope("EpsilonPenalty"):
        epsilon_penalty = tf.square(real_scores_out)
        if patch_gan:
            epsilon_penalty = tf.reduce_mean(epsilon_penalty, axis=[1, 2, 3])
        output_summary["epsilon_penalty"] = tf.reduce_mean(epsilon_penalty)
        loss += epsilon_penalty * wgan_epsilon
    if True:
        # add gradients info
        output_summary['gradients'] = tf.reduce_mean(
            tf.abs(tf.gradients(fake_scores_out, fake_input))
        )
    return loss, output_summary, output_summary_image

#------------------------------------------------------------------------
# LSGAN
def D_lsgan(model, params_real, params_fake, patch_gan=True, style=1):
    real_scores_out = model(**params_real)
    fake_scores_out = model(**params_fake)
    D_loss_real = tf.square(real_scores_out - 1.)  # -> 1
    if style == 0:
        D_loss_fake = tf.square(fake_scores_out + 1.)  # -> -1
    else:
        D_loss_fake = tf.square(fake_scores_out)  # -> 0
    output_summary = {}
    if patch_gan:
        rd_loss_real = tf.reduce_mean(D_loss_real, axis=[1, 2, 3])
        rd_loss_fake = tf.reduce_mean(D_loss_fake, axis=[1, 2, 3])
        output_summary["real_scores_out"] = rd_loss_real
        output_summary["fake_scores_out"] = rd_loss_fake
        loss = rd_loss_fake + rd_loss_real
    else:
        output_summary["real_scores_out"] = D_loss_real
        output_summary["fake_scores_out"] = D_loss_fake
        loss = D_loss_fake + D_loss_real
    return loss, output_summary

def G_lsgan(model, params_fake, patch_gan=True, style=1):
    fake_scores_out = model(**params_fake)
    if style == 0:
        D_loss_fake = tf.square(fake_scores_out)  # -> 0
    else:
        D_loss_fake = tf.square(fake_scores_out - 1.)  # -> 1
    if patch_gan:
        loss = tf.reduce_mean(D_loss_fake, axis=[1, 2, 3])
    else:
        loss = D_loss_fake
...
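In this file, output_summary is a dictionary of scalar tensors (and output_summary_image a dictionary of image-like tensors) returned alongside each GAN loss. One hypothetical way to consume them in the TF1-style graph code used above, sketched here with an illustrative helper that is not part of the original project:

import tensorflow.compat.v1 as tf  # the file above uses TF1-style ops (tf.gradients, tf.random_uniform)

def merge_loss_summaries(output_summary, output_summary_image):
    # Turn the scalar entries into TensorBoard scalar summaries and the
    # image-like entries into image summaries, then merge them into one op.
    ops = [tf.summary.scalar(name, value) for name, value in output_summary.items()]
    ops += [tf.summary.image(name, value) for name, value in output_summary_image.items()]
    return tf.summary.merge(ops)

# e.g. loss, output_summary, output_summary_image = D_gan(model, params_real, params_fake)
#      merged = merge_loss_summaries(output_summary, output_summary_image)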


lot_drill_down_json.py

Source: lot_drill_down_json.py (GitHub)


import string
import random


class LOTDrilDown():
    """docstring for LOTDrilDown"""

    def __init__(self):
        self.treated_color = "#ff1573"
        self.level_color_dict = ["#ff1573", "#92d050", "#00beb3", "#ffc000", "#9a457d"]

    def get_random_node_id(self):
        chars_list = '{}{}{}'
        chars_list = chars_list.format(
            string.ascii_uppercase,
            string.digits,
            string.ascii_lowercase
        )
        return ''.join(random.choice(chars_list) for _ in range(10))

    def get_color(self, level_id):
        return self.level_color_dict[level_id % len(self.level_color_dict)]

    def append_dict(self, k, v, summary, node_id, level_id=1):
        if isinstance(v, dict):
            temp_summary = {}
            temp_summary['name'] = k
            keys = list(v.keys())
            if len(keys) <= 1:
                temp_summary['count'] = keys[0]
                temp_summary['level'] = level_id
                temp_summary['nodeId'] = node_id
                temp_summary['color'] = self.get_color(level_id)
                temp_summary['children'] = []
                new_dict = v.get(keys[0])
                level_id = level_id + 1
                temp_summary['children'] = []
                test = {}
                for key, val in new_dict.items():
                    test = self.append_dict(key, val, temp_summary, node_id, level_id=level_id)
                    temp_summary['children'].append(test)
                return temp_summary
            else:
                level_id = level_id + 1
                test = {}
                for key, val in v.items():
                    test = self.append_dict(key, val, summary, node_id, level_id=level_id)
                    summary['children'].append(test)
                return summary
        else:
            temp_summary = {}
            temp_summary['name'] = k
            temp_summary['count'] = v
            temp_summary['level'] = level_id
            temp_summary['nodeId'] = node_id
            temp_summary['color'] = self.get_color(level_id)
            return temp_summary

    def construct_dict(self, dic, node_id):
        output_summary = []
        for k, v in dic.items():
            level_id = 1
            output_summary.append(self.append_dict(k, v, output_summary, node_id, level_id=level_id))
        return output_summary

    def get_lot_json(self, out_dict):
        node_id = self.get_random_node_id()
        final_summary = {}
        loop = 1
        for key, value in out_dict.items():
            if loop == 1:
                final_summary['name'] = key
                final_summary['count'] = value
                final_summary['color'] = self.treated_color
                final_summary['nodeId'] = node_id
                final_summary['level'] = 0
                if 'children' in out_dict.keys():
                    final_summary['children'] = []
            else:
                output = self.construct_dict(value, node_id)
                final_summary['children'] = output
            loop = loop + 1
...
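Here output_summary is neither a file nor a tensor dictionary but a list of nested node records (name, count, level, nodeId, color) used to build a drill-down chart. A short usage sketch with a made-up input dictionary:

drill = LOTDrilDown()
sample = {
    "Line 1": 120,                                           # leaf entry: name -> count
    "Line 2": {"80": {"Maintenance": 30, "Switched": 50}},   # nested entry with children
}
output_summary = drill.construct_dict(sample, drill.get_random_node_id())
print(output_summary)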


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run stestr automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

