How to use the current_path method in localstack

Best Python code snippets using localstack_python
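
A note before the snippets: current_path is not a LocalStack API call; in the examples below it is simply a variable holding a base directory, set either to a hard-coded path or from os.getcwd(). A minimal sketch of the pattern (the data.txt file name is only an illustration):

import os

current_path = os.getcwd()  # absolute path of the current working directory
data_file = os.path.join(current_path, 'data.txt')  # build file paths relative to it
print(data_file)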

DAE_AFE_lossweight.py

Source: DAE_AFE_lossweight.py (GitHub)


import os, sys
import numpy as np

print('Read Multi Data Start ...')
current_path = '/home/ding/Forgithub/'
baseDir = current_path
trainingSet = 'multi/'
subDir = ['clean1', 'clean2', 'clean3', 'clean4',
          'N1_SNR5', 'N2_SNR5', 'N3_SNR5', 'N4_SNR5',
          'N1_SNR10', 'N2_SNR10', 'N3_SNR10', 'N4_SNR10',
          'N1_SNR15', 'N2_SNR15', 'N3_SNR15', 'N4_SNR15',
          'N1_SNR20', 'N2_SNR20', 'N3_SNR20', 'N4_SNR20']
training_list = []
multiTR_path = []
num_frame_group = 11
multi_length = []
multi_clean = []
multi_SNR20 = []
multi_SNR15 = []
multi_SNR10 = []
multi_SNR5 = []
for h in range(len(subDir)):
    dirs = os.listdir(baseDir + trainingSet + subDir[h])
    for num in range(len(dirs)):
        tmp = np.loadtxt(baseDir + trainingSet + subDir[h] + '/' + dirs[num])
        multi_length.append(tmp.size // 39)  # number of 39-dim frames in this file
        multiTR_path.append(baseDir + trainingSet + '/' + dirs[num])
        # pad (num_frame_group - 1) / 2 zero frames on each side for context splicing
        zero = np.zeros(((num_frame_group - 1) // 2, 39))
        tmp = np.concatenate((zero, tmp), axis=0)
        tmp = np.concatenate((tmp, zero), axis=0)
        # CMVN: per-coefficient mean and variance normalisation
        tmp = np.transpose(tmp)
        cur_mean = np.mean(tmp, axis=1)
        cur_std = np.std(tmp, axis=1, ddof=1)
        tmp = tmp - cur_mean[:, None]
        tmp = tmp / cur_std[:, None]
        tmp = np.nan_to_num(tmp)
        tmp = np.transpose(tmp)
        # splice each group of 11 consecutive frames into one 429-dim input vector
        for num_split_group in range(tmp.size // 39 - num_frame_group + 1):
            temp_data = tmp[num_split_group:num_split_group + num_frame_group].reshape(-1)
            training_list.append(temp_data)
            if h < 4:
                multi_clean.append(temp_data)
            elif 4 <= h < 8:
                multi_SNR5.append(temp_data)
            elif 8 <= h < 12:
                multi_SNR10.append(temp_data)
            elif 12 <= h < 16:
                multi_SNR15.append(temp_data)
            elif 16 <= h < 20:
                multi_SNR20.append(temp_data)
x_train_multi = np.array(training_list)
multi_clean = np.array(multi_clean)
multi_SNR5 = np.array(multi_SNR5)
multi_SNR10 = np.array(multi_SNR10)
multi_SNR15 = np.array(multi_SNR15)
multi_SNR20 = np.array(multi_SNR20)
print('Read Multi Data End ...')

print('Read Clean Data Start ...')
baseDir = current_path
trainingSet = 'clean/'
training_list = []
clean_clean = []
clean_SNR20 = []
clean_SNR15 = []
clean_SNR10 = []
clean_SNR5 = []
for h in range(len(subDir)):
    dirs = os.listdir(baseDir + trainingSet + subDir[h])
    for num in range(len(dirs)):
        tmp = np.loadtxt(baseDir + trainingSet + subDir[h] + '/' + dirs[num])
        # CMVN only; the clean targets stay one 39-dim frame per row
        tmp = np.transpose(tmp)
        cur_mean = np.mean(tmp, axis=1)
        cur_std = np.std(tmp, axis=1, ddof=1)
        tmp = tmp - cur_mean[:, None]
        tmp = tmp / cur_std[:, None]
        tmp = np.nan_to_num(tmp)
        tmp = np.transpose(tmp)
        for i in range(len(tmp)):
            training_list.append(tmp[i])
            if h < 4:
                clean_clean.append(tmp[i])
            elif 4 <= h < 8:
                clean_SNR5.append(tmp[i])
            elif 8 <= h < 12:
                clean_SNR10.append(tmp[i])
            elif 12 <= h < 16:
                clean_SNR15.append(tmp[i])
            elif 16 <= h < 20:
                clean_SNR20.append(tmp[i])

x_train_clean = np.array(training_list)
clean_clean = np.array(clean_clean)
clean_SNR5 = np.array(clean_SNR5)
clean_SNR10 = np.array(clean_SNR10)
clean_SNR15 = np.array(clean_SNR15)
clean_SNR20 = np.array(clean_SNR20)
print('Read Clean Data End ...')

print('Start training ...')
from keras.layers import *
from keras.models import *
import keras.optimizers

# initial per-sample loss weights: one weight of 1.0 per training frame
cleanw = np.ones(273713)  # clean
SNR5w = np.ones(276048)   # 5dB
SNR10w = np.ones(272158)  # 10dB
SNR15w = np.ones(271282)  # 15dB
SNR20w = np.ones(270481)  # 20dB
weightloss = np.concatenate((cleanw, SNR5w, SNR10w, SNR15w, SNR20w), axis=0)

# optimizer
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-5)

# instantiate model (a fresh LeakyReLU instance per layer, rather than reusing one object)
model = Sequential()
model.add(Dense(500, input_dim=429))
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(500))
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(273))
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(300))
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(300))
model.add(LeakyReLU(alpha=0.3))
model.add(Dense(39))
model.compile(loss='mse', optimizer=adam)

# per-condition weight histories, seeded at 1.0
rclean = [1.0]
rSNR5 = [1.0]
rSNR10 = [1.0]
rSNR15 = [1.0]
rSNR20 = [1.0]
alphac = 0.1
alpha5 = 0.1
alpha10 = 0.1
alpha15 = 0.1
alpha20 = 0.1
for epoch in range(1):
    model.fit(x_train_multi, x_train_clean, epochs=1, batch_size=100, shuffle=True, sample_weight=weightloss)
    loss_clean = model.evaluate(multi_clean, clean_clean, verbose=0, batch_size=500)
    loss_SNR5 = model.evaluate(multi_SNR5, clean_SNR5, verbose=0, batch_size=500)
    loss_SNR10 = model.evaluate(multi_SNR10, clean_SNR10, verbose=0, batch_size=500)
    loss_SNR15 = model.evaluate(multi_SNR15, clean_SNR15, verbose=0, batch_size=500)
    loss_SNR20 = model.evaluate(multi_SNR20, clean_SNR20, verbose=0, batch_size=500)

    avgloss = (loss_clean + loss_SNR5 + loss_SNR10 + loss_SNR15 + loss_SNR20) / 5

    # raise the weights of conditions whose loss is above average, lower the rest
    cleanw = cleanw + alphac * (loss_clean - avgloss)
    SNR5w = SNR5w + alpha5 * (loss_SNR5 - avgloss)
    SNR10w = SNR10w + alpha10 * (loss_SNR10 - avgloss)
    SNR15w = SNR15w + alpha15 * (loss_SNR15 - avgloss)
    SNR20w = SNR20w + alpha20 * (loss_SNR20 - avgloss)

    weightloss = np.concatenate((cleanw, SNR5w, SNR10w, SNR15w, SNR20w), axis=0)
    alpha5 = alpha5 * 0.95
    alpha10 = alpha10 * 0.95
    alpha15 = alpha15 * 0.95
    alpha20 = alpha20 * 0.95
    alphac = alphac * 0.95
    rclean.append(cleanw[0])
    rSNR5.append(SNR5w[0])
    rSNR10.append(SNR10w[0])
    rSNR15.append(SNR15w[0])
    rSNR20.append(SNR20w[0])

    print('Epoch = ', epoch + 2)
print('training is over.')

np.savetxt('clean.txt', np.array(rclean))
np.savetxt('SNR5.txt', np.array(rSNR5))
np.savetxt('SNR10.txt', np.array(rSNR10))
np.savetxt('SNR15.txt', np.array(rSNR15))
np.savetxt('SNR20.txt', np.array(rSNR20))
print('record is over')

# drop references to the weight arrays
cleanw = None
SNR5w = None
SNR10w = None
SNR15w = None
SNR20w = None
weightloss = None

print('Create Directory ...')
output_dir = 'Output_DAE_AFE_lossweight1'
snr_conds = ['SNR0', 'SNR5', 'SNR10', 'SNR15', 'SNR20', 'SNR-5']
testAB_dirs = ['clean1', 'clean2', 'clean3', 'clean4'] + \
              ['N%d_%s' % (n, c) for n in range(1, 5) for c in snr_conds]
testC_dirs = ['clean1', 'clean2'] + \
             ['N%d_%s' % (n, c) for n in range(1, 3) for c in snr_conds]
os.makedirs(current_path + output_dir)
os.makedirs(current_path + output_dir + '/multi')
for d in testAB_dirs:
    os.makedirs(current_path + output_dir + '/testA/' + d)
    os.makedirs(current_path + output_dir + '/testB/' + d)
for d in testC_dirs:
    os.makedirs(current_path + output_dir + '/testC/' + d)

print('Start testing ...')
print('Multi train data put into model...')
x_train_multi_new = model.predict(x_train_multi, batch_size=500)
print('End Feeding ...')
print('Write multi train data into directory ...')
index_count = 0
for file_num in range(len(multi_length)):
    # gather this file's enhanced frames back into a (frames, 39) matrix
    temp = x_train_multi_new[index_count:index_count + multi_length[file_num]]
    temp = np.transpose(temp)
    savepath = multiTR_path[file_num].replace('multi', output_dir + '/multi')
    np.save(os.path.splitext(savepath)[0], temp)
    index_count = index_count + multi_length[file_num]

for testindex in ['testA/', 'testB/', 'testC/']:
    baseDir = current_path
    trainingSet = testindex
    if testindex != 'testC/':
        subDir = ['clean1', 'N1_SNR0', 'N1_SNR5', 'N1_SNR10', 'N1_SNR15', 'N1_SNR20', 'N1_SNR-5',
                  'clean2', 'N2_SNR0', 'N2_SNR5', 'N2_SNR10', 'N2_SNR15', 'N2_SNR20', 'N2_SNR-5',
                  'clean3', 'N3_SNR0', 'N3_SNR5', 'N3_SNR10', 'N3_SNR15', 'N3_SNR20', 'N3_SNR-5',
                  'clean4', 'N4_SNR0', 'N4_SNR5', 'N4_SNR10', 'N4_SNR15', 'N4_SNR20', 'N4_SNR-5']
    else:
        subDir = ['clean1', 'N1_SNR0', 'N1_SNR5', 'N1_SNR10', 'N1_SNR15', 'N1_SNR20', 'N1_SNR-5',
                  'clean2', 'N2_SNR0', 'N2_SNR5', 'N2_SNR10', 'N2_SNR15', 'N2_SNR20', 'N2_SNR-5']
    set_list = []
    set_path = []
    num_frame_group = 11
    set_length = []
    for h in range(len(subDir)):
        dirs = os.listdir(baseDir + trainingSet + subDir[h])
        for num in range(len(dirs)):
            tmp = np.loadtxt(baseDir + trainingSet + subDir[h] + '/' + dirs[num])
            set_length.append(tmp.size // 39)
            set_path.append(baseDir + trainingSet + subDir[h] + '/' + dirs[num])
            zero = np.zeros(((num_frame_group - 1) // 2, 39))
            tmp = np.concatenate((zero, tmp), axis=0)
            tmp = np.concatenate((tmp, zero), axis=0)
            tmp = np.transpose(tmp)
            cur_mean = np.mean(tmp, axis=1)
            cur_std = np.std(tmp, axis=1, ddof=1)
            tmp = tmp - cur_mean[:, None]
            tmp = tmp / cur_std[:, None]
            tmp = np.nan_to_num(tmp)
            tmp = np.transpose(tmp)
            for num_split_group in range(tmp.size // 39 - num_frame_group + 1):
                temp_data = tmp[num_split_group:num_split_group + num_frame_group].reshape(-1)
                set_list.append(temp_data)

    x_test = np.array(set_list)
    x_test_new = model.predict(x_test, batch_size=500)

    index_count = 0
    for file_num in range(len(set_length)):
        temp = x_test_new[index_count:index_count + set_length[file_num]]
        temp = np.transpose(temp)
        savepath = set_path[file_num].replace(testindex, output_dir + '/' + testindex)
        np.save(os.path.splitext(savepath)[0], temp)
        index_count = index_count + set_length[file_num]
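
The splicing loop above is what gives the network its 429-dimensional input: each frame carries 39 feature coefficients, and 11 consecutive frames are flattened into one vector, so 11 x 39 = 429. A standalone sketch of the same windowing, using a toy array in place of a real utterance:

import numpy as np

frames = np.arange(20 * 39).reshape(20, 39)  # toy stand-in for one 20-frame utterance
num_frame_group = 11
pad = np.zeros(((num_frame_group - 1) // 2, 39))  # 5 zero frames on each side
padded = np.concatenate((pad, frames, pad), axis=0)
windows = np.array([padded[i:i + num_frame_group].reshape(-1)
                    for i in range(len(padded) - num_frame_group + 1)])
print(windows.shape)  # (20, 429): one 429-dim input vector per original frame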


jumpsolve.py

Source: jumpsolve.py (GitHub)


# main function receiving data about the puzzle
def main():
    y = int(input("What is the length of the puzzle? "))
    x = int(input("What is the breadth of the puzzle? "))
    x1 = int(input("What is the x position of the start tile? "))
    y1 = int(input("What is the y position of the start tile? "))
    xG = int(input("What is the x position of the goal tile? "))
    yG = int(input("What is the y position of the goal tile? "))
    start = (x1, y1)
    goal = (xG, yG)
    board = []
    for i in range(x):
        row = []
        for j in range(y):
            temp = int(input("What is the value at the (" + str(i) + ", " + str(j) + ") tile? "))
            row.append(temp)
        board.append(row)
    astar_solution = jumpsolve_astar(start, goal, board)
    print("This is the A* Solution: ")
    print(astar_solution)
    bfs_solution = jumpsolve_bfs(start, goal, board)
    print("This is the BFS Solution:")
    print(bfs_solution)
    ids_solution = jumpsolve_ids(start, goal, board)
    print("This is the IDS Solution: ")
    print(ids_solution)

# the (up to four) tiles reachable from current by jumping exactly board[x][y] tiles
# along either axis; a tile with value 0 has no moves
def moves(current, board):
    x, y = current
    v = board[x][y]
    options = []
    if v != 0:
        if y - v >= 0:
            options.append((x, y - v))
        if x - v >= 0:
            options.append((x - v, y))
        if y + v < len(board[0]):
            options.append((x, y + v))
        if x + v < len(board):
            options.append((x + v, y))
    return options

# astar search implementation
def jumpsolve_astar(start, goal, board):
    astar_queue_list = []
    current = start
    current_path = [start]
    while True:
        print(current)
        for temp_option in moves(current, board):
            current_path.append(temp_option)
            if goal in current_path:
                return current_path
            astar_queue_list.append(current_path[:])
            current_path.pop()
        print(astar_queue_list)
        # expand the frontier path with the smallest f(n) = g(n) + h(n)
        options_rating = [f_n(path, goal, board) for path in astar_queue_list]
        print(options_rating)
        choice = options_rating.index(min(options_rating))
        current_path = astar_queue_list.pop(choice)
        current = current_path[-1]

# heuristic function for astar search: Manhattan distance to the goal, scaled by the board length
def h_n(option, goal, board):
    return (abs(option[0] - goal[0]) + abs(option[1] - goal[1])) / len(board)

# calculating path cost for astar search: the number of jumps taken so far
def g_n(option_path):
    return len(option_path) - 1

# calculating the sum of heuristic function and path cost
def f_n(option_path, goal, board):
    return g_n(option_path) + h_n(option_path[-1], goal, board)

# implementation of breadth-first search
def jumpsolve_bfs(start, goal, board):
    bfs_queue_list = []
    current = start
    current_path = [start]
    while True:
        for temp_option in moves(current, board):
            current_path.append(temp_option)
            if goal in current_path:
                return current_path
            bfs_queue_list.append(current_path[:])
            current_path.pop()
        current_path = bfs_queue_list.pop(0)  # FIFO: expand the shallowest path next
        current = current_path[-1]

# implementation of iterative deepening search, starting with limit = 1
def jumpsolve_ids(start, goal, board):
    ids_queue_list = []
    current = start
    current_path = [start]
    limit = 1
    while True:
        for temp_option in moves(current, board):
            current_path.append(temp_option)
            if goal in current_path:
                return current_path
            temp_list = current_path[:]
            if len(temp_list) <= limit + 1:  # keep only paths within the current depth limit
                ids_queue_list.append(temp_list)
            current_path.pop()
        if len(ids_queue_list) == 0:
            # the depth limit is exhausted: raise it and restart from the start tile
            limit += 1
            current = start
            current_path = [start]
        else:
            current_path = ids_queue_list.pop()  # LIFO: depth-first within the limit
            current = current_path[-1]

# calling the main function to activate the program
main()
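
As a quick sanity check on the A* scoring, f_n adds the jump count so far (g_n) to the board-scaled Manhattan distance from the path's last tile to the goal (h_n). A worked example on a hypothetical 3x3 board (the tile values are arbitrary, since h_n only uses the board's length):

path = [(0, 0), (0, 2)]                    # one jump taken so far, so g_n = 1
board = [[2, 1, 2], [1, 2, 1], [2, 1, 0]]  # hypothetical 3x3 board
goal = (2, 2)
# h_n((0, 2), goal) = (|0 - 2| + |2 - 2|) / 3 = 2/3
print(f_n(path, goal, board))              # 1 + 2/3 = 1.666...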


reformatDataGEO.py

Source: reformatDataGEO.py (GitHub)


import os

# function to rename GEO barcodes, genes, and matrix files and prepare appropriate folders for Seurat
def moveGEO():
    current_path = os.getcwd()  # change into GEO data folder directory
    folder = "/mouse_heart_GEO_data"
    os.chdir(current_path + folder)
    # prepare folders for the 4 zones of the heart
    SAN_folder = '/SAN_GEO'  # ZONE I
    AVN_folder = '/AVN_GEO'  # ZONE II
    LPF_folder = '/LPF_GEO'  # ZONE III
    RPF_folder = '/RPF_GEO'  # ZONE IV
    # create folders for each zone
    for zone_folder in (SAN_folder, AVN_folder, LPF_folder, RPF_folder):
        os.mkdir(current_path + folder + zone_folder)
    # move each GSM sample's files into its zone folder, renaming them to the
    # names Seurat expects (in particular, genes.tsv.gz becomes features.tsv.gz)
    samples = [('GSM3885058_SAN', SAN_folder),
               ('GSM3885059_AVN', AVN_folder),
               ('GSM3885060_LPF', LPF_folder),
               ('GSM3885061_RPF', RPF_folder)]
    renames = [('barcodes.tsv.gz', 'barcodes.tsv.gz'),
               ('genes.tsv.gz', 'features.tsv.gz'),
               ('matrix.mtx.gz', 'matrix.mtx.gz')]
    for prefix, zone_folder in samples:
        for src_name, dst_name in renames:
            os.system('mv ' + current_path + folder + '/' + prefix + src_name +
                      ' ' + current_path + folder + zone_folder + '/' + dst_name)
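
One caveat worth noting: os.system('mv ...') depends on a shell that provides mv and reports failures only through an exit code, so a missing file passes silently. shutil.move is a portable stand-in; a minimal sketch under the same naming scheme (the paths shown are illustrative):

import shutil

# cross-platform equivalent of os.system('mv src dst'); raises an error if src is missing
src = 'mouse_heart_GEO_data/GSM3885058_SANbarcodes.tsv.gz'
dst = 'mouse_heart_GEO_data/SAN_GEO/barcodes.tsv.gz'
shutil.move(src, dst)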

