Best Python code snippet using autotest_python
Source:solution.py  
...
                        row.append('.')
                dimension.append(row)
            updated_state.append(dimension)
        return updated_state

    def count_num_active(self, state):
        num_active = 0
        num_dimensions = len(state)
        num_rows = len(state[0])
        for i in range(0, num_dimensions):
            for j in range(0, num_rows):
                for k in range(0, num_rows):
                    if state[i][j][k] == '#':
                        num_active = num_active + 1
        return num_active


class Cube_4D():
    def __init__(self, state=[[[]]]):
        # state is a 4D list: 1st D = hyper (w direction), 2nd D = dimension (z direction),
        # 3rd D = row (x direction), 4th D = col (y direction)
        self.initial_state = state

    # add inactive borders to a state in hyper, dimension, row, and column.
    def add_border(self, state):
        num_hyper = len(state)
        num_dimensions = len(state[0])
        num_row = len(state[0][0])
        period_row = list('.' * (num_row + 2))
        period_hyper = []
        for j in range(0, num_dimensions + 2):
            period_dimension = []
            for i in range(0, num_row + 2):
                period_dimension.append(period_row)
            period_hyper.append(period_dimension)
        new_state = [period_hyper]
        for h in range(0, num_hyper):
            hyper = [period_dimension]
            for i in range(0, num_dimensions):
                dimension = [period_row]
                for j in range(0, num_row):
                    row = ['.']
                    for k in range(0, num_row):
                        row.append(state[h][i][j][k])
                    row.append('.')
                    dimension.append(row)
                dimension.append(period_row)
                hyper.append(dimension)
            hyper.append(period_dimension)
            new_state.append(hyper)
        new_state.append(period_hyper)
        return new_state

    def count_neighbors(self, state, indices):
        hyp, dim, row, col = indices[0], indices[1], indices[2], indices[3]
        num_active = 0
        for h in range(hyp - 1, hyp + 2):
            for i in range(dim - 1, dim + 2):
                for j in range(row - 1, row + 2):
                    for k in range(col - 1, col + 2):
                        if state[h][i][j][k] == '#' and not (h == hyp and i == dim and j == row and k == col):
                            num_active = num_active + 1
        return num_active

    def propagate(self, state):
        new_state = self.add_border(state)  # makes it easier to check neighbors
        updated_state = []
        num_hyper = len(new_state)
        num_dimensions = len(new_state[0])
        num_rows = len(new_state[0][0])
        for h in range(1, num_hyper - 1):
            hyper = []
            for i in range(1, num_dimensions - 1):
                dimension = []
                for j in range(1, num_rows - 1):
                    row = []
                    for k in range(1, num_rows - 1):
                        num_active = self.count_neighbors(new_state, [h, i, j, k])
                        if num_active == 3:
                            row.append('#')
                        elif new_state[h][i][j][k] == '#' and num_active == 2:
                            row.append('#')
                        else:
                            row.append('.')
                    dimension.append(row)
                hyper.append(dimension)
            updated_state.append(hyper)
        return updated_state

    def count_num_active(self, state):
        num_active = 0
        num_hyper = len(state)
        num_dimensions = len(state[0])
        num_rows = len(state[0][0])
        for h in range(0, num_hyper):
            for i in range(0, num_dimensions):
                for j in range(0, num_rows):
                    for k in range(0, num_rows):
                        if state[h][i][j][k] == '#':
                            num_active = num_active + 1
        return num_active

    def __repr__(self):
        string = ""
        for h in range(0, len(self.initial_state)):
            for i in range(0, len(self.initial_state[0])):
                for j in range(0, len(self.initial_state[0][0])):
                    string = string + "".join(self.initial_state[h][i][j]) + "\n"
                string = string + "\n"
            string = string + "\n"
        string = string + "\nEND"
        return string


if __name__ == "__main__":
    with open(INPUT_FILE_NAME) as input_file:
        input_list = input_file.readlines()
        # create initial state that is 1 dimensional
        part1_cube = Cube_3D()
        for line in input_list:
            part1_cube.initial_state[0].append(list(line.rstrip()))
        for cycle in range(0, NUM_CYCLES):
            # add border
            new_state = part1_cube.add_border(part1_cube.initial_state)
            part1_cube.initial_state = None
            # propagate state
            part1_cube.initial_state = part1_cube.propagate(new_state)

        # count active states
        num_active = part1_cube.count_num_active(part1_cube.initial_state)
        print("(part 1): the number of active states after " + str(NUM_CYCLES) + " cycles for a 3D cube is " + str(num_active))

        part2_cube = Cube_4D()
        for line in input_list:
            part2_cube.initial_state[0][0].append(list(line.rstrip()))
        for cycle in range(0, NUM_CYCLES):
            # add border
            new_state = part2_cube.add_border(part2_cube.initial_state)
            part2_cube.initial_state = [[[]]]
            # propagate state
            part2_cube.initial_state = part2_cube.propagate(new_state)

        # count active states
        num_active = part2_cube.count_num_active(part2_cube.initial_state)
...
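The top of solution.py (the start of the Cube_3D class and the INPUT_FILE_NAME and NUM_CYCLES constants) is truncated above, so here is a minimal sketch of how the Cube_4D class can be exercised on its own. The hard-coded 3x3 seed grid, the six-cycle count, and the driver loop are illustrative assumptions, not part of the original file.

# Minimal sketch (assumed driver, not from the original file): run Cube_4D on a
# hard-coded 3x3 seed grid for six cycles. The truncated header of solution.py
# defines the real INPUT_FILE_NAME and NUM_CYCLES used by the __main__ block.
seed_rows = ['.#.', '..#', '###']  # assumed example seed grid
demo_cube = Cube_4D(state=[[[list(row) for row in seed_rows]]])
state = demo_cube.initial_state
for _ in range(6):  # six cycles is an assumption
    # grow the grid by one inactive layer on every side, then apply the rules
    state = demo_cube.propagate(demo_cube.add_border(state))
print('active cells after 6 cycles:', demo_cube.count_num_active(state))

Passing the state into the constructor explicitly also avoids mutating the shared mutable default argument ([[[]]]) that the __main__ block's part2_cube relies on.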
Source:data_split.py  
import torch
import random
import hashlib
import json


def get_split(num_active, num_inactive, seed, dataset_name, shrink=False):
    active_idx = list(range(num_active))
    inactive_idx = list(range(num_active, num_active + num_inactive))
    random.seed(seed)
    random.shuffle(active_idx)
    random.shuffle(inactive_idx)
    if shrink == False:
        num_active_train = round(num_active * 0.8)
        num_inactive_train = round(num_inactive * 0.8)
        num_active_valid = round(num_active * 0.1)
        num_inactive_valid = round(num_inactive * 0.1)
        num_active_test = num_active - num_active_train - num_active_valid
        num_inactive_test = round(num_inactive * 0.1)
        filename = f'data_split/{dataset_name}_seed{seed}.pt'
    else:
        num_active_train = round(num_active * 0.8)
        num_inactive_train = 10000 if num_inactive > 10000 else round(num_inactive * 0.8)
        num_active_valid = round(num_active * 0.1)
        num_inactive_valid = round(num_inactive * 0.1)
        num_active_test = num_active - num_active_train - num_active_valid
        num_inactive_test = round(num_inactive * 0.1)
        filename = f'data_split/shrink_{dataset_name}_seed{seed}.pt'
    split_dict = {}
    split_dict['train'] = active_idx[:num_active_train] \
                          + inactive_idx[:num_inactive_train]
    split_dict['valid'] = active_idx[num_active_train:num_active_train + num_active_valid] \
                          + inactive_idx[num_inactive_train:num_inactive_train + num_inactive_valid]
    split_dict['test'] = active_idx[num_active_train + num_active_valid
                                    :num_active_train + num_active_valid + num_active_test] \
                         + inactive_idx[num_inactive_train + num_inactive_valid
                                        :num_inactive_train + num_inactive_valid + num_inactive_test]
    # print(f'split_dict:{split_dict["test"]}')
    num_train = len(split_dict['train'])
    num_valid = len(split_dict['valid'])
    num_test = len(split_dict['test'])
    print(f'num_train:{num_train}, num_valid:{num_valid}, num_test:{num_test}')

    torch.save(split_dict, filename)
    data_md5 = hashlib.md5(json.dumps(split_dict, sort_keys=True).encode('utf-8')).hexdigest()
    print(f'data_md5_checksum:{data_md5}')
    print(f'file saved at {filename}')
    with open(f'{filename}.checksum', 'w+') as checksum_file:
        checksum_file.write(data_md5)


if __name__ == '__main__':
    dataset_info = {
        '435008': {'num_active': 233, 'num_inactive': 217923},  # {'num_active':233, 'num_inactive':217925},
        '1798': {'num_active': 187, 'num_inactive': 61645},  # {'num_active':187, 'num_inactive':61645},
        '435034': {'num_active': 362, 'num_inactive': 61393},  # {'num_active':362, 'num_inactive':61394},
        '1843': {'num_active': 172, 'num_inactive': 301318},  # {'num_active':172, 'num_inactive':301321},
        '2258': {'num_active': 213, 'num_inactive': 302189},  # {'num_active':213, 'num_inactive':302192},
        '463087': {'num_active': 703, 'num_inactive': 100171},  # {'num_active':703, 'num_inactive':100172},
        '488997': {'num_active': 252, 'num_inactive': 302051},  # {'num_active':252, 'num_inactive':302054},
        '2689': {'num_active': 172, 'num_inactive': 319617},  # {'num_active':172, 'num_inactive':319620},
        '485290': {'num_active': 278, 'num_inactive': 341026},  # {'num_active':281, 'num_inactive':341084},
        '9999': {'num_active': 37, 'num_inactive': 226},
    }
    seed_list = [1, 2, 3]
    dataset_name_list = ['435008', '1798', '435034', '1843', '2258', '463087', '488997', '2689', '485290', '9999']
    # dataset_name_list = ['1798']
    for dataset_name in dataset_name_list:
        for seed in seed_list:
            num_active = dataset_info[dataset_name]['num_active']
            num_inactive = dataset_info[dataset_name]['num_inactive']
...
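The call that drives get_split at the bottom of data_split.py is truncated above. The following is a hedged usage sketch rather than the missing code: it runs a single 80/10/10 split for the small '9999' entry, using the counts listed in dataset_info, and assumes the data_split/ output directory is created first.

import os

# Usage sketch (assumed driver, not from the original file): one split for the
# '9999' dataset (37 actives, 226 inactives, values taken from dataset_info above).
# get_split saves into data_split/, so create that directory before calling it.
os.makedirs('data_split', exist_ok=True)
get_split(num_active=37, num_inactive=226, seed=1, dataset_name='9999', shrink=False)
# prints the train/valid/test sizes and the md5 checksum, and writes
# data_split/9999_seed1.pt plus data_split/9999_seed1.pt.checksum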
Source:new_data_split.py  
import torch
import random
import hashlib
import json


def get_split(num_active, num_inactive, seed, dataset_name, shrink=False):
    active_idx = list(range(num_active))
    inactive_idx = list(range(num_active, num_active + num_inactive))
    random.seed(seed)
    random.shuffle(active_idx)
    random.shuffle(inactive_idx)
    num_active_train = round(num_active * 0.8)
    num_inactive_train = round(num_inactive * 0.8)
    num_active_valid = round(num_active * 0.1)
    num_inactive_valid = round(num_inactive * 0.1)
    num_active_test = num_active - num_active_train - num_active_valid
    num_inactive_test = round(num_inactive * 0.1)
    print(f'num_active_train:{num_active_train} num_active_valid:{num_active_valid} num_active_test:{num_active_test}')

    split_dict = {}
    split_dict['train'] = active_idx[:num_active_train] \
                          + inactive_idx[:num_inactive_train]
    split_dict['valid'] = active_idx[num_active_train:num_active_train + num_active_valid] \
                          + inactive_idx[num_inactive_train:num_inactive_train + num_inactive_valid]
    split_dict['test'] = active_idx[num_active_train + num_active_valid
                                    :num_active_train + num_active_valid + num_active_test] \
                         + inactive_idx[num_inactive_train + num_inactive_valid
                                        :num_inactive_train + num_inactive_valid + num_inactive_test]
    if shrink == True:
        trim_number = 10000 if num_inactive > 10000 else round(num_inactive * 0.8)
        split_dict['train'] = split_dict['train'][:(trim_number + num_active)]
        filename = f'data_split/preserve_shrink_{dataset_name}_seed{seed}.pt'
    else:
        filename = f'data_split/preserve_{dataset_name}_seed{seed}.pt'
    # print(f'split_dict:{split_dict["test"]}')
    num_train = len(split_dict['train'])
    num_valid = len(split_dict['valid'])
    num_test = len(split_dict['test'])
    print(f'num_train:{num_train}, num_valid:{num_valid}, num_test:{num_test}')

    torch.save(split_dict, filename)
    data_md5 = hashlib.md5(json.dumps(split_dict, sort_keys=True).encode('utf-8')).hexdigest()
    print(f'data_md5_checksum:{data_md5}')
    print(f'file saved at {filename}')
    with open(f'{filename}.checksum', 'w+') as checksum_file:
        checksum_file.write(data_md5)


if __name__ == '__main__':
    dataset_info = {
        '435008': {'num_active': 233, 'num_inactive': 217923},  # {'num_active':233, 'num_inactive':217925},
        '1798': {'num_active': 187, 'num_inactive': 61645},  # {'num_active':187, 'num_inactive':61645},
        '435034': {'num_active': 362, 'num_inactive': 61393},  # {'num_active':362, 'num_inactive':61394},
        '1843': {'num_active': 172, 'num_inactive': 301318},  # {'num_active':172, 'num_inactive':301321},
        '2258': {'num_active': 213, 'num_inactive': 302189},  # {'num_active':213, 'num_inactive':302192},
        '463087': {'num_active': 703, 'num_inactive': 100171},  # {'num_active':703, 'num_inactive':100172},
        '488997': {'num_active': 252, 'num_inactive': 302051},  # {'num_active':252, 'num_inactive':302054},
        '2689': {'num_active': 172, 'num_inactive': 319617},  # {'num_active':172, 'num_inactive':319620},
        '485290': {'num_active': 278, 'num_inactive': 341026},  # {'num_active':281, 'num_inactive':341084},
        '9999': {'num_active': 37, 'num_inactive': 226},
    }
    seed_list = [1, 2, 3]
    dataset_name_list = ['435008', '1798', '435034', '1843', '2258', '463087', '488997', '2689', '485290', '9999']
    # dataset_name_list = ['1798']
    for dataset_name in dataset_name_list:
        for seed in seed_list:
            num_active = dataset_info[dataset_name]['num_active']
            num_inactive = dataset_info[dataset_name]['num_inactive']
...
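Both split scripts finish by writing an md5 of the JSON-serialized split next to the saved .pt file. The sketch below shows how a downstream consumer might reload a split and verify it against that checksum; the file name is an assumed example of new_data_split.py's preserve_* output, not a path taken from the snippets.

import hashlib
import json
import torch

# Verification sketch (hypothetical consumer code): reload a saved split and
# compare it against the .checksum file that get_split wrote alongside it.
split_path = 'data_split/preserve_1798_seed1.pt'  # assumed example filename
split_dict = torch.load(split_path)
recomputed = hashlib.md5(json.dumps(split_dict, sort_keys=True).encode('utf-8')).hexdigest()
with open(f'{split_path}.checksum') as checksum_file:
    stored = checksum_file.read().strip()
assert recomputed == stored, 'split file does not match its recorded checksum'
print('checksum OK:', recomputed)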