How to use base_resize method in toolium

Best Python code snippets using toolium_python

optimization_l2.py

Source: optimization_l2.py (GitHub)


import json
import time
import torch
from distutils.dir_util import copy_tree
from DC.common import vgg_normalize, vgg_renormalize, d2_distance_matrix, select_distance_matrix_idx, \
    sort_by_z, custom_vgg, remove_invisible_z
from DC.compositing import composite_layers
from DC.constants import *
from DC.loss import loss_fn_l2
from DC.utils import load_element, load_base_pattern, init_element_pos, render_constants, make_cuda, init_optimizer, \
    save_tensor, get_ss_l2, select_op_variables, save_tensor_z

# ------------------------- -------------------------
if not resume:
    copy_tree(cwd + '/DC', folder + '/DC')
    with open(folder + 'hyperparameters.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    if not os.path.exists(folder):
        os.makedirs(folder)
    else:
        print('FOLDER ALREADY EXISTS')
# ------------------------- -------------------------
w = 1
w1 = 1
w2 = 0.5
w3 = 0.5
w_o = 0
tau = 1.5
remove_things = False
num_seeds = 1

def main():
    global soft, w, w1, w2, w3, w_o, init_iter, base_resize, expand_size, n_soft_elements, tau, remove_things, num_seeds
    if args.complex_background:
        base_pattern, background, background_img = load_base_pattern(pattern_filename, args.tiled, base_resize, blur=False, complex_background=True)
        background_img = background_img.cuda()
    else:
        base_pattern, background = load_base_pattern(pattern_filename, args.tiled, base_resize, blur=False)
        background_img = None
    elements = load_element(element_filename, size=base_resize)
    parameters = init_element_pos(number)
    constants = render_constants(number[0]*number[1], scale_x_single, scale_y_single, base_resize, n_soft_elements, expand_size, color=args.color)
    base_pattern, background, mean_c, std_c = make_cuda([base_pattern, background, mean, std])
    base_pattern = vgg_normalize(base_pattern, mean_c[:, :3], std_c[:, :3])
    c_variables = make_cuda(parameters + constants)
    init_c_variables = []
    for i in c_variables:
        init_c_variables.append(i.clone().detach())
    op_variables = select_op_variables(n_elements, args, c_variables)
    if args.tiled:
        base_resize = [512, 512]
    background.requires_grad = True
    if non_white_background:
        op_variables = op_variables + [background, ]
    optimizer, scheduler = init_optimizer(op_variables, args.lr)

    # ----------------- remove unnecessary ------------------------------
    def remove_unecessary(c_variables):
        if not args.color:
            c_variables = remove_invisible_z(c_variables, 2000)
        c_variables = sort_by_z(c_variables)
        element_classes = torch.argmax(c_variables[7], dim=1)
        distance_matrix = d2_distance_matrix(torch.cat([c_variables[0][:, None]/10, c_variables[1][:, None]/10], dim=1))
        idx = select_distance_matrix_idx(distance_matrix, element_classes, c_variables[5], [10/256, 20/256, 10/256, 10/256, 20/256, 15/256, 15/256, 30/256, 30/256], min_dist=10/256)
        new_c_variables = []
        for i in c_variables:
            new_c_variables.append(i[idx])
        c_variables = make_cuda(new_c_variables)
        op_variables = select_op_variables(n_elements, args, c_variables)
        if non_white_background:
            op_variables = op_variables + [background, ]
        optimizer, scheduler = init_optimizer(op_variables, args.lr*0.1)
        return c_variables, optimizer, scheduler

    # ----------------- init ------------------------------
    c_variables[6].requires_grad = False
    if resume:
        load_iter = args.resume_int
        PATH = folder + 'checkpoint_' + str(load_iter)
        checkpoint = torch.load(PATH)
        c_variables = checkpoint['c_variables']
        background = checkpoint['background']
        if args.seed:
            c_variables_1 = c_variables.copy()
            c_variables_1[6] = c_variables_1[6] / 2
            c_variables_2 = []
            for c in init_c_variables:
                c_variables_2.append(c.clone().detach())
            c_variables = []
            for idx in range(len(c_variables_1)):
                c_variables.append(torch.cat([c_variables_1[idx], c_variables_2[idx], ]))
            c_variables = make_cuda(c_variables)
            torch.cuda.empty_cache()
        op_variables = select_op_variables(n_elements, args, c_variables)
        background.requires_grad = True
        if non_white_background:
            op_variables = op_variables + [background, ]
        optimizer, scheduler = init_optimizer(op_variables, args.lr)
        # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # scheduler = CyclicLR(optimizer, base_lr=lr, max_lr=lr * 4, cycle_momentum=False, mode='exp_range',
        #                      step_size_up=1500)
        # ----------------------declare variables -------------------
        init_iter = load_iter - 1
        w = 1
        w1 = 1
        w2 = 1
        w3 = 1.0
        w_o = 0
        # tau = tau + 10
        if args.layers:
            c_variables[6].requires_grad = True
    current_soft = False
    base_pattern = vgg_renormalize(base_pattern, mean_c[:, :3], std_c[:, :3])
    save_tensor(folder, 'BASE', base_pattern)
    ss = get_ss_l2(base_pattern)
    start_time = time.time()
    for i in range(init_iter + 1, init_iter + iter):
        # tau = 0.1
        render, overlap = composite_layers(elements, c_variables, background, n_elements, expand_size, current_soft, tau, color=args.color, background_img=background_img)
        ws = [w, w1, w2, w3, w_o]
        loss = loss_fn_l2(render, overlap, ss, ws) + overlap*w_o
        if i % 10 == 0:
            save_tensor(folder, str(i), render)
            print('iteration {} '.format(i), (loss).to('cpu'), (overlap*(w_o)))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ## Everything below this is to improve convergence
        if (i - 100000) == 500:
            w_o = 0  # (10**-7)
            if args.layers:
                c_variables[6].requires_grad = True
        if i % 3000 == 0:
            # tau = tau + 1
            print(torch.exp(c_variables[6]/25))
            if args.layers:
                c_variables[6].requires_grad = True
            save_tensor_z(folder, str(i)+'_z', render, c_variables[0]/10, c_variables[1]/10, c_variables[6], size=128)
            save_tensor_z(folder, str(i)+'_idx', render, c_variables[0]/10, c_variables[1]/10, torch.arange(c_variables[6].shape[0]), size=128)
            torch.save({
                'c_variables': c_variables,
                'optimizer_state_dict': optimizer.state_dict(),
                'base_size': base_resize,
                'xy_scale_factor': 10,
                'soft': current_soft,
                'background': background}, folder+'checkpoint_'+str(i))
            w1 = min(w1 + 0.05, 1.5)
            w1 = min(w1 + 0.05, 1.5)
            w2 = max(w2 - 0.05, 0.2)
            w3 = max(w3 - 0.05, 0.2)
        if ((i - 100000) % 8000 - 4000 == 500 or (i - 100000) % 8000 == 500) and remove_things:
            w_o = 0
            for g in optimizer.param_groups:
                g['lr'] = args.lr
        if (i - 100000) % 8000 == 4000 and remove_things:
            w_o = 0
            elapsed_time = time.time() - start_time
            print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
            print(elapsed_time)
            c_variables, optimizer, scheduler = remove_unecessary(c_variables)
            print(torch.nn.functional.sigmoid(c_variables[5]/10))
            render, overlap = composite_layers(elements, c_variables, background, n_elements, expand_size, current_soft,
                                               tau, color=args.color, background_img=background_img)
            save_tensor(folder, 'postremoval_'+str(i), render)
        if args.seed and num_seeds > 0:
            if (i - 100000) % 8000 == 0:
                num_seeds = num_seeds - 1
                torch.save({
                    'c_variables': c_variables,
                    'optimizer_state_dict': optimizer.state_dict(),
                    'base_size': base_resize,
                    'xy_scale_factor': 10,
                    'soft': current_soft,
                    'background': background}, folder + 'checkpoint_' + str(i))
                if args.rotation:
                    c_variables_1 = c_variables.copy()
                    c_variables_1[6] = c_variables_1[6]*2/3
                    # c_variables_1[7] = c_variables_1[7]
                    c_variables_2 = c_variables.copy()
                    c_variables_2[2] = c_variables_2[2] + (90/(1.5*3.14159*57.2958))
                    c_variables_2[6] = c_variables_2[6]*2/3
                    # c_variables_2[7] = torch.ones_like(c_variables_2[7])#/4
                    c_variables_3 = c_variables.copy()
                    c_variables_3[2] = c_variables_3[2] + (180/(1.5*3.14159*57.2958))
                    c_variables_3[6] = c_variables_3[6]*2/3
                    # c_variables_3[7] = torch.ones_like(c_variables_3[7])#/4
                    c_variables_4 = c_variables.copy()
                    c_variables_4[2] = c_variables_4[2] + (270/(1.5*3.14159*57.2958))
                    c_variables_4[6] = c_variables_4[6]*2/3
                    c_variables = []
                    for idx in range(len(c_variables_1)):
                        c_variables.append(torch.cat([c_variables_1[idx], c_variables_2[idx], c_variables_3[idx], c_variables_4[idx]]))
                    c_variables = make_cuda(c_variables)
                    op_variables = select_op_variables(n_elements, args, c_variables)
                    render, overlap = composite_layers(elements, c_variables, background, n_elements, expand_size, current_soft,
                                                       tau, color=args.color, background_img=background_img)
                    save_tensor(folder, 'postseeding_'+str(i), render)
                    optimizer, scheduler = init_optimizer(op_variables + [background, ], args.lr*0.1)
                    remove_things = True
                else:
                    c_variables_1 = c_variables.copy()
                    if args.color:
                        c_variables_1[7] = torch.ones_like(c_variables_1[7])
                        c_variables_2 = []
                        for c in init_c_variables:
                            c_variables_2.append(c.clone().detach())
                        c_variables = []
                        for idx in range(len(c_variables_1)):
                            c_variables.append(torch.cat([c_variables_1[idx], c_variables_2[idx], ]))
                    else:
                        c_variables_1[6] = c_variables_1[6] / 2
                        c_variables_2 = []
                        for c in init_c_variables:
                            c_variables_2.append(c.clone().detach())
                        c_variables = []
                        for idx in range(len(c_variables_1)):
                            c_variables.append(torch.cat([c_variables_1[idx], c_variables_2[idx], ]))
                    c_variables = make_cuda(c_variables)
                    op_variables = select_op_variables(n_elements, args, c_variables)
                    render, overlap = composite_layers(elements, c_variables, background, n_elements, expand_size, current_soft,
                                                       tau, color=args.color)
                    save_tensor(folder, 'postseeding_'+str(i), render)
                    if non_white_background:
                        op_variables = op_variables + [background, ]
                    optimizer, scheduler = init_optimizer(op_variables, args.lr*0.1)
                    remove_things = True
                w_o = 0
if __name__ == '__main__':
    ...
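In this script, base_resize is the [height, width] target that the pattern, the individual elements, and the render constants are all sized to before optimization, and it is written into every checkpoint under the 'base_size' key. The DC helpers above are specific to that repository, so the following is only a minimal, hypothetical sketch of what resizing an image tensor to base_resize typically involves, using plain PyTorch (the tensor shape and base size are assumed values, not taken from the snippet's data):

import torch
import torch.nn.functional as F

# Assumed values for illustration; the real script loads pattern.png via load_base_pattern.
base_resize = [256, 256]             # e.g. [args.base_size, args.base_size]
image = torch.rand(1, 3, 512, 512)   # placeholder (N, C, H, W) image tensor
resized = F.interpolate(image, size=tuple(base_resize), mode='bilinear', align_corners=False)
print(resized.shape)                 # torch.Size([1, 3, 256, 256])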


constants.py

Source: constants.py (GitHub)


import math
import os
from DC.common import make_tensor
from DC.config import args
from DC.scales import all_scales
# matplotlib.use('Agg')
# ------------------------- variables -------------------------
cwd = os.getcwd()
pattern = args.pattern
n_elements = 1
n_soft_elements = int(len(all_scales[pattern])/2)
folder_root = args.folder
if folder_root == '':
    folder_root = cwd  # os.path.dirname(cwd)
element_filename = []
for i in range(n_soft_elements):
    element_filename.append('{}/data/{}/elements/{}.png'.format(folder_root, pattern, i+1))
pattern_filename = '{}/data/{}/pattern.png'.format(folder_root, pattern)
folder = "{}/logs/expansion/{}_{}/".format(folder_root, pattern, args.version)
non_white_background = args.non_white
# number = [4, 4]
scale_x_single = all_scales[pattern][0::2]
scale_y_single = all_scales[pattern][1::2]
# ------------------------- calculated constants -------------------------
base_resize = [args.base_size, args.base_size]
expand_size = [1, 3, base_resize[0], base_resize[1]]
if args.expand:
    expand_size = [1, 3, base_resize[0]*2, base_resize[0]*2]
number = [int(1/(scale_y_single[0] * base_resize[1]/expand_size[3])), int(1/(scale_x_single[0] * base_resize[0]/expand_size[2]))]
number[0] = args.sample  # (int(number[0] / n_elements)+1) * n_elements
number[1] = args.sample  # (int(number[1] / n_elements)+1) * n_elements
repeats = int(number[0] * number[1] / n_elements)
print('Number of elements:', number)
expand_size[0] = repeats
# ------------------------- initializations and constants -------------------------
lr = args.lr
iter = args.num_iter
LBFGS = False
multi = True
soft = args.soft
smooth = False
init_iter = 100000
resume = args.resume
d_threshold = (math.sqrt((max(scale_x_single) * base_resize[0]) ** 2 + (max(scale_y_single) * base_resize[1]) ** 2) - 20) / 2
mean = make_tensor([0.485, 0.456, 0.406, 0])[None, :, None, None]
std = make_tensor([0.229, 0.224, 0.225, 1])[None, :, None, None]
content_layers = ['pool_4', 'relu4_2', ]  # 'relu5_2']
content_weights = {'pool_4': 1.0, 'relu4_2': 1.0, }  # 'relu5_2': 1.0}
style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4']  # , 'conv_5']
style_weights = {'conv_1': 0.2, 'conv_2': 0.2, 'conv_3': 0.2, 'conv_4': 0.2}  # , 'conv_5': 200}
hist_layers = ['relu4_2', 'conv_1']
...
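In constants.py, base_resize comes straight from args.base_size and feeds both expand_size and the initial element grid in number. Below is a short worked example of that arithmetic with assumed inputs (base_size of 256, args.expand enabled, and 0.1 as the first x/y scale), before args.sample overrides number:

# Assumed inputs for illustration only; the real values come from args and all_scales.
base_size = 256
scale_x_single = [0.1]
scale_y_single = [0.1]

base_resize = [base_size, base_size]                       # [256, 256]
expand_size = [1, 3, base_resize[0]*2, base_resize[0]*2]   # expanded canvas -> [1, 3, 512, 512]
number = [int(1/(scale_y_single[0]*base_resize[1]/expand_size[3])),
          int(1/(scale_x_single[0]*base_resize[0]/expand_size[2]))]
print(number)                                              # [20, 20] -> a 20x20 grid of elements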


hyperopt.py

Source: hyperopt.py (GitHub)


from pytorch_ML.hyperopt import Hyperopt
import numpy as np
from numpy import random
from filet.mask_discriminator.transformations import PreProcessor_Crop_n_Resize_Box

class Mask_Hyperopt(Hyperopt):
    def __init__(self, base_lr, **kwargs):
        super().__init__(**kwargs)
        self.base_lr = base_lr

    def suggest_hyper_dict(self):
        '''
        Overloaded from base
        '''
        base_resize = (400, 620)
        resize_scales = [0.33, 0.5, 0.75, 1]
        chosen_resize_scale_index = np.random.randint(0, len(resize_scales))
        chosen_scale = resize_scales[chosen_resize_scale_index]
        resize_tup = [int(chosen_scale*base_resize[0]), int(chosen_scale * base_resize[1])]
        is_resize_sq = np.random.randint(0, 2)
        if is_resize_sq:
            resize_tup[0] = (resize_tup[0] + resize_tup[1]) // 2
            resize_tup[1] = resize_tup[0]
        pad_choices = [5*i+25 for i in range(6)]
        pad = pad_choices[np.random.randint(0, len(pad_choices))]
        generated_values = {
            "optimizer": {"lr": self.base_lr * random.uniform(1, 100), "momentum": random.uniform(0.1, 0.6)},
            "scheduler": {'gamma': None},
            "loss": {},
            "net": {'two_layer_head': random.random() > 0.5, },
            'resize': resize_tup,
            'pad': pad,
        }
        lr_half_time = random.uniform(1000, 7500)
        generated_values['scheduler']["gamma"] = np.exp(-np.log(2)/lr_half_time)
        return generated_values

    def construct_trainer_objs(self, hyper):
        '''
        builds model, optimizer, scheduler and other objects needed for training. Must be deterministic given hyper_dict
        '''
        hyper_objs = super().construct_trainer_objs(hyper)
        dt = self.dt
        prep = PreProcessor_Crop_n_Resize_Box(resize_dims=hyper['resize'], pad=hyper['pad'], mean=[0.2010, 0.1944, 0.2488, 0.0000], std=[0.224, 0.224, 0.224, 1])
        dt.dataset.set_preprocessor(prep)
        hyper_objs['dt'] = dt
        ...
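Here base_resize is a fixed (height, width) reference resolution; each call to suggest_hyper_dict scales it by a randomly chosen factor and, half the time, squares the result before it is handed to the preprocessor. The Hyperopt base class and PreProcessor_Crop_n_Resize_Box are specific to this repository, so the sketch below reproduces only the resize-sampling step with plain NumPy:

import numpy as np

base_resize = (400, 620)                  # reference resolution from the snippet
resize_scales = [0.33, 0.5, 0.75, 1]

chosen_scale = resize_scales[np.random.randint(0, len(resize_scales))]
resize_tup = [int(chosen_scale * base_resize[0]), int(chosen_scale * base_resize[1])]
if np.random.randint(0, 2):               # optionally force a square resize
    side = (resize_tup[0] + resize_tup[1]) // 2
    resize_tup = [side, side]
print(resize_tup)                         # e.g. [200, 310] or [255, 255]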


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run toolium automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

