How to use the test_forward_dir method in autotest

Best Python code snippets using autotest_python

utils_unittest.py

Source: utils_unittest.py (GitHub)


...
    def test_not_absolute(self):
        self.assertRaises(AssertionError, utils.get_relative_path, "a", "b")
    def test_same_dir(self):
        self.assertEqual(utils.get_relative_path("/a/b/c", "/a/b"), "c")
    def test_forward_dir(self):
        self.assertEqual(utils.get_relative_path("/a/b/c/d", "/a/b"), "c/d")
    def test_previous_dir(self):
        self.assertEqual(utils.get_relative_path("/a/b", "/a/b/c/d"), "../..")
    def test_parallel_dir(self):
        self.assertEqual(utils.get_relative_path("/a/c/d", "/a/b/c/d"),
                         "../../../c/d")
if __name__ == "__main__":
...
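The snippet above exercises autotest's utils.get_relative_path helper; test_forward_dir covers the case where the target path lies below the reference directory, so the result is a plain forward path such as "c/d" with no ".." segments. As a rough sketch of the behavior these tests pin down (built on os.path.relpath, not autotest's actual implementation), consider:

import os

def get_relative_path(path, reference):
    # Illustrative stand-in, not autotest's implementation.
    # Both arguments must be absolute, matching test_not_absolute above.
    assert os.path.isabs(path) and os.path.isabs(reference)
    return os.path.relpath(path, reference)

# Forward-directory case, as in test_forward_dir:
assert get_relative_path("/a/b/c/d", "/a/b") == "c/d"
# Previous-directory case, as in test_previous_dir:
assert get_relative_path("/a/b", "/a/b/c/d") == "../.."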


test_spring_opt.py

Source: test_spring_opt.py (GitHub)


...
                # cr_path=join(self.cr_dir,'cr_{:08d}.obj'.format(sample_id))
                # self.write_obj(x_save,None,cr_path,patch_id=self.patch_id)
            except:
                continue
    def test_forward_dir(self,in_dir,out_dir,n_iters=1,start=0,end=2247):
        system=SpringOptSystem(self.stiffen_anchors_net,self.stiffen_anchors_reg,self.edges,self.l0,self.k,m_alpha=0.1,axial_data=self.axial_data)
        opt=NewtonOpt(system,newton_tol=1e-12,cg_tol=1e-3,cg_max_iter=250)
        if not isdir(out_dir):
            os.makedirs(out_dir)
        for sample_id in range(start,end+1):
            pd_path=join(in_dir,'{:08d}.obj'.format(sample_id))
            v,f=self.read_obj(pd_path,patch_id=self.patch_id)
            v=torch.from_numpy(v).to(device=self.device,dtype=self.dtype)
            x=v
            start_time=time.time()
            for i in range(n_iters):
                x,data,success=opt.solve(v,x)
            end_time=time.time()
            print('forward time:',end_time-start_time)
            x_save=x.detach().cpu().numpy()
            cr_path=join(out_dir,'{:08d}.obj'.format(sample_id))
            self.write_obj(x_save,f,cr_path,patch_id=self.patch_id)
    def test_backward(self,sample_id):
        pd_path=join(self.pd_dir,'pd_{:08d}.obj'.format(sample_id))
        pd_v,f=self.read_obj(pd_path,patch_id=self.patch_id)
        pd_v=torch.from_numpy(pd_v).to(device=self.device,dtype=self.dtype)
        gt_path=join(self.pd_dir,'gt_{:08d}.obj'.format(sample_id))
        gt_v,_=self.read_obj(gt_path,patch_id=self.patch_id)
        gt_v=torch.from_numpy(gt_v).to(device=self.device,dtype=self.dtype)
        dv=pd_v-gt_v
        cr_path=join(self.cr_dir,'cr_{:08d}.npy'.format(sample_id))
        cr_v=np.load(cr_path)
        cr_v=torch.from_numpy(cr_v).to(device=self.device,dtype=self.dtype)
        # m_path=join(self.cr_dir,'m_{:08d}.npy'.format(sample_id))
        # m_adjusted=np.load(m_path)
        # m_adjusted=torch.from_numpy(m_adjusted).to(device=self.device,dtype=self.dtype)
        # system=SpringOptSystem(self.m,self.edges,self.l0,self.k,m_alpha=0.1,axial_data=self.axial_data)
        system=SpringOptSystem(self.stiffen_anchors_net,self.stiffen_anchors_reg,self.edges,self.l0,self.k,m_alpha=0.1,axial_data=self.axial_data)
        system.use_m_adjusted=False
        data=system.get_data(cr_v)
        data['c']=pd_v
        data['anchors_net']=pd_v
        data['anchors_reg']=pd_v
        data['stiffen_anchors_net']=self.stiffen_anchors_net
        data['stiffen_anchors_reg']=self.stiffen_anchors_reg
        # data['m_adjusted']=m_adjusted
        J=system.get_J(data)
        norm_J=torch.norm(J)
        data['J_rms']=norm_J/np.sqrt(len(cr_v))
        dx=spring_opt_backward(system,data,dv,cg_tol=1e-3,cg_max_iter=250)
        grad_path=join(self.cr_dir,'grad_{:08d}.npy'.format(sample_id))
        print('save to',grad_path)
        np.save(grad_path,dx.cpu().numpy())
    def test_grad(self,sample_id):
        pd_path=join(self.pd_dir,'pd_{:08d}.obj'.format(sample_id))
        print('pd_path',pd_path)
        pd_vt,_=self.read_obj(pd_path,patch_id=self.patch_id)
        grad=np.load(join(self.cr_dir,'grad_{:08d}.npy'.format(sample_id)))
        print('grad.norm',np.linalg.norm(grad))
        grad_len=1
        ed_vt=pd_vt-grad*grad_len
        n_vts=len(pd_vt)
        obj_path=join(self.cr_dir,'grad_{:08d}.obj'.format(sample_id))
        print('write to',obj_path)
        with open(obj_path,'w') as f:
            for v in pd_vt:
                f.write('v {} {} {}\n'.format(v[0],v[1],v[2]))
            for v in ed_vt:
                f.write('v {} {} {}\n'.format(v[0],v[1],v[2]))
            for i in range(n_vts):
                f.write('l {} {}\n'.format(i+1,i+1+n_vts))
    def test_module(self,sample_id,n_iters=1):
        pd_path=join(self.pd_dir,'pd_{:08d}.obj'.format(sample_id))
        pd_v,f=self.read_obj(pd_path,patch_id=self.patch_id)
        pd_v=torch.from_numpy(pd_v).to(device=self.device,dtype=self.dtype).unsqueeze(0)
        gt_path=join(self.pd_dir,'gt_{:08d}.obj'.format(sample_id))
        gt_v,_=self.read_obj(gt_path,patch_id=self.patch_id)
        gt_v=torch.from_numpy(gt_v).to(device=self.device,dtype=self.dtype).unsqueeze(0)
        proj_module=SpringOptModule(self.res_ctx,self.ctx)
        x=pd_v
        x.requires_grad_(True)
        for i in range(n_iters):
            x=proj_module(x)
        loss=torch.sum((gt_v-x)**2)/2
        loss.backward()
        print('grad.norm',torch.norm(pd_v.grad))
    def test_loss_along_line(self,sample_id,n_iters):
        pd_path=join(self.pd_dir,'pd_{:08d}.obj'.format(sample_id))
        pd_v,f=self.read_obj(pd_path,patch_id=self.patch_id)
        pd_v=torch.from_numpy(pd_v).to(device=self.device,dtype=self.dtype).unsqueeze(0)
        # gt_vt=np.load(join(self.data_root_dir,'lowres_skin_npys/skin_{:08d}.npy'.format(sample_id)))+np.load(join(self.data_root_dir,'lowres_offsets_i10/offset_{:08d}.npy'.format(sample_id)))
        gt_path=join(self.pd_dir,'gt_{:08d}.obj'.format(sample_id))
        gt_v,f=self.read_obj(gt_path,patch_id=self.patch_id)
        gt_v=torch.from_numpy(gt_v).to(device=self.device,dtype=self.dtype).unsqueeze(0)
        proj_module=SpringOptModule(self.res_ctx,self.ctx)
        def f(x):
            for i in range(n_iters):
                x=proj_module(x)
            return torch.sum(((x-gt_v)**2).view(x.size(0),-1),dim=1)/2
        pd_v.requires_grad_(True)
        loss=f(pd_v)
        loss.backward()
        g=pd_v.grad[0]
        pd_v.requires_grad_(False)
        loss_list=[]
        total_n=100
        processed_n=0
        end=2
        batch_size=1
        while processed_n<total_n:
            x=pd_v.repeat(batch_size,1,1)
            for i in range(batch_size):
                t=(i+processed_n)/total_n*end
                x[i]-=t*g
            loss=f(x)
            loss_list+=loss.tolist()
            processed_n+=batch_size
        print(loss_list)
        np.savetxt(join(self.opt_dir,'loss_{}.txt'.format(end)),np.array(loss_list))
    def plot_loss_along_line(self,sample_id):
        end=2
        loss=np.loadtxt(join(self.opt_dir,'loss_{}.txt'.format(2)))
        x=np.linspace(0,end,len(loss))
        fig=plt.gcf()
        ax=plt.gca()
        ax.plot(x,loss)
        ax.set_title('iter=10')
        plot_path=join(self.opt_dir,'loss_{}.png'.format(end))
        print('plot_path',plot_path)
        fig.savefig(plot_path)
if __name__=='__main__':
    parser=argparse.ArgumentParser()
    parser.add_argument('-start',type=int,default=0)
    parser.add_argument('-end',type=int,default=0)
    args=parser.parse_args()
    test=SpringOptTest()
    # test.test_forward(106,n_iters=1)
    # test.test_dataset(args.start,args.end)
    test.test_backward(106)
    test.test_grad(106)
    # test.test_module(106,n_iters=10)
    # test.test_loss_along_line(106,n_iters=10)
    # test.plot_loss_along_line(106)
    # test.test_forward_dir('/data/zhenglin/PhysBAM/Private_Projects/cloth_on_virtual_body/joint_data/seq1/videos/collected_objs','/data/zhenglin/PhysBAM/Private_Projects/cloth_on_virtual_body/joint_data/seq1/videos/corrected_objs',start=args.start,end=args.end)
...
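In this file, test_forward_dir is a batch driver rather than a unit test: it builds the SpringOptSystem and Newton solver once, then for each zero-padded sample id reads in_dir/<id>.obj, runs n_iters solves, and writes the corrected mesh to out_dir (creating the directory if missing). Mirroring the commented-out call in __main__, an invocation could look like the following sketch; the paths are placeholders, and SpringOptTest's construction comes from the surrounding project:

# Hypothetical driver call; the directory paths below are placeholders,
# not the original project's data locations.
test = SpringOptTest()
test.test_forward_dir(
    '/path/to/collected_objs',   # inputs named 00000000.obj, 00000001.obj, ...
    '/path/to/corrected_objs',   # output directory, created if missing
    n_iters=1,                   # Newton solves per sample
    start=0, end=10)             # inclusive range of sample ids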


imgpreprocess.py

Source: imgpreprocess.py (GitHub)


import numpy as np
import matplotlib
import h5py
import os, shutil
import matplotlib.pyplot as plt
from keras.optimizers import SGD
from keras import layers, models, optimizers
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint
from model import SqueezeNet

import tensorflow as tf

from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense


###############################################################################
# Directory path creation
# This block can be split into a separate Python file and imported from there
#original_dataset_dir = '/Users/jisuk/OneDrive/바탕 화면/datasets/catsAndDogs/train'
base_dir = '/home/jisukim/eye1001/datasets/eyesmall6'

if os.path.exists(base_dir):  # Delete the directory so the script can be re-run.
    shutil.rmtree(base_dir)   # This code is not included in the book.
os.mkdir(base_dir)
# Directories for the train/validation/test split
train_dir = os.path.join(base_dir, 'train')
# os.path.join appends a folder named 'train' to the path declared in base_dir,
# i.e. ./datasets/cats_and_dogs_small/train

os.mkdir(train_dir)
# actually create the directory at the train_dir path

validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)

test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# In total, this creates the train, validation, and test folders

# For training
train_forward_dir = os.path.join(train_dir, 'forward')
os.mkdir(train_forward_dir)

train_closed_dir = os.path.join(train_dir, 'side')
os.mkdir(train_closed_dir)


# For validation
validation_forward_dir = os.path.join(validation_dir, 'forward')
os.mkdir(validation_forward_dir)

validation_closed_dir = os.path.join(validation_dir, 'side')
os.mkdir(validation_closed_dir)


# For testing
test_forward_dir = os.path.join(test_dir, 'forward')
os.mkdir(test_forward_dir)

test_closed_dir = os.path.join(test_dir, 'side')
os.mkdir(test_closed_dir)

#################################side#####################################

fnames = ['{}.JPG'.format(i) for i in range(501,1001)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/train/side", fname)
    dst = os.path.join(train_closed_dir, fname)
    shutil.copyfile(src, dst)


fnames = ['{}.JPG'.format(i) for i in range(201,501)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/validation/side", fname)
    dst = os.path.join(validation_closed_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['{}.JPG'.format(i) for i in range(1,201)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/test/side", fname)
    dst = os.path.join(test_closed_dir, fname)
    shutil.copyfile(src, dst)

###############################forward#######################################

fnames = ['{}.JPG'.format(i) for i in range(501,1001)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/train/forward", fname)
    dst = os.path.join(train_forward_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['{}.JPG'.format(i) for i in range(201,501)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/validation/forward", fname)
    dst = os.path.join(validation_forward_dir, fname)
    shutil.copyfile(src, dst)

fnames = ['{}.JPG'.format(i) for i in range(1,201)]
for fname in fnames:
    src = os.path.join("/home/jisukim/eye1001/datasets/eye6/eye/test/forward", fname)
    dst = os.path.join(test_forward_dir, fname)
    shutil.copyfile(src, dst)

########################################################################################
# Compile the model and start training
sn = SqueezeNet(input_shape = (100, 75, 3), nb_classes=2)
'''
sn = Sequential()
sn.add(Flatten(input_shape=train_data.shape[1:]))
sn.add(Dense(256, activation='relu',kernel_regularizer=keras.regularizers.l2(0.001)))
sn.add(Dropout(0.3))
sn.add(BatchNormalization())
sn.add(Dense(2, activation='softmax'))
'''
sn.summary()
train_data_dir = '/home/jisukim/eye1001/datasets/eyesmall6/train'
validation_data_dir = '/home/jisukim/eye1001/datasets/eyesmall6/validation'
test_data_dir = '/home/jisukim/eye1001/datasets/eyesmall6/test'
train_samples = 1000
validation_samples = 600
epochs = 50
nb_class = 2
width, height = 100, 75

sgd = SGD(lr=0.001, decay=0.0002, momentum=0.9, nesterov=True)
sn.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy', tf.keras.metrics.Precision(name='precision'), tf.keras.metrics.Recall(name='recall'), tf.keras.metrics.FalsePositives(name='false_positives'), tf.keras.metrics.FalseNegatives(name='false_negatives')])

# Generators
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(width, height),
    batch_size=32,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(width, height),
    batch_size=32,
    class_mode='categorical')

test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(width, height),
    batch_size=32,
    class_mode='categorical')
############################################################################
# Include this callback checkpoint if you want to write .h5 checkpoint files;
# it may slow your training
#early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)
#checkpoint = ModelCheckpoint(
#    'weights.{epoch:02d}-{val_loss:.2f}.h5',
#    monitor='val_loss',
#    verbose=0,
#    save_best_only=True,
#    save_weights_only=True,
#    mode='min',
#    period=1)
###########################################################################

hist=sn.fit_generator(
    train_generator,
    steps_per_epoch=train_samples,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_samples
    #,callbacks=[checkpoint]
)

#########################################################################################

fig, loss_ax = plt.subplots()

acc_ax = loss_ax.twinx()

loss_ax.plot(hist.history['loss'], 'y', label='train loss')
loss_ax.plot(hist.history['val_loss'], 'g', label='val loss')

acc_ax.plot(hist.history['accuracy'], 'r', label='train acc')
acc_ax.plot(hist.history['val_accuracy'], 'b', label='val acc')

loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuracy')

loss_ax.legend(loc='lower left')
acc_ax.legend(loc='upper left')

plt.savefig('ourmodel2.png')

###########################################################################

print("-- Evaluate --")
scores = sn.evaluate_generator(test_generator, steps=5)
print("%s: %.2f%%" % (sn.metrics_names[1], scores[1]*100))

print("-- Predict --")
output = sn.predict_generator(test_generator, steps=5)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})

print(test_generator.class_indices)
print(output)

print("Training Ended")

sn.save_weights('ourweights2.h5')
print("Saved weight file")

sn.save('ourmodel2.h5')
print("saved model file")

# End of Code
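In imgpreprocess.py, by contrast, test_forward_dir is not a method at all but a path variable for the test-set 'forward' class folder. The hand-rolled tree of os.mkdir calls above can be collapsed with os.makedirs; a minimal equivalent sketch, assuming the same base directory and class names as the script:

import os

# Build the same train/validation/test x forward/side tree in one loop,
# instead of one os.mkdir call per directory.
base_dir = '/home/jisukim/eye1001/datasets/eyesmall6'  # from the script above
for split in ('train', 'validation', 'test'):
    for label in ('forward', 'side'):
        os.makedirs(os.path.join(base_dir, split, label), exist_ok=True)

test_forward_dir = os.path.join(base_dir, 'test', 'forward')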


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation test minutes FREE!

