How to use the get_test_times method in stestr

Best Python code snippets using stestr

Trainer.py

Source: Trainer.py (GitHub)


...
        self.eval_data = AerialDataset(dataset=self.dataset, data_path=self.data_path, mode='val')
        self.eval_loader = DataLoader(self.eval_data, batch_size=1, shuffle=False, num_workers=2)
        if self.dataset == 'Potsdam':
            self.num_of_class = 6
            self.epoch_repeat = get_test_times(6000, 6000, self.train_crop_size, self.train_crop_size)
        elif self.dataset == 'UDD5':
            self.num_of_class = 5
            self.epoch_repeat = get_test_times(4000, 3000, self.train_crop_size, self.train_crop_size)
        elif self.dataset == 'UDD6':
            self.num_of_class = 6
            self.epoch_repeat = get_test_times(4000, 3000, self.train_crop_size, self.train_crop_size)
        else:
            raise NotImplementedError
        if args.model == 'FCN':
            self.model = models.FCN8(num_classes=self.num_of_class)
        elif args.model == 'DeepLabV3+':
            self.model = models.DeepLab(num_classes=self.num_of_class, backbone='resnet')
        elif args.model == 'GCN':
            self.model = models.GCN(num_classes=self.num_of_class)
        elif args.model == 'UNet':
            self.model = models.UNet(num_classes=self.num_of_class)
        elif args.model == 'ENet':
            self.model = models.ENet(num_classes=self.num_of_class)
        elif args.model == 'D-LinkNet':
            self.model = models.DinkNet34(num_classes=self.num_of_class)
        else:
            raise NotImplementedError
        if args.loss == 'CE':
            self.criterion = CrossEntropyLoss2d()
        elif args.loss == 'LS':
            self.criterion = LovaszSoftmax()
        elif args.loss == 'F':
            self.criterion = FocalLoss()
        elif args.loss == 'CE+D':
            self.criterion = CE_DiceLoss()
        else:
            raise NotImplementedError

        self.schedule_mode = args.schedule_mode
        self.optimizer = opt.AdamW(self.model.parameters(), lr=args.lr)
        if self.schedule_mode == 'step':
            self.scheduler = opt.lr_scheduler.StepLR(self.optimizer, step_size=30, gamma=0.1)
        elif self.schedule_mode == 'miou' or self.schedule_mode == 'acc':
            self.scheduler = opt.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', patience=10, factor=0.1)
        elif self.schedule_mode == 'poly':
            iters_per_epoch = len(self.train_loader)
            self.scheduler = Poly(self.optimizer, num_epochs=args.epochs, iters_per_epoch=iters_per_epoch)
        else:
            raise NotImplementedError
        self.evaluator = Evaluator(self.num_of_class)
        self.model = nn.DataParallel(self.model)

        self.cuda = args.cuda
        if self.cuda is True:
            self.model = self.model.cuda()
        self.resume = args.resume
        self.finetune = args.finetune
        # resume and finetune are mutually exclusive
        assert not (self.resume is not None and self.finetune is not None)
        if self.resume is not None:
            print("Loading existing model...")
            if self.cuda:
                checkpoint = torch.load(args.resume)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            self.model.load_state_dict(checkpoint['parameters'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            self.start_epoch = checkpoint['epoch'] + 1  # start from the next epoch
        elif self.finetune is not None:
            print("Loading existing model...")
            if self.cuda:
                checkpoint = torch.load(args.finetune)
            else:
                checkpoint = torch.load(args.finetune, map_location='cpu')
            self.model.load_state_dict(checkpoint['parameters'])
            self.start_epoch = checkpoint['epoch'] + 1
        else:
            self.start_epoch = 1
        if self.mode == 'train':
            self.writer = SummaryWriter(comment='-' + self.dataset + '_' + self.model.__class__.__name__ + '_' + args.loss)
        self.init_eval = args.init_eval

    # Note: self.start_epoch and self.epochs are only used in run() to schedule training & validation
    def run(self):
        if self.init_eval:  # init with an evaluation
            init_test_epoch = self.start_epoch - 1
            Acc, _, mIoU, _ = self.validate(init_test_epoch, save=True)
            self.writer.add_scalar('eval/Acc', Acc, init_test_epoch)
            self.writer.add_scalar('eval/mIoU', mIoU, init_test_epoch)
            self.writer.flush()
        end_epoch = self.start_epoch + self.epochs
        for epoch in range(self.start_epoch, end_epoch):
            loss = self.train(epoch)
            self.writer.add_scalar('train/lr', self.optimizer.state_dict()['param_groups'][0]['lr'], epoch)
            self.writer.add_scalar('train/loss', loss, epoch)
            self.writer.flush()
            saved_dict = {
                'model': self.model.__class__.__name__,
                'epoch': epoch,
                'dataset': self.dataset,
                'parameters': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'scheduler': self.scheduler.state_dict()
            }
            torch.save(saved_dict, f'./{self.model.__class__.__name__}_{self.dataset}_epoch{epoch}.pth.tar')

            Acc, _, mIoU, _ = self.validate(epoch, save=True)
            self.writer.add_scalar('eval/Acc', Acc, epoch)
            self.writer.add_scalar('eval/mIoU', mIoU, epoch)
            self.writer.flush()
            if self.schedule_mode == 'step' or self.schedule_mode == 'poly':
                self.scheduler.step()
            elif self.schedule_mode == 'miou':
                self.scheduler.step(mIoU)
            elif self.schedule_mode == 'acc':
                self.scheduler.step(Acc)
            else:
                raise NotImplementedError
        self.writer.close()

    def train(self, epoch):
        self.model.train()
        print(f"----------epoch {epoch}----------")
        print("lr:", self.optimizer.state_dict()['param_groups'][0]['lr'])
        total_loss = 0
        num_of_batches = len(self.train_loader) * self.epoch_repeat
        # repeat the loader epoch_repeat times so the batch counter below matches num_of_batches
        for itr in range(self.epoch_repeat):
            for i, [img, gt] in enumerate(self.train_loader):
                print(f"epoch: {epoch} batch: {i + 1 + itr * len(self.train_loader)}/{num_of_batches}")
                print("img:", img.shape)
                print("gt:", gt.shape)
                self.optimizer.zero_grad()
                if self.cuda:
                    img, gt = img.cuda(), gt.cuda()
                pred = self.model(img)
                print("pred:", pred.shape)
                loss = self.criterion(pred, gt.long())
                print("loss:", loss)
                total_loss += loss.data
                loss.backward()
                self.optimizer.step()
        return total_loss

    def validate(self, epoch, save):
        self.model.eval()
        print(f"----------validate epoch {epoch}----------")
        if save and not os.path.exists("epoch" + str(epoch)):  # directory name must match the one used for png_name below
            os.mkdir("epoch" + str(epoch))
        num_of_imgs = len(self.eval_loader)
        for i, sample in enumerate(self.eval_loader):
            img_name, gt_name = sample['img'][0], sample['gt'][0]
            print(f"{i + 1}/{num_of_imgs}:")
            img = Image.open(img_name).convert('RGB')
            gt = np.array(Image.open(gt_name))
            times, points = self.get_pointset(img)
            print(f'{times} tests will be carried out on {img_name}...')
            W, H = img.size  # PIL reports (width, height)
            label_map = np.zeros([H, W], dtype=np.uint8)
            score_map = np.zeros([H, W], dtype=np.uint8)
            # score_map need not be uint8, but uint8 gives better results...
            tbar = tqdm(points)
            for x, y in tbar:  # (x, y) rather than (i, j), to avoid shadowing the image index i
                tbar.set_description(f"{x},{y}")
                label_map, score_map = self.test_patch(x, y, img, label_map, score_map)
            # finished one large image
            self.evaluator.add_batch(label_map, gt)
            if save:
                mask = ret2mask(label_map, dataset=self.dataset)
                png_name = os.path.join("epoch" + str(epoch), os.path.basename(img_name).split('.')[0] + '.png')
                Image.fromarray(mask).save(png_name)
        Acc = self.evaluator.Pixel_Accuracy()
        Acc_class = self.evaluator.Pixel_Accuracy_Class()
        mIoU = self.evaluator.Mean_Intersection_over_Union()
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        print("Acc:", Acc)
        print("Acc_class:", Acc_class)
        print("mIoU:", mIoU)
        print("FWIoU:", FWIoU)
        self.evaluator.reset()
        return Acc, Acc_class, mIoU, FWIoU

    def test_patch(self, i, j, img, label_map, score_map):
        tr = EvaluationTransform(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        # print(img.size)
        cropped = img.crop((i, j, i + self.eval_crop_size, j + self.eval_crop_size))
        cropped = tr(cropped).unsqueeze(0)
        if self.cuda:
            cropped = cropped.cuda()
        out = self.model(cropped)
        # out = torch.nn.functional.softmax(out, dim=1)
        ret = torch.max(out.squeeze(), dim=0)
        score = ret[0].data.detach().cpu().numpy()
        label = ret[1].data.detach().cpu().numpy()
        # a numpy array is indexed [H, W] while PIL.Image uses (W, H)
        score_temp = score_map[j:j + self.eval_crop_size, i:i + self.eval_crop_size]
        label_temp = label_map[j:j + self.eval_crop_size, i:i + self.eval_crop_size]
        index = score > score_temp
        score_temp[index] = score[index]
        label_temp[index] = label[index]
        label_map[j:j + self.eval_crop_size, i:i + self.eval_crop_size] = label_temp
        score_map[j:j + self.eval_crop_size, i:i + self.eval_crop_size] = score_temp
        return label_map, score_map

    def get_pointset(self, img):
        W, H = img.size
        pointset = []
        count = 0
        i = 0
        while i < W:
            break_flag_i = False
            if i + self.eval_crop_size >= W:
                i = W - self.eval_crop_size
                break_flag_i = True
            j = 0
            while j < H:
                break_flag_j = False
                if j + self.eval_crop_size >= H:
                    j = H - self.eval_crop_size
                    break_flag_j = True
                count += 1
                pointset.append((i, j))
                if break_flag_j:
                    break
                j += self.stride
            if break_flag_i:
                break
            i += self.stride
        value = get_test_times(W, H, self.eval_crop_size, self.stride)
        assert count == value, f'count={count} while get_test_times returns {value}'
        return count, pointset

if __name__ == "__main__":
    print("--Trainer.py--")
...


om-cp_data_logger_software_class.py

Source: om-cp_data_logger_software_class.py (GitHub)


...
        self.device_info.drop(range(2, len(self.device_info.index), 1), axis=0, inplace=True)
        self.device_info.columns = self.device_info.iloc[0]
        self.device_info = self.device_info.drop([0], axis=0).reset_index(drop=True)
        return self.device_info

    def get_test_times(self):
        test_times = pd.read_json(f'../json_tables/times_{self.name}.json',
                                  convert_dates=(self.start_col, self.end_col))
        return test_times

    def get_split_data(self):
        dfs = {}
        for x in range(len(self.get_test_times())):
            dfs[x] = self.data[self.get_test_times()[self.start_col][x]:self.get_test_times()[self.end_col][x]]
        return dfs

    @staticmethod
    def resample(df, col_name, order, cut_off, b_type):
        new_dfs = {}
        return_dfs = {}
        if type(df) == dict:
            for x in range(len(df)):
                b, a = signal.butter(order, cut_off, btype=b_type, analog=False)
                new_dfs[x] = pd.DataFrame(signal.filtfilt(b, a, df[x][col_name]))
                new_dfs[x].index = df[x].index
                return_dfs[x] = pd.concat([df[x], new_dfs[x]], axis=1)
                return_dfs[x].rename(columns={0: f"{col_name}_resample"}, inplace=True)
            return return_dfs
        else:
            b, a = signal.butter(order, cut_off, btype=b_type, analog=False)
            new_dfs = pd.DataFrame(signal.filtfilt(b, a, df[col_name]))
            new_dfs.index = df.index
            return_dfs = pd.concat([df, new_dfs], axis=1)
            return_dfs.rename(columns={0: f"{col_name}_resample"}, inplace=True)
            return return_dfs

    def graphing(self, df, name_addon=""):
        p = Path(f'../graphs/{self.name}')
        if type(df) == dict:
            for x in range(len(df)):
                fig, ax = plt.subplots(figsize=(10, 6))
                ax.plot(df[x]['Shock - Z Axis (g)'], color='#ffb300', marker='o', markersize=1, label='Test 5')
                ax.set_xlabel('Time (Hours:Minutes:Seconds.Milliseconds)', fontsize=15)
                ax.set_ylabel('Acceleration (g)', fontsize=15)
                plt.xticks(rotation=15)
                plt.grid()
                date_form = DateFormatter("%H:%M:%S.%f")
                ax.xaxis.set_major_formatter(date_form)
                self.annot_max(df[x].index, df[x]['Shock - Z Axis (g)'])
                self.annot_min(df[x].index, df[x]['Shock - Z Axis (g)'])
                p.mkdir(exist_ok=True)
                # plt.tight_layout()
                fig.set_size_inches(12, 8)
                fig.savefig(f'../graphs/{self.name}/impact_{self.name}_test_{x}{name_addon}.pdf')
                plt.close(fig)
        else:
            fig, ax = plt.subplots(figsize=(10, 6))
            ax.plot(df['Shock - Z Axis (g)'], color='#ffb300', marker='o', markersize=1, label='Test 5')
            ax.set_xlabel('Time (Hours:Minutes:Seconds)', fontsize=15)
            ax.set_ylabel('Acceleration (g)', fontsize=15)
            plt.xticks(rotation=15)
            plt.grid()
            date_form = DateFormatter("%H:%M:%S")
            ax.xaxis.set_major_formatter(date_form)
            self.annot_max(df.index, df['Shock - Z Axis (g)'])
            self.annot_min(df.index, df['Shock - Z Axis (g)'])
            p.mkdir(exist_ok=True)
            # plt.tight_layout()
            fig.set_size_inches(12, 8)
            fig.savefig(f'../graphs/{self.name}/impact_{self.name}_test{name_addon}.pdf')
            plt.close(fig)

    def graphing_2_dataframes(self, df1, df2, name_addon=""):
        p = Path(f'../graphs/{self.name}')
        if type(df1) == dict:
            for x in range(len(df1)):
                fig, ax = plt.subplots(figsize=(10, 6))
                ax.plot(df1[x]['Shock - Z Axis (g)'], color='#ffb300', marker='o', markersize=1, label='Original')
                ax.plot(df2[x]['Shock - Z Axis (g)_resample'], color='#00c8ff', marker='o', markersize=1,
                        label='Resample')
                ax.set_xlabel('Time (Hours:Minutes:Seconds.Milliseconds)', fontsize=15)
                ax.set_ylabel('Acceleration (g)', fontsize=15)
                plt.xticks(rotation=15)
                plt.grid()
                ax.legend()
                date_form = DateFormatter("%H:%M:%S.%f")
                ax.xaxis.set_major_formatter(date_form)
                self.annot_max(df1[x].index, df1[x]['Shock - Z Axis (g)'])
                self.annot_min(df1[x].index, df1[x]['Shock - Z Axis (g)'])
                p.mkdir(exist_ok=True)
                # plt.tight_layout()
                fig.set_size_inches(12, 8)
                fig.savefig(f'../graphs/{self.name}/impact_{self.name}_test_{x}{name_addon}.pdf')
                plt.close(fig)
        else:
            fig, ax = plt.subplots(figsize=(10, 6))
            ax.plot(df1['Shock - Z Axis (g)'], color='#ffb300', marker='o', markersize=1, label='Original')
            ax.plot(df2['Shock - Z Axis (g)_resample'], color='#00c8ff', marker='o', markersize=1, label='Resample')
            ax.set_xlabel('Time (Hours:Minutes:Seconds)', fontsize=15)
            ax.set_ylabel('Acceleration (g)', fontsize=15)
            plt.xticks(rotation=15)
            plt.grid()
            ax.legend()
            date_form = DateFormatter("%H:%M:%S")
            ax.xaxis.set_major_formatter(date_form)
            self.annot_max(df1.index, df1['Shock - Z Axis (g)'])
            self.annot_min(df1.index, df1['Shock - Z Axis (g)'])
            p.mkdir(exist_ok=True)
            # plt.tight_layout()
            fig.set_size_inches(12, 8)
            fig.savefig(f'../graphs/{self.name}/impact_{self.name}_test{name_addon}.pdf')
            plt.close(fig)

    @staticmethod
    def annot_max(x, y, ax=None):
        x_max = x[np.argmax(y)]
        y_max = y.max()
        if not ax:
            ax = plt.gca()
        bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
        arrow_props = dict(arrowstyle="->")
        kw = dict(xycoords='data', textcoords="axes fraction",
                  arrowprops=arrow_props, bbox=bbox_props, ha="left", va="top")
        ax.annotate(f"Max={y_max}", xy=(x_max, y_max), xytext=(0.04, 0.94), **kw)

    @staticmethod
    def annot_min(x, y, ax=None):
        x_min = x[np.argmin(y)]
        y_min = y.min()
        if not ax:
            ax = plt.gca()
        bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
        arrow_props = dict(arrowstyle="->")
        kw = dict(xycoords='data', textcoords="axes fraction",
                  arrowprops=arrow_props, bbox=bbox_props, ha="left", va="top")
        ax.annotate(f"Min={y_min}", xy=(x_min, y_min), xytext=(0.80, 0.10), **kw)

    @staticmethod
    def find_max_and_min(df, col_name_x, col_name_y):
        if type(df) == dict:
            min = {}
            max = {}
            for k in range(len(df)):
                tmp = df[k].reset_index()
                x = tmp[col_name_x]
                y = tmp[col_name_y]
                min[k] = (x[np.argmin(y)], y.min())
                max[k] = (x[np.argmax(y)], y.max())
            return min, max
        else:
            tmp = df.reset_index()
            x = tmp[col_name_x]
            y = tmp[col_name_y]
            return (x[np.argmin(y)], y.min()), (x[np.argmax(y)], y.max())

    def auto_split_data(self, df, time):
        dfs = {}
        min = {}
        max = {}
        x_start = {}
        x_end = {}
        dfu = {}
        for k in range(len(self.get_test_times())):
            dfs[k] = df[self.get_test_times()[self.start_col][k]:self.get_test_times()[self.end_col][k]]
            min[k], max[k] = self.find_max_and_min(dfs[k], "Time", "Shock - Z Axis (g)")
            x_start[k] = min[k][0] - timedelta(milliseconds=time)
            x_end[k] = min[k][0] + timedelta(milliseconds=time)
            dfu[k] = df[x_start[k]:x_end[k]]
        return dfu

    @staticmethod
    def intergrate(x_array1, y_array2):
        tmp = float(np.trapz(abs(x_array1), y_array2))
        return tmp * (10 ** -9)

    @staticmethod
    def to_latex(df, filename, caption, label):
        df.to_latex(f'../tex_files/{filename}.tex', longtable=True,
                    index=False, caption=caption, label=label)
        df.to_latex(f'../tex_files/{filename}_escape.tex', longtable=True,
...
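One detail worth flagging in the class above: get_split_data() calls self.get_test_times() three times on every loop iteration, re-reading the JSON file each time. A lighter variant (a sketch reusing only names already defined in the class) reads the table once:

    def get_split_data(self):
        # Read the start/end lookup table from disk once, not per access.
        test_times = self.get_test_times()
        return {
            x: self.data[test_times[self.start_col][x]:test_times[self.end_col][x]]
            for x in range(len(test_times))
        }

The slicing itself is unchanged: self.data is indexed by timestamp, so df[start:end] selects the rows recorded between the start and end instants of one test.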


test_util_functions.py

Source: test_util_functions.py (GitHub)


...8 """Test `num_or_str_to_date()`."""9 self.assertIsInstance(util.num_or_str_to_date(1), datetime.datetime)10 parsed_date = util.num_or_str_to_date("2016-07-14T03:25:00.000+0000")11 self.assertIsInstance(parsed_date, datetime.datetime)12 def test_get_test_times(self):13 """Test `get_test_times()`."""14 def assert_instancesof_datetime(tuples):15 """16 Assert that all of the items in a list of tuples are instanced of17 `datetime.datetime`.18 """19 for pair in tuples:20 for item in pair:21 self.assertIsInstance(item, datetime.datetime)22 perf_json = {23 "results": [],24 "start": "2016-07-14T03:25:00.000+0000",25 "end": "2016-07-14T03:24:00.000+0000",26 }27 times = util.get_test_times(perf_json)28 self.assertEqual(len(times), 1)29 assert_instancesof_datetime(times)30 perf_json = {31 "results": [32 {"start": 1, "end": 2},33 {"start": 3, "end": 4},34 {"start": 5, "end": "2016-07-14T03:24:00.000+0000"},35 ]36 }37 times = util.get_test_times(perf_json)38 self.assertEqual(len(times), 3)...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites for running your first automation test to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run stestr automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

