How to use i_ran method in Testify

Best Python code snippets using Testify_python
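Despite the title, i_ran is not part of the Testify API. In the snippets below it is an ordinary variable name: a loop counter over randomly generated decalibrations in the two KITTI scripts, and a NumPy row-index array in the softmax example. A minimal sketch of both patterns (the names below are illustrative, not from Testify or from the snippets' helper libraries):

import numpy as np

# Pattern 1: i_ran as a plain iteration counter filling pre-allocated slots
num_gen = 5
results = [None] * num_gen
for i_ran in range(num_gen):
    results[i_ran] = i_ran ** 2  # store the i_ran-th output by index

# Pattern 2: i_ran as a row-index array for NumPy fancy indexing
scores = np.arange(12).reshape(4, 3)  # 4 samples, 3 classes
y = np.array([0, 2, 1, 0])            # one class label per sample
i_ran = np.arange(len(scores))
correct = scores[i_ran, y]  # picks scores[0,0], scores[1,2], scores[2,1], scores[3,0]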

tf_kitti_to_calib.py

Source: tf_kitti_to_calib.py (GitHub)


# -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-06-26 16:55:00
# @Last Modified by: twankim
# @Last Modified time: 2017-09-27 14:30:05
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import os
import glob
from skimage.io import (imread, imsave)
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import _init_paths
from datasets.config import cfg
from datasets.dataset_kitti import get_calib_mat

from datasets.utils_dataset import *

def main(args):
    if not args.is_cpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gid
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
    verbose = args.verbose
    path_kitti = args.path_kitti
    assert os.path.exists(path_kitti),\
        "Download KITTI Dataset or enter correct path"

    path_out = args.path_out
    if not os.path.exists(path_out):
        os.makedirs(path_out)
    path_cal = os.path.join(path_kitti, cfg._TASK, cfg._DIR_CALIB)
    path_velo = os.path.join(path_kitti, cfg._TASK, cfg._DIR_VELO)
    path_img = os.path.join(path_kitti, cfg._TASK, cfg._DIR_IMAGE)
    if args.is_train:
        image_set = 'training'
        f_tfrec = os.path.join(path_out, cfg._TF_FORMAT_TRAIN.format(
            'kitti',
            image_set.split('ing')[0]))
    else:
        max_theta = args.max_theta
        max_dist = args.max_dist
        image_set = 'testing'
        f_tfrec = os.path.join(path_out, cfg._TF_FORMAT_TEST.format(
            'kitti',
            image_set.split('ing')[0],
            max_theta,
            max_dist))
    imList = glob.glob(os.path.join(path_cal,
                                    image_set,
                                    cfg._TYPE_CALIB,
                                    '*' + cfg._FORMAT_CALIB))
    imList.sort()
    imNames = [os.path.split(d)[1].strip('.txt') for d in imList]
    print("... Writing {} set".format(image_set))
    with tf.python_io.TFRecordWriter(f_tfrec) as tfrecord_writer:
        with tf.Graph().as_default():
            with tf.Session('') as sess:
                for iter, imName in enumerate(imNames):
                    # Get original calibration info
                    f_calib = os.path.join(path_cal,
                                           image_set,
                                           cfg._TYPE_CALIB,
                                           imName + cfg._FORMAT_CALIB)
                    calib_dict = get_calib_mat(f_calib)
                    # Read velodyne points
                    f_velo = os.path.join(path_velo,
                                          image_set,
                                          cfg._TYPE_VELO,
                                          imName + cfg._FORMAT_VELO)
                    points_org = np.fromfile(f_velo, dtype=np.float32)
                    # exclude points reflectance
                    points = points_org.reshape(-1, 4)[:, :3]
                    # Read image file
                    f_img = os.path.join(path_img,
                                         image_set,
                                         cfg._TYPE_IMAGE,
                                         imName + cfg._FORMAT_IMAGE)
                    im = imread(f_img)
                    im_height, im_width = np.shape(im)[0:2]
                    # For training, generate random decalib while training.
                    # For testing, generate fixed random decalib.
                    if image_set == 'training':
                        if verbose:
                            sys.stdout.write(
                                '... ({}) Writing file to TfRecord {}/{}\n'.format(
                                    image_set, iter + 1, len(imNames)))
                            sys.stdout.flush()
                        # Write to tfrecord
                        im_placeholder = tf.placeholder(dtype=tf.uint8)
                        encoded_image = tf.image.encode_png(im_placeholder)
                        png_string = sess.run(encoded_image,
                                              feed_dict={im_placeholder: im})
                        mat_intrinsic = calib_dict[cfg._SET_CALIB[0]].flatten()
                        mat_rect = calib_dict[cfg._SET_CALIB[1]].flatten()
                        mat_extrinsic = calib_dict[cfg._SET_CALIB[2]].flatten()
                        example = calib_to_tfexample_train(
                            png_string,
                            b'png',
                            im_height,
                            im_width,
                            points_org,
                            mat_intrinsic,
                            mat_rect,
                            mat_extrinsic
                        )
                        tfrecord_writer.write(example.SerializeToString())
                    elif image_set == 'testing':
                        # # !!!! For debugging
                        # # Project velodyne points to image plane
                        # points2D, pointsDist = project_lidar_to_img(calib_dict,
                        #                                             points,
                        #                                             im_height,
                        #                                             im_width)
                        # im_depth_ho = points_to_img(points2D,
                        #                             pointsDist,
                        #                             im_height,
                        #                             im_width)
                        # imsave('data_ex/ho_{}.png'.format(image_set), im_depth_ho)
                        # --- Generate random rotation for decalibration data ---
                        # Generate random vectors for decalibration
                        param_rands = gen_ran_decalib(max_theta,
                                                      max_dist,
                                                      cfg._NUM_GEN)
                        list_im = [im] * cfg._NUM_GEN
                        list_im_depth = [None] * cfg._NUM_GEN
                        list_crop = [None] * cfg._NUM_GEN
                        list_param_decalib = [None] * cfg._NUM_GEN
                        for i_ran in xrange(cfg._NUM_GEN):
                            param_decalib = gen_decalib(max_theta,
                                                        max_dist,
                                                        param_rands,
                                                        i_ran)
                            # Copy intrinsic parameters and rotation matrix
                            # (for reference cam)
                            ran_dict = calib_dict.copy()
                            # Replace extrinsic parameters with decalibrated ones
                            ran_dict[cfg._SET_CALIB[2]] = np.dot(
                                ran_dict[cfg._SET_CALIB[2]],
                                quat_to_transmat(param_decalib['q_r'],
                                                 param_decalib['t_vec']))

                            points2D_ran, pointsDist_ran = project_lidar_to_img(
                                ran_dict,
                                points,
                                im_height,
                                im_width)
                            list_im_depth[i_ran], list_crop[i_ran] = points_to_img(
                                points2D_ran,
                                pointsDist_ran,
                                im_height,
                                im_width)
                            list_param_decalib[i_ran] = param_decalib
                            # !!!! For debugging
                            # imsave('data_ex/ho_{}_{}.png'.format(image_set, i_ran),
                            #        list_im_depth[i_ran])
                            # print(' - Angle:{}, nonzero:{}'.format(
                            #     param_decalib['rot'],
                            #     sum(sum(list_im_depth[i_ran] > 0))))
                        im_placeholder = tf.placeholder(dtype=tf.uint8)
                        im_depth_placeholder = tf.placeholder(dtype=tf.uint8)
                        encoded_image = tf.image.encode_png(im_placeholder)
                        encoded_image_depth = tf.image.encode_png(im_depth_placeholder)
                        if verbose:
                            sys.stdout.write(
                                '... ({}) Writing file to TfRecord {}/{}\n'.format(
                                    image_set, iter + 1, len(imNames)))
                            sys.stdout.flush()
                        png_strings = [sess.run([encoded_image, encoded_image_depth],
                                                feed_dict={im_placeholder: im,
                                                           im_depth_placeholder: im_depth})
                                       for im, im_depth in zip(list_im, list_im_depth)]
                        for i_string in xrange(cfg._NUM_GEN):
                            example = calib_to_tfexample_test(
                                png_strings[i_string][0],
                                png_strings[i_string][1],
                                b'png',
                                im_height,
                                im_width,
                                list_param_decalib[i_string]['y'],
                                list_param_decalib[i_string]['rot'],
                                list_param_decalib[i_string]['a_vec'],
                                list_crop[i_string]
                            )
                            tfrecord_writer.write(example.SerializeToString())

def parse_args():
    def str2bool(v):
        return v.lower() in ('true', '1')
    parser = argparse.ArgumentParser(description=
                                     'Dataset conversion to TF format')
    parser.add_argument('-gid', dest='gid',
                        help='CUDA_VISIBLE_DEVICES (check your machine ID), e.g. 0,1',
                        default='0', type=str)
    parser.add_argument('-is_cpu', dest='is_cpu',
                        help='Use CPU only. True/False',
                        default=False, type=str2bool)
    parser.add_argument('-dir_in', dest='path_kitti',
                        help='Path to kitti dataset',
                        default='/data/kitti', type=str)
    parser.add_argument('-dir_out', dest='path_out',
                        help='Path to save tfrecord kitti dataset',
                        default='/data/tf/kitti_calib', type=str)
    parser.add_argument('-max_theta', dest='max_theta',
                        help='Range of rotation angle in degree [-theta,+theta)',
                        default=20, type=int)
    parser.add_argument('-max_dist', dest='max_dist',
                        help='Maximum translation distance in meter',
                        default=1.5, type=float)
    parser.add_argument('-verbose', dest='verbose',
                        help='True: Print every data, False: print only train/test',
                        default=False, type=str2bool)
    parser.add_argument('-is_train', dest='is_train',
                        help='True: Generate training set',
                        default=True, type=str2bool)
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    print("Called with args:")
    print(args)
    ...
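The testing branch above uses i_ran in a pattern worth isolating: pre-allocate parallel lists of length cfg._NUM_GEN, then have each iteration fill slot i_ran in every list, so depth images, crop parameters, and decalibration parameters stay aligned by index for the later i_string loop. A stripped-down sketch of that pattern, with hypothetical stand-ins for gen_decalib and points_to_img (their real implementations live in the datasets helpers):

import numpy as np

NUM_GEN = 5  # plays the role of cfg._NUM_GEN

def make_decalib(i):
    # Hypothetical stand-in for gen_decalib(max_theta, max_dist, param_rands, i)
    return {'rot': float(i), 'y': np.zeros(7)}

def render_depth(param, height=4, width=4):
    # Hypothetical stand-in for points_to_img(points2D, pointsDist, h, w)
    return np.zeros((height, width), dtype=np.uint8), (0, 0, height, width)

list_im_depth = [None] * NUM_GEN
list_crop = [None] * NUM_GEN
list_param_decalib = [None] * NUM_GEN
for i_ran in range(NUM_GEN):
    param_decalib = make_decalib(i_ran)
    list_im_depth[i_ran], list_crop[i_ran] = render_depth(param_decalib)
    list_param_decalib[i_ran] = param_decalib

# A second loop can now rely on index alignment, as the TFRecord writer does:
for i_string in range(NUM_GEN):
    assert list_param_decalib[i_string]['rot'] == float(i_string)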


deep_calib_kitti.py

Source: deep_calib_kitti.py (GitHub)


# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2017 UT Austin/Taewan Kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
# Modified for deep learning based calibration code
# Code for real application on KITTI
# Input: Image, LIDAR, calib file (P2, Rect0), initial guess (H_init)
# Output: Result of calibration (image, H)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deep_calibrator import *
import os
from skimage.io import (imread, imsave)
import glob
import numpy as np
import tensorflow as tf
import _init_paths
from datasets.config import cfg
from datasets.dataset_kitti import get_calib_mat
from datasets.utils_dataset import *
from nets import factory_nets

tf.app.flags.DEFINE_string(
    'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_integer(
    'is_rand', True, 'Turn on random decalibration')
tf.app.flags.DEFINE_integer(
    'num_gen', 5, 'Number of random decalibs to generate')
tf.app.flags.DEFINE_string(
    'checkpoint_path', '/data/tf/kitti_calib/checkpoints/vgg_16/weight_1',
    'The directory where the model was written to or an absolute path to a '
    'checkpoint file.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'kitti', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
    'dir_image', None, 'The directory where the image files are stored.')
tf.app.flags.DEFINE_string(
    'dir_lidar', None, 'The directory where the lidar files are stored.')
tf.app.flags.DEFINE_string(
    'dir_calib', None, 'The directory where the calibration files are stored.')
tf.app.flags.DEFINE_string(
    'dir_out', None, 'The directory where the output files are stored.')
tf.app.flags.DEFINE_string(
    'format_image', 'png', 'The format of image. default=png')
tf.app.flags.DEFINE_string(
    'format_lidar', 'bin', 'The format of lidar. default=bin')
tf.app.flags.DEFINE_string(
    'format_calib', 'txt', 'The format of calibration file. default=txt')
tf.app.flags.DEFINE_string(
    'list_param', '20,1.5',
    'List of parameters for the random decalib. max_rotation,max_translation')
tf.app.flags.DEFINE_string(
    'lidar_pool', None,
    'Kernel size for Max-pooling LIDAR Image: height,width. default=None')
tf.app.flags.DEFINE_string(
    'model_name', 'vgg_16', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
    'preprocessing_name', None, 'The name of the preprocessing to use. If left '
    'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_string(
    'weight_loss', None,
    'The weight to balance predictions, e.g. multiplied with the rotation quaternion')
tf.app.flags.DEFINE_integer(
    'eval_image_size', None, 'Eval image size')
tf.app.flags.DEFINE_boolean(
    'is_crop', True, 'Eval image size')
FLAGS = tf.app.flags.FLAGS

def main(_):
    if not FLAGS.dir_image:
        raise ValueError('You must supply the image directory with --dir_image')
    if not FLAGS.dir_lidar:
        raise ValueError('You must supply the lidar directory with --dir_lidar')
    if not FLAGS.dir_calib:
        raise ValueError('You must supply the calibration directory with --dir_calib')
    if not os.path.exists(FLAGS.dir_out):
        os.makedirs(FLAGS.dir_out)
    # Parameters for random generation
    max_theta, max_dist = map(float, FLAGS.list_param.split(','))
    # Get the list of images to process
    imList = glob.glob(os.path.join(FLAGS.dir_image, '*.' + FLAGS.format_image))
    imList.sort()
    imNames = [os.path.split(pp)[1].strip('.{}'.format(FLAGS.format_image))
               for pp in imList]
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path
    lidar_pool = [int(l_i) for l_i in FLAGS.lidar_pool.split(',')]
    predictor = Predictor(FLAGS.model_name,
                          preprocessing_name,
                          checkpoint_path,
                          FLAGS.eval_image_size,
                          lidar_pool,
                          FLAGS.is_crop)
    for iter, imName in enumerate(imNames):
        print('\n-input: {}.png/.bin'.format(imName))
        decalibs_gt = []
        decalibs_pred = []
        decalibs_qr_gt = []
        decalibs_qr_pred = []
        # Get original calibration info
        f_calib = os.path.join(FLAGS.dir_calib, imName + '.' + FLAGS.format_calib)
        temp_dict = get_calib_mat(f_calib)
        # Read lidar points
        f_lidar = os.path.join(FLAGS.dir_lidar, imName + '.' + FLAGS.format_lidar)
        points_org = np.fromfile(f_lidar, dtype=np.float32).reshape(-1, 4)
        points = points_org[:, :3]  # exclude reflectance
        # Read image file
        f_image = os.path.join(FLAGS.dir_image, imName + '.' + FLAGS.format_image)
        im = imread(f_image)
        im_height, im_width = np.shape(im)[0:2]
        # Project velodyne points to image plane
        points2D, pointsDist = project_lidar_to_img(temp_dict,
                                                    points,
                                                    im_height,
                                                    im_width)
        # Write as one image (Ground truth)
        im_depth, _ = points_to_img(points2D, pointsDist, im_height, im_width)
        f_res_im = os.path.join(FLAGS.dir_out, '{}_gt.{}'.format(
            imName, FLAGS.format_image))
        imlidarwrite(f_res_im, im, points_to_img(points2D,
                                                 pointsDist,
                                                 im_height,
                                                 im_width,
                                                 'standard')[0])
        # Randomly generate decalibration
        param_rands = gen_ran_decalib(max_theta, max_dist, FLAGS.num_gen)
        for i_ran in xrange(FLAGS.num_gen):
            param_decalib = gen_decalib(max_theta, max_dist, param_rands, i_ran)
            ran_dict = temp_dict.copy()
            ran_dict[cfg._SET_CALIB[2]] = np.dot(
                ran_dict[cfg._SET_CALIB[2]],
                quat_to_transmat(param_decalib['q_r'], param_decalib['t_vec']))
            points2D_ran, pointsDist_ran = project_lidar_to_img(ran_dict,
                                                                points,
                                                                im_height,
                                                                im_width)
            # Write before the calibration
            im_depth_ran, params_crop = points_to_img(points2D_ran,
                                                      pointsDist_ran,
                                                      im_height,
                                                      im_width)
            f_res_im_ran = os.path.join(FLAGS.dir_out, '{}_rand{}.{}'.format(
                imName, i_ran, FLAGS.format_image))
            # Save ground truth decalibration
            decalibs_gt.append(param_decalib['y'])
            decalibs_qr_gt.append(param_decalib['q_r'])
            # ---------- Prediction of y (decalibration) ----------
            # For debugging
            # Check actual patches provided to the network
            if FLAGS.is_crop:
                y_preds_val, img_temp, lidar_temp = predictor.predict(im, im_depth_ran,
                                                                      params_crop)
                # Normalize quaternion to have unit norm
                q_r_preds = yr_to_qr(y_preds_val[:4], max_theta)
                print(' {}) Norm_previous:{}'.format(i_ran, np.linalg.norm(q_r_preds)))
                q_r_preds = q_r_preds / np.linalg.norm(q_r_preds)
                path_crop = os.path.join(FLAGS.dir_out, 'crops')
                if not os.path.exists(path_crop):
                    os.makedirs(path_crop)
                if i_ran == 0:
                    imsave(os.path.join(path_crop, '{}_rgb_org.png'.format(imName)), im)
                crop_name = os.path.join(path_crop, '{}_{}'.format(imName, i_ran))
                imsave(crop_name + '_rgb.png', img_temp)
                imsave(crop_name + '_lidar.png', lidar_temp.astype(np.uint8))
                imsave(crop_name + '_lidar_org.png', np.squeeze(im_depth_ran, axis=2))
            else:
                y_preds_val, q_r_preds = predictor.predict(im, im_depth_ran)
            # Save predicted decalibration
            decalibs_pred.append(y_preds_val)
            decalibs_qr_pred.append(q_r_preds)
            points2D_cal, pointsDist_cal = predictor.calibrate(ran_dict,
                                                               q_r_preds,
                                                               y_preds_val[4:],
                                                               points,
                                                               im_height,
                                                               im_width)
            # Write after the calibration
            im_depth_cal, _ = points_to_img(points2D_cal,
                                            pointsDist_cal,
                                            im_height,
                                            im_width)
            f_res_im_cal = os.path.join(FLAGS.dir_out, '{}_cal{}.{}'.format(
                imName, i_ran, FLAGS.format_image))
            imlidarwrite(f_res_im_ran, im, points_to_img(points2D_ran,
                                                         pointsDist_ran,
                                                         im_height,
                                                         im_width,
                                                         'standard')[0])
            imlidarwrite(f_res_im_cal, im, points_to_img(points2D_cal,
                                                         pointsDist_cal,
                                                         im_height,
                                                         im_width,
                                                         'standard')[0])

        # write 7vec, MSE as txt file
        # decalibs_pred, decalibs_gt
        with open(os.path.join(FLAGS.dir_out, imName + '_res.txt'), 'w') as f_res:
            for i_ran, (vec_gt, vec_pred) in enumerate(zip(decalibs_gt, decalibs_pred)):
                f_res.write('*{}, gt:{}\n'.format(i_ran, vec_gt))
                f_res.write('*{}, pred:{}\n'.format(i_ran, vec_pred))
                mse_val = ((vec_gt - vec_pred) ** 2).mean()
                mse_rot = ((vec_gt[:4] - vec_pred[:4]) ** 2).mean()
                mse_tran = ((vec_gt[4:] - vec_pred[4:]) ** 2).mean()
                f_res.write('*{}, MSE:{}, MSE_rot:{}, MSE_trans:{}\n\n'.format(
                    i_ran, mse_val, mse_rot, mse_tran))
        with open(os.path.join(FLAGS.dir_out, imName + '_res_qr.txt'), 'w') as f_res:
            for i_ran, (qr_gt, qr_pred) in enumerate(zip(decalibs_qr_gt, decalibs_qr_pred)):
                f_res.write('*{}, gt:{}\n'.format(i_ran, qr_gt))
                f_res.write('*{}, pred:{}\n'.format(i_ran, qr_pred))
                mse_qr = ((qr_gt - qr_pred) ** 2).mean()
                f_res.write('*{}, MSE_qr:{}\n\n'.format(i_ran, mse_qr))

if __name__ == '__main__':
    ...
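At the end of deep_calib_kitti.py, i_ran reappears as an enumerate index when the per-sample errors are written out: the ground-truth and predicted 7-vectors are zipped together, and the MSE is split into a rotation part (vec[:4]) and a translation part (vec[4:]). A self-contained sketch of that reporting loop, using made-up vectors in place of real predictions:

import numpy as np

# Made-up ground-truth and predicted decalibration 7-vectors
decalibs_gt = [np.linspace(0, 1, 7), np.ones(7)]
decalibs_pred = [np.linspace(0, 1, 7) + 0.1, np.full(7, 0.9)]

for i_ran, (vec_gt, vec_pred) in enumerate(zip(decalibs_gt, decalibs_pred)):
    mse_val = ((vec_gt - vec_pred) ** 2).mean()           # full 7-vector MSE
    mse_rot = ((vec_gt[:4] - vec_pred[:4]) ** 2).mean()   # rotation part
    mse_tran = ((vec_gt[4:] - vec_pred[4:]) ** 2).mean()  # translation part
    print('*{}, MSE:{}, MSE_rot:{}, MSE_trans:{}'.format(
        i_ran, mse_val, mse_rot, mse_tran))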


softmax.py

Source: softmax.py (GitHub)


import numpy as np
from random import shuffle
from past.builtins import xrange

def softmax_loss_naive(W, X, y, reg):
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in xrange(num_train):
        index = y[i]
        scores = X[i].dot(W)
        scores = scores - np.max(scores)  # shift scores for numeric stability
        loss -= np.log(np.exp(scores[index]) / np.sum(np.exp(scores)))
        for j in xrange(num_classes):
            # Softmax probability of class j (the shifted max term is 0, so it is dropped)
            prob_j = np.exp(scores[j]) / np.sum(np.exp(scores))
            if j == y[i]:
                dW[:, j] = dW[:, j] + (prob_j - 1) * X[i]
            else:
                dW[:, j] = dW[:, j] + prob_j * X[i]
    # Average over the batch and add L2 regularization
    # (the original added loss twice here; fixed)
    loss = loss / float(num_train) + 0.5 * reg * np.sum(np.square(W))
    dW = dW / num_train + reg * W
    #############################################################################
    #                          END OF YOUR CODE                                 #
    #############################################################################
    return loss, dW

def softmax_loss_vectorized(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    num_train = X.shape[0]
    i_ran = np.arange(num_train)  # row indices for fancy indexing

    scores = np.dot(X, W)
    scores -= np.max(scores)  # shift scores for numeric stability
    cor = scores[i_ran, y].reshape(num_train, 1)  # correct-class score per row

    exp_sum = np.sum(np.exp(scores), axis=1).reshape(num_train, 1)
    loss = np.sum(np.log(exp_sum) - cor)
    # Average over the batch and add L2 regularization
    # (the original added loss twice here; fixed)
    loss = loss / float(num_train) + 0.5 * reg * np.sum(np.square(W))

    Grad = np.exp(scores) / exp_sum        # softmax probabilities
    Grad[i_ran, y] = Grad[i_ran, y] - 1.0  # subtract 1 at each correct class
    dW = np.dot(X.T, Grad) / num_train + reg * W
    #############################################################################
    #                          END OF YOUR CODE                                 #
    #############################################################################
    return loss, dW
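In softmax.py, i_ran is not a loop counter at all: np.arange(num_train) paired with the label vector y performs NumPy fancy indexing, selecting one correct-class score per row in a single vectorized operation. A quick check that this matches the explicit per-row loop:

import numpy as np

scores = np.random.randn(5, 3)   # 5 samples, 3 classes
y = np.array([0, 2, 1, 1, 0])    # correct class per sample
i_ran = np.arange(len(scores))
vectorized = scores[i_ran, y]    # one score per row, picked by (row, label) pairs
looped = np.array([scores[i, y[i]] for i in range(len(scores))])
assert np.allclose(vectorized, looped)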


