How to use the get_data_dir method in avocado

Best Python code snippets using avocado_python
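The snippets on this page come from open-source projects and show the two common shapes of this helper: a module-level function and a loader method. For avocado itself, the call below is only a minimal sketch; it assumes the avocado-framework package is installed and that the avocado.core.data_dir module exposes get_data_dir(), which may vary between releases:

# Hedged sketch: query avocado's configured data directory.
# Assumes avocado-framework is installed and that avocado.core.data_dir
# provides get_data_dir() (module location can differ across releases).
from avocado.core import data_dir

print(data_dir.get_data_dir())  # e.g. ~/avocado/data, depending on configuration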

proj_templates.py

Source: proj_templates.py (GitHub)

...
from string import Template
import gzip
import logging
import simplejson as json

def get_data_dir():
    if 'DATA_DIR' in os.environ:
        s = os.environ['DATA_DIR']
        return "%s/path_inference/" % s
    else:
        return "%s/path_inference/" % data_path()
        #return "/windows/D/arterial_data/high_frequency/"

#data_dir = "/windows/D/arterial_data/high_frequency/"
#data_dir = "/mnt2/data/"
# Main data file - used by Jython
data_file = "raw/raw_arterial_201.json.gz"
raw_data_tpl = "projected_id${driver_id}_0of0.json.gz"
_raw_data_tpl_re = "projected_id(.+)_0of0.json.gz"
# Raw tracks - removed all spurious points and split by driver:
raw_tracks_tpl = "raw_points_id${driver_id}.json.gz"
_raw_tracks_tpl_re = "raw_points_id(.+).json.gz"
# The mapped points
mapped_points_tpl = "mapped_points_id${driver_id}.json.gz"
_mapped_points_tpl_re = "mapped_points_id(.+).json.gz"
# The complete mapped tracks
mapped_tracks_tpl = "mapped_tracks_id${driver_id}.json.gz"
_mapped_tracks_tpl_re = "mapped_tracks_id(.+).json.gz"
mapped_trajs_tpl = "mapped_trajs_id${driver_id}_traj${track_idx}_dt${res}.json.gz"
_mapped_trajs_tpl_re = "mapped_trajs_id(.+)_traj([0-9]*)_dt([0-9]*).json.gz"
traj_points_tpl = "traj_points_id${driver_id}_traj${track_idx}_dt${res}.json.gz"
_traj_points_tpl_re = "traj_points_id(.+)_traj([0-9]*)_dt([0-9]*).json.gz"
features_tpl = "features_id${driver_id}_traj${track_idx}_dt${res}.pkl.gz"
features_tpl_re = "features_id(.+)_traj([0-9]*)_dt([0-9]*).pkl.gz"
most_likely_indexes_tpl = "most_likely_indexes_id${driver_id}_traj${track_idx}_dt${res}.pkl"
most_likely_indexes_tpl_re = "most_likely_indexes_id(.+)_traj([0-9]*)_dt([0-9]*).pkl"
_partition_data_tpl = "partition_dt${res}.pkl"
_learned_parameter_tpl = "learned_dt${res}_batch${batch_idx}.pkl"
_evaluation_data_tpl = "evaluation_dt${res}_batch${batch_idx}.pkl"
_evaluation_data_re = "evaluation_dt([0-9]*)_batch([0-9]*).pkl"
_evaluation_em_data_tpl = "evaluation_em_dt${res}_batch${batch_idx}.pkl"
_evaluation_em_data_re = "evaluation_em_dt([0-9]*)_batch([0-9]*).pkl"
_em_intermediate_file_tpl = "em_temp_${strategy_type}_batch${batch_idx}_${iteration}.pkl"
_em_intermediate_file_re = "em_temp_(.+)_batch([0-9]*)_([0-9]*).pkl"

def get_data_fname(**_):
    """ Returns the name of the file containing the raw data.
    Accepted arguments:
      none
    """
    return get_data_dir() + data_file

def get_data_file(mode='w', **_):
    """ Returns a file descriptor-like object to the file containing the data.
    Arguments:
      mode: r/w mode
    """
    return gzip.open(get_data_dir() + data_file, mode)

def get_partition_file(mode='r', **kwargs):
    """ Opens a partition file.
    Accepted kwargs:
      - res
    """
    fname = get_data_dir() + Template(_partition_data_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_learned_parameter_file(mode='r', **kwargs):
    """ Opens a partition file.
    Accepted kwargs:
      - res
    """
    fname = get_data_dir() + Template(_learned_parameter_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_evaluation_data_file(mode='r', **kwargs):
    """ Opens a partition file.
    Accepted kwargs:
      - res
    """
    fname = get_data_dir() + Template(_evaluation_data_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_evaluation_em_data_file(mode='r', **kwargs):
    """ Opens a partition file.
    Accepted kwargs:
      - res
      - batch_idx
    """
    fname = get_data_dir() + Template(_evaluation_em_data_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_evaluation_fnames(res=None, batch=None):
    """ Returns the list of raw data files in the work directory, filtered (if
    supplied) by a driver id.
    Generator function. Each generator element is a list of
    (full path name, driver id)
    """
    files = os.listdir(get_data_dir())
    p = re.compile(_evaluation_data_re)
    for f in files:
        m = p.match(f)
        if m:
            _res = int(m.group(1))
            if res is not None and _res != res:
                continue
            _batch = int(m.group(2))
            if batch is not None and _batch != batch:
                continue
            yield (get_data_dir() + f, _res, _batch)

def get_em_evaluation_fnames(res=None, batch=None):
    """ Returns the list of raw data files in the work directory, filtered (if
    supplied) by a driver id.
    Generator function. Each generator element is a list of
    (full path name, driver id)
    """
    files = os.listdir(get_data_dir())
    #logging.info("data dir: %r", get_data_dir())
    #logging.info("Files:")
    #logging.info(files)
    p = re.compile(_evaluation_em_data_re)
    for f in files:
        m = p.match(f)
        if m:
            _res = int(m.group(1))
            if res is not None and _res != res:
                continue
            _batch = int(m.group(2))
            if batch is not None and _batch != batch:
                continue
            logging.info((get_data_dir() + f, _res, _batch))
            yield (get_data_dir() + f, _res, _batch)

def get_raw_data_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the raw data.
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
    """
    fname = get_data_dir() + Template(raw_data_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_raw_track_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    raw track.
    (A raw track is a collection of unmapped points)
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
    """
    fname = get_data_dir() + Template(raw_tracks_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_mapped_points_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    raw track.
    (A raw track is a collection of unmapped points)
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
    """
    fname = get_data_dir() + Template(mapped_points_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_traj_points_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    points associated to a trajectory.
    (A trajectory is a sequence of mapped points with positive flow)
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
      track_idx: integer, the index of the track
    """
    fname = get_data_dir() + Template(traj_points_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_mapped_tracks_fname(mode='r', **kwargs):
    fname = get_data_dir() + Template(mapped_tracks_tpl).substitute(**kwargs)
    return fname

def get_mapped_tracks_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    raw track.
    (A mapped track is a collection of tuples). Each tuple is:
      - linking pairs
      - paths
      - linking pairs
      - points
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
    """
    fname = get_data_dir() + Template(mapped_tracks_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_mapped_trajs_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    trajectory.
    (A mapped track is a collection of tuples). Each tuple is:
      - linking pairs
      - paths
      - linking pairs
      - points
    Furthermore, this sequence has positive flow through the linking pairs.
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
      track_idx: integer, the index of the track
    """
    fname = get_data_dir() + Template(mapped_trajs_tpl).substitute(**kwargs)
    return gzip.open(fname, mode)

def get_features_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    feature vectors of a trajectory.
    The feature vectors alternate points and path feature vectors, in a
    single list.
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
      track_idx: integer, the index of the track
      res: integer, temporal resolution (seconds). 0 means the reference
        track (undecimated).
    """
    fname = get_data_dir() + Template(features_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_most_likely_indexes_file(mode='r', **kwargs):
    """ Returns a file descriptor-like object to the file containing the
    feature vectors of a trajectory.
    The feature vectors alternate points and path feature vectors, in a
    single list.
    Arguments:
      mode: r/w mode
      driver_id: string, the id of the driver
      track_idx: integer, the index of the track
      res: integer, temporal resolution (seconds). 0 means the reference
        track (undecimated).
    """
    fname = get_data_dir() + Template(most_likely_indexes_tpl).substitute(**kwargs)
    return open(fname, mode)

def get_raw_data_fnames(driver_id=None):
    """ Returns the list of raw data files in the work directory, filtered (if
    supplied) by a driver id.
    Generator function. Each generator element is a list of
    (full path name, driver id)
    """
    files = os.listdir(get_data_dir())
    p = re.compile(_raw_data_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            if driver_id and _driver_id != driver_id:
                continue
            yield (get_data_dir() + f, _driver_id)

def get_em_intermediate_fnames(type=None, iteration=None, batch_idx=None):
    """ Returns the list of the EM intermediate files computed from the big
    cloud data. (in pickle format)
    Generator function. Each generator element is a list of
    (full path name, )
    """
    files = os.listdir(get_data_dir())
    p = re.compile(_em_intermediate_file_re)
    for f in files:
        m = p.match(f)
        if m:
            _type = m.group(1)
            _batch_idx = int(m.group(2))
            _iteration = int(m.group(3))
            if type is not None and _type != type:
                continue
            if iteration is not None and _iteration != iteration:
                continue
            if batch_idx is not None and _batch_idx != batch_idx:
                continue
            yield (get_data_dir() + f, _type, _batch_idx, _iteration)

def get_em_intermediate_file(type, iteration, batch_idx, mode='r'):
    fname = get_data_dir() + "/" + Template(_em_intermediate_file_tpl).substitute(
        strategy_type=type, iteration=iteration, batch_idx=batch_idx)
    return open(fname, mode)

def get_raw_tracks_fnames(driver_id=None):
    """ Returns the list of raw track files in the work directory, filtered (if
    supplied) by a driver id.
    Generator function. Each generator element is a list of
    (full path name, driver id, track idx, number of tracks)
    """
    files = os.listdir(get_data_dir())
    p = re.compile(_raw_tracks_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            if driver_id and _driver_id != driver_id:
                continue
            yield (get_data_dir() + f, _driver_id)

def get_mapped_points_fnames(driver_id=None):
    """ Returns the list of mapped point files in the work directory, filtered
    (if supplied) by a driver id.
    Generator function. Each generator element is a list of
    (full path name, driver id)
    """
    files = os.listdir(get_data_dir())
    p = re.compile(_mapped_points_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            if driver_id and _driver_id != driver_id:
                continue
            yield (get_data_dir() + f, _driver_id)

def get_mapped_tracks_fnames(driver_id=None):
    files = os.listdir(get_data_dir())
    p = re.compile(_mapped_tracks_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            if driver_id and _driver_id != driver_id:
                continue
            yield (get_data_dir() + f, _driver_id)

def get_features_fnames(driver_id=None, track_idx=None, dt=None):
    files = os.listdir(get_data_dir())
    p = re.compile(features_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            _track_idx = int(m.group(2))
            _res = int(m.group(3))
            if driver_id and _driver_id != driver_id:
                continue
            if track_idx is not None and _track_idx != track_idx:
                continue
            if dt is not None and _res != dt:
                continue
            yield (get_data_dir() + f, _driver_id, _track_idx, _res)

def get_mapped_trajs_fnames(driver_id=None, track_idx=None, res=None):
    files = os.listdir(get_data_dir())
    p = re.compile(_mapped_trajs_tpl_re)
    for f in files:
        m = p.match(f)
        if m:
            _driver_id = m.group(1)
            _track_idx = int(m.group(2))
            _res = int(m.group(3))
            if driver_id and _driver_id != driver_id:
                continue
            if track_idx is not None and _track_idx != track_idx:
                continue
            if res is not None and _res != res:
                continue
            yield (get_data_dir() + f, _driver_id, _track_idx, _res)

def map_file_from_raw(mapping_closure, driver_id=None):
    """ Takes a raw track file and maps it into mapped points.
    """
    input_fdata = get_raw_tracks_fnames(driver_id=driver_id)
    for (in_fname, _driver_id) in input_fdata:
        fin = get_raw_track_file(mode='r', driver_id=_driver_id)
        print in_fname  # Open and process this file line by line
        fin.readline()  # Skip the first line
        fout = get_mapped_points_file(mode='w', driver_id=_driver_id)
        fout.write("[\n")
        for line in fin:
            if line == "0]":  # End of the file? finish
                break
            dct = json.loads(line[:-2])  # Process one line, remove the last column
            dct_out = mapping_closure(dct)
            if dct_out is not None:
                fout.write(json.dumps(dct_out))
                fout.write(",\n")
        fout.write("0]")
        fout.close()
        fin.close()

def map_file(mapping_closure, fdata_in, out_closure=None):
    for _args in fdata_in:
        print "mapping ", _args
        # Unpack the arguments, depends on the type of file.
        in_fname = _args[0]
        (_driver_id, _track_idx, _res) = (None, None, None)
        if len(_args) > 1:
            _driver_id = _args[1]
        else:
            _driver_id = None
        if len(_args) > 2:
            _track_idx = _args[2]
        else:
            _track_idx = None
        if len(_args) > 3:
            _res = _args[3]
        else:
            _res = None
        # Open and process this file line by line
        if in_fname.endswith('.gz'):
            fin = gzip.open(in_fname, 'r')
        else:
            fin = open(in_fname, 'r')
        fout = None
        if out_closure:
            fout = out_closure(mode='w', driver_id=_driver_id, \
                               track_idx=_track_idx, \
                               res=_res)
            print "output: ", fout
            fout.write("[\n")
        # Skip the first line
        fin.readline()
        for line in fin:
            # End of the file? finish
            if line == "0]":
                break
            # Process one line
            # Remove the last column
            dct = json.loads(line[:-2])
            dct_out = mapping_closure(dct)
            if dct_out is not None and fout:
                fout.write(json.dumps(dct_out))
                fout.write(",\n")
            del dct
            del dct_out
            # print gc.get_count()
            # gc.collect()
        fin.close()
        if fout:
            fout.write("0]")
            fout.close()

def cut_track_into_trajs(driver_id, segments):
    """ TODO: doc
    """
    fin = get_mapped_tracks_file(mode='r', driver_id=driver_id)
    print("<<<%s" % str(fin))
    # Skip the first line
    fin.readline()
    track_counter = 0
    elts_counter = 0
    for segment in segments:
        track_counter += 1
        fout = get_mapped_trajs_file(mode='w', \
                                     driver_id=driver_id, \
                                     track_idx=track_counter, res=0)
        print("OUT>>>%s" % str(fout))
        fout.write("[\n")
        for idx in segment:
            while elts_counter < idx:
                fin.readline()
                elts_counter += 1
            fout.write(fin.readline())
            elts_counter += 1
        fout.write("0]")
        fout.close()
    fin.close()

def cut_track_into_traj_points(driver_id, segments):
    """ TODO: doc
    """
    fin_points = get_mapped_points_file(mode='r', driver_id=driver_id)
    print("<<<%s" % str(fin_points))
    # Skip the first line
    fin_points.readline()
    track_counter = 0
    elts_counter = 0
    for segment in segments:
        track_counter += 1
        fout_points = get_traj_points_file(mode='w', \
                                           driver_id=driver_id, \
                                           track_idx=track_counter, res=0)
        print("OUT>>>%s" % str(fout_points))
        fout_points.write("[\n")
        for idx in segment:
            while elts_counter < idx:
                fin_points.readline()
                elts_counter += 1
            fout_points.write(fin_points.readline())
            elts_counter += 1
        fout_points.write("0]")
        fout_points.close()
    fin_points.close()

def create_traj_path_iterator(driver_id, track_idx, res):
    def traj_path_iterator():
        fin = get_mapped_trajs_file(mode='r', driver_id=driver_id, \
                                    track_idx=track_idx, res=res)
        fin.readline()
        fin.readline()  # The first element only contains the first point
        # print "Traj iterator:", fin
        for line in fin:
            if line == '0]':
                break
            dct = json.loads(line[:-2])
            (_, paths_dct, _, _) = dct
            yield paths_dct
        fin.close()
    return traj_path_iterator

def get_network_dct(net_id):
    ''' Loads a network.
    TODO: move to MM-specific parts.
    '''
    fname = get_data_dir() + '/network_%s.json' % str(net_id)
    fin = open(fname, "r")
    dct = json.load(fin)
    fin.close()
...
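All of the helpers in proj_templates.py build their paths on top of get_data_dir(), which reads the DATA_DIR environment variable and appends path_inference/. The sketch below is hypothetical usage (Python 3 print syntax; the module itself is Python 2 code, must be importable as proj_templates, and expects the *.json.gz files to exist under the work directory):

import os

# Hypothetical work directory; get_data_dir() appends "path_inference/" to it.
os.environ['DATA_DIR'] = '/tmp/arterial_work'

import proj_templates as pt  # assumes proj_templates.py is on the path

print(pt.get_data_dir())    # -> /tmp/arterial_work/path_inference/
print(pt.get_data_fname())  # full path of the main raw data file

# The generator helpers scan the data directory and yield (path, driver_id).
for fname, driver_id in pt.get_raw_tracks_fnames():
    print(fname, driver_id)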

factory.py

Source: factory.py (GitHub)

...
    else:
        logging.fatal('Not one of sbpd')
    return dataset

class Loader():
    def get_data_dir(self):
        pass

    def get_meta_data(self, file_name, data_dir=None):
        if data_dir is None:
            data_dir = self.get_data_dir()
        full_file_name = os.path.join(data_dir, 'meta', file_name)
        assert(fu.exists(full_file_name)), \
            '{:s} does not exist'.format(full_file_name)
        ext = os.path.splitext(full_file_name)[1]
        if ext == '.txt':
            ls = []
            with fu.fopen(full_file_name, 'r') as f:
                for l in f:
                    ls.append(l.rstrip())
        elif ext == '.pkl':
            ls = utils.load_variables(full_file_name)
        return ls

    def load_building(self, name, data_dir=None):
        if data_dir is None:
            data_dir = self.get_data_dir()
        out = {}
        out['name'] = name
        out['data_dir'] = data_dir
        out['room_dimension_file'] = os.path.join(data_dir, 'room-dimension',
                                                  name+'.pkl')
        out['class_map_folder'] = os.path.join(data_dir, 'class-maps')
        return out

    def load_building_meshes(self, building):
        dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
        mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
        mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
        logging.error('Loading building from obj file: %s', mesh_file_name_full)
        shape = renderer.Shape(mesh_file_name_full, load_materials=True,
                               name_prefix=building['name']+'_')
        return [shape]

class StanfordBuildingParserDataset(Loader):
    def __init__(self, ver):
        self.ver = ver
        self.data_dir = None

    def get_data_dir(self):
        if self.data_dir is None:
            self.data_dir = 'data/stanford_building_parser_dataset/'
        return self.data_dir

    def get_benchmark_sets(self):
        return self._get_benchmark_sets()

    def get_split(self, split_name):
        if self.ver == 'sbpd':
            return self._get_split(split_name)
        else:
            logging.fatal('Unknown version.')

    def _get_benchmark_sets(self):
        sets = ['train1', 'val', 'test']
        return sets

    def _get_split(self, split_name):
...
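Here the base Loader defers to get_data_dir() whenever no explicit data_dir is passed, and each dataset subclass pins the directory lazily. A short, hypothetical usage sketch (it assumes factory.py and its project helpers fu, utils and renderer are importable):

# Hypothetical usage of the loader above; only works inside the project
# that provides factory.py and its helpers (fu, utils, renderer).
from factory import StanfordBuildingParserDataset

dataset = StanfordBuildingParserDataset('sbpd')
print(dataset.get_data_dir())        # 'data/stanford_building_parser_dataset/'
print(dataset.get_benchmark_sets())  # ['train1', 'val', 'test']

# get_meta_data() falls back to get_data_dir() when data_dir is None;
# 'class_names.txt' is a made-up file name used only for illustration.
# class_names = dataset.get_meta_data('class_names.txt')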

