How to use _get_data_from_file method in avocado

Best Python code snippet using avocado_python

dataset.py

Source:dataset.py Github

copy

Full Screen

import numpy as np

import mixture_multivariateBernoulli.config as config


def _has_probs(probs):
    """Return True when *probs* is a real probability array.

    np.savez stores a saved ``None`` as a 0-d object array, so ``ndim == 0``
    means "no probabilities were saved".  (This replaces the fragile
    ``probs == None`` checks that were left commented out in the original.)
    """
    return probs.ndim > 0


def _shuffled(data, data_probs):
    """Return *data* (and *data_probs*, when present) reordered by one shared
    random permutation, keeping each row aligned with its probability."""
    perm = np.random.permutation(len(data))
    data = data[perm]
    if _has_probs(data_probs):
        data_probs = data_probs[perm]
    return data, data_probs


def _get_data_from_file(args):
    """Load train/valid/test splits from the ``.npz`` file ``args['data_file']``.

    Expected keys in *args*: ``data_file``, ``train_size``, ``valid_size`` and
    ``test_size`` (the latter may be the string ``'FULL_TEST'`` to use the
    exhaustive outcome table as the test set).  When ``config.random_data`` is
    set, rows are shuffled before slicing.

    Returns a dict with keys ``train_data``, ``train_data_probs``,
    ``valid_data``, ``valid_data_probs``, ``test_data`` and
    ``test_data_probs`` (the ``*_probs`` entries are ``None`` when the file
    carries no probabilities).
    """
    with np.load(args['data_file']) as parameters:
        all_outcomes = parameters['all_outcomes']
        prob_of_outcomes = parameters['prob_of_outcomes']

        data = parameters['train_data']
        data_probs = parameters['train_data_probs']
        if config.random_data:
            data, data_probs = _shuffled(data, data_probs)

        train_end = args['train_size']
        valid_end = train_end + args['valid_size']
        train_data = data[0:train_end]
        valid_data = data[train_end:valid_end]
        if _has_probs(data_probs):
            train_data_probs = data_probs[0:train_end]
            valid_data_probs = data_probs[train_end:valid_end]
        else:
            train_data_probs = None
            valid_data_probs = None

        if args['test_size'] == 'FULL_TEST':
            # Use the exhaustive outcome table (and its exact probabilities)
            # as the test set instead of slicing held-out samples.
            test_data = all_outcomes
            test_data_probs = prob_of_outcomes
        else:
            data = parameters['test_data']
            data_probs = parameters['test_data_probs']
            if config.random_data:
                data, data_probs = _shuffled(data, data_probs)
            test_data = data[0:args['test_size']]
            if _has_probs(data_probs):
                test_data_probs = data_probs[0:args['test_size']]
            else:
                test_data_probs = None

    return {'train_data': train_data,
            'train_data_probs': train_data_probs,
            'valid_data': valid_data,
            'valid_data_probs': valid_data_probs,
            'test_data': test_data,
            'test_data_probs': test_data_probs}
_get_data_from_file(args)56 elif args['data_name'] == 'Boltzmann':57 args['data_file'] = 'datasets/Boltzman_' + str(args['n']) + '&' + str(args['m']) + '.npz'58 return _get_data_from_file(args)59 elif args['data_name'].startswith('mnist'):60 if args['digit'] == 'All':61 tr = args['train_size']62 va = args['valid_size']63 te = args['test_size']64 args['train_size'] = args['train_size'] // 1065 args['valid_size'] = args['valid_size'] // 1066 args['test_size'] = args['test_size'] // 1067 args['data_file'] = 'datasets/binary_mnist_' + str(0) + '.npz'68 res = _get_data_from_file(args)69 for d in range(1, 10):70 args['data_file'] = 'datasets/binary_mnist_' + str(d) + '.npz'71 tmp = _get_data_from_file(args)72 res['train_data'] = np.concatenate([res['train_data'], tmp['train_data']], axis=0)73 res['valid_data'] = np.concatenate([res['valid_data'], tmp['valid_data']], axis=0)74 res['test_data'] = np.concatenate([res['test_data'], tmp['test_data']], axis=0)75 np.random.shuffle(res['train_data'])76 np.random.shuffle(res['valid_data'])77 np.random.shuffle(res['test_data'])78 args['train_size'] = tr79 args['valid_size'] = va80 args['test_size'] = te81 return res82 else:83 raise Exception("ERROR: mnist should not be run for just one digit")84 # args['data_file'] = 'datasets/binary_mnist_'+ str(args['digit']) + '.npz'85 # return _get_data_from_file(args)86 elif args['data_name'].startswith('ocr'):87 tr = args['train_size']88 va = args['valid_size']89 te = args['test_size']90 args['train_size'] = tr // 2091 args['valid_size'] = va // 2092 args['test_size'] = te // 2093 args['data_file'] = 'datasets/ocr_' + str(config.ocr_characters[0]) + '.npz'94 res = _get_data_from_file(args)95 for d in range(1, 20):96 args['train_size'] = tr // 2097 args['valid_size'] = va // 2098 args['test_size'] = te // 2099 args['data_file'] = 'datasets/ocr_' + str(config.ocr_characters[d]) + '.npz'100 tmp = _get_data_from_file(args)101 res['train_data'] = np.concatenate([res['train_data'], 
tmp['train_data']], axis=0)102 res['valid_data'] = np.concatenate([res['valid_data'], tmp['valid_data']], axis=0)103 res['test_data'] = np.concatenate([res['test_data'], tmp['test_data']], axis=0)104 np.random.shuffle(res['train_data'])105 np.random.shuffle(res['valid_data'])106 np.random.shuffle(res['test_data'])107 #################### in baayad avaz beshe!!!108 args['train_size'] = tr109 args['valid_size'] = va110 args['test_size'] = te111 return res112 elif args['data_name'] == 'k_sparse':113 args['data_file'] = 'datasets/k_sparse_' + str(args['n']) + '_' + str(args['sparsity_degree']) + '.npz'114 return _get_data_from_file(args)115 elif args['data_name'] == 'rcv1':116 args['data_file'] = 'datasets/rcv1.npz'117 return _get_data_from_file(args)118 elif args['data_name'] == 'BayesNet':119 args['data_file'] = 'datasets/BayesNet_' + str(args['n']) + '_' + str(args['par_num']) + '.npz'120 return _get_data_from_file(args)121 else:...

Full Screen

Full Screen

data_getter.py

Source:data_getter.py Github

copy

Full Screen

    # Paths to the backing data files used by the getters below.
    # NOTE(review): CURRENT_DIR and STOP_WORDS_WITH_CITIES are defined earlier
    # in the file, outside this excerpt.
    TOWN_DATA = CURRENT_DIR + "/../../preprocessing_data/towns.txt"
    CSV_TRAIN_DATA = CURRENT_DIR + "/../../data/Test_rev1.csv"
    UNIQUE_JOB_DATA = CURRENT_DIR + "/../../preprocessing_data/job_roles_unique.txt"

    @staticmethod
    def _get_data_from_file(filename, lowercase=True):
        """Read *filename* and return its lines as a list of strings.

        Trailing newlines are removed; each line is lower-cased unless
        *lowercase* is False.
        """
        with open(filename, 'r') as f:
            read_data = []
            for row in f.readlines():
                row_data = row.lower() if lowercase else row
                # replace("\n", "") strips the single trailing newline that
                # readlines() leaves on each line.
                read_data.append(row_data.replace("\n", ""))
            return read_data

    @classmethod
    def get_towns(cls, lowercase=True):
        """
        Get the list of towns above 10000 population.
        :return: list of towns
        """
        return DataGetter._get_data_from_file(cls.TOWN_DATA, lowercase)

    @classmethod
    def get_stop_word_inc_cities(cls, lowercase=True):
        """
        Get the list of towns above 10000 population plus stop words.
        :return: list of towns and stop words
        """
        return DataGetter._get_data_from_file(cls.STOP_WORDS_WITH_CITIES, lowercase)

    @classmethod
    def get_unique_job_roles(cls, lowercase=True):
        """
        Get the list of unique job roles as specified by the British archive.
        """
        # (method body truncated in this excerpt)

Full Screen

Full Screen

cpc6128.py

Source:cpc6128.py Github

copy

Full Screen

from memory import Memory, RomChunk, UpperRomChunk
from instruction import Cpu
import cpcio
import screen


def _get_data_from_file(fname):
    """Return the raw bytes of *fname* (used here to load ROM images)."""
    with open(fname, 'rb') as fin:
        data = fin.read()
    return data


def _build_memory(screen):
    """ Initialize CPC464 memory map """
    # NOTE(review): the *screen* parameter shadows the module-level
    # ``import screen`` inside this function.
    memory = Memory(screen)
    # Lower ROM at 0x0000, upper (BASIC) ROM bank 0 at 0xC000.
    memory.add_chunk(0x0000, RomChunk(_get_data_from_file('6128L.rom')))
    memory.add_chunk(0xC000, UpperRomChunk(0, _get_data_from_file('6128U-basic.rom')))
    memory.apply_ram_map(0)
    memory.dump_map()
    return memory


def _build_cpu(memory_map):
    # Thin factory: construct the CPU over the assembled memory map.
    return Cpu(memory_map)


class Cpc6128(object):
    # Top-level machine object: wires together screen, memory, CPU and the
    # I/O chips (gate array, PPI, printer port, CRTC).
    def __init__(self):
        self.screen = scr = screen.Screen()
        self.memory = _build_memory(self.screen)
        self.cpu = _build_cpu(self.memory)
        self.gatearray = cpcio.GateArray(self.cpu, self.memory, scr)
        self.ppi8255 = cpcio.PPI8255(self.cpu)
        self.printer = cpcio.PrinterPort(self.cpu)
        self.crtc = cpcio.Crtc(self.cpu, scr)
        # (remainder of __init__ truncated in this excerpt)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful