How to use the test_alphabets method in autotest

Best Python code snippet using autotest_python

preprocess_omni.py

Source: preprocess_omni.py (GitHub)


import argparse
import numpy as np
import os
import pickle
from scipy.io import loadmat

parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', type=str, default="/home/gigi/ns_data/omniglot_ns")

def _load():
    # load data
    file = os.path.join(args.data_dir, 'chardata.mat')
    data = loadmat(file)
    # data is in train/test split so read separately
    train_images = data['data'].astype(np.float32).T
    train_alphabets = np.argmax(data['target'].astype(np.float32).T, axis=1)
    train_characters = data['targetchar'].astype(np.float32)
    test_images = data['testdata'].astype(np.float32).T
    test_alphabets = np.argmax(data['testtarget'].astype(np.float32).T, axis=1)
    test_characters = data['testtargetchar'].astype(np.float32)
    # combine train and test data
    images = np.concatenate([train_images, test_images], axis=0)
    alphabets = np.concatenate([train_alphabets, test_alphabets], axis=0)
    characters = np.concatenate([np.ravel(train_characters),
                                 np.ravel(test_characters)], axis=0)
    data = (images, alphabets, characters)
    return data

# def load():
#     # load data
#     file = os.path.join(args.data_dir, 'chardata.mat')
#     data = loadmat(file)
#     # data is in train/test split so read separately
#     train_images = data['data'].astype(np.float32).T
#     train_alphabets = np.argmax(data['target'].astype(np.float32).T, axis=1)
#     train_characters = data['targetchar'].astype(np.float32)
#     tr_images = np.concatenate([train_images], axis=0)
#     tr_alphabets = np.concatenate([train_alphabets], axis=0)
#     tr_characters = np.concatenate([np.ravel(train_characters)], axis=0)
#     test_images = data['testdata'].astype(np.float32).T
#     test_alphabets = np.argmax(data['testtarget'].astype(np.float32).T, axis=1)
#     test_characters = data['testtargetchar'].astype(np.float32)
#     # combine train and test data
#     ts_images = np.concatenate([test_images], axis=0)
#     ts_alphabets = np.concatenate([test_alphabets], axis=0)
#     ts_characters = np.concatenate([np.ravel(test_characters)], axis=0)
#
#     tr_data = (tr_images, tr_alphabets, tr_characters)
#     ts_data = (ts_images, ts_alphabets, ts_characters)
#     return tr_data, ts_data

# def modify(data):
#     # We don't care about alphabets, so combine all alphabets
#     # into a single character ID.
#     # First collect all unique (alphabet, character) pairs.
#     images, alphabets, characters = data
#     unique_alphabet_character_pairs = list(set(zip(alphabets, characters)))
#     # Now assign each pair an ID
#     ids = np.asarray([unique_alphabet_character_pairs.index((alphabet, character))
#                       for (alphabet, character) in zip(alphabets, characters)])
#     # Now split into train(1200)/val(323)/test(100) by character
#     # train_images = images[ids < 1200]
#     # train_labels = ids[ids < 1200]
#     # val_images = images[(1200 <= ids) * (ids < 1523)]
#     # val_labels = ids[(1200 <= ids) * (ids < 1523)]
#     # test_images = images[1523 <= ids]
#     # test_labels = ids[1523 <= ids]
#     # split_data = (train_images, train_labels,
#     #               val_images, val_labels,
#     #               test_images, test_labels)
#     return (images, ids)

# def _modify(data):
#     # We don't care about alphabets, so combine all alphabets
#     # into a single character ID.
#     # First collect all unique (alphabet, character) pairs.
#     images, alphabets, characters = data
#     unique_alphabet_character_pairs = list(set(zip(alphabets, characters)))
#     # Now assign each pair an ID
#     ids = np.asarray([unique_alphabet_character_pairs.index((alphabet, character))
#                       for (alphabet, character) in zip(alphabets, characters)])
#     print(ids.shape)
#     print(images.shape)
#     # Now split into train(1200)/val(323)/test(100) by character
#     train_images = images[ids < 1200]
#     train_labels = ids[ids < 1200]
#     test_images = images[1200 <= ids]
#     test_labels = ids[1200 <= ids]
#     # val_images = images[(1200 <= ids) * (ids < 1523)]
#     # val_labels = ids[(1200 <= ids) * (ids < 1523)]
#     # test_images = images[1523 <= ids]
#     # test_labels = ids[1523 <= ids]
#     print(train_images.shape, test_images.shape)
#     print(train_labels.shape, test_labels.shape)
#     split_data = (train_images, train_labels,
#                   test_images, test_labels)
#     return split_data

def _modify(data):
    # We don't care about alphabets, so combine all alphabets
    # into a single character ID.
    # First collect all unique (alphabet, character) pairs.
    images, alphabets, characters = data
    unique_alphabet_character_pairs = list(set(zip(alphabets, characters)))
    # Now assign each pair an ID
    ids = np.asarray([unique_alphabet_character_pairs.index((alphabet, character))
                      for (alphabet, character) in zip(alphabets, characters)])
    print(ids.shape)
    print(images.shape)
    # Now split into train(1000)/val(200)/test(460) by character
    train_images = images[ids < 1000]
    train_labels = ids[ids < 1000]

    val_images = images[(1000 <= ids) * (ids < 1200)]
    val_labels = ids[(1000 <= ids) * (ids < 1200)]
    test_images = images[1200 <= ids]
    test_labels = ids[1200 <= ids]

    print(train_images.shape, val_images.shape, test_images.shape)
    print(train_labels.shape, val_labels.shape, test_labels.shape)
    split_data = (train_images, train_labels,
                  val_images, val_labels,
                  test_images, test_labels)
    return split_data

# def main():
#     tr, ts = load()
#     tr_data = modify(tr)
#     ts_data = modify(ts)
#     tr_img, tr_lbl = tr_data
#     ts_img, ts_lbl = ts_data
#     print(tr_img.shape, tr_lbl.shape, ts_img.shape, ts_lbl.shape)
#     data = (tr_img, tr_lbl, ts_img, ts_lbl)
#     # save(data)

def save(data):
    savepath = os.path.join(args.data_dir, 'omni_train_val_test.pkl')
    with open(savepath, 'wb') as file:
        pickle.dump(data, file)

def _main():
    data = _load()
    modified_data = _modify(data)
    save(modified_data)

if __name__ == '__main__':
    args = parser.parse_args()
    assert (args.data_dir is not None) and (os.path.isdir(args.data_dir))...
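In this snippet, test_alphabets is a NumPy array of integer alphabet indices: data['testtarget'] is a one-hot (alphabet x image) matrix, so transposing it and taking np.argmax along axis 1 yields one alphabet label per test image. Here is a minimal sketch of that single step on a tiny made-up matrix (the values below are purely illustrative, not taken from chardata.mat):

import numpy as np

# Hypothetical stand-in for data['testtarget']: shape (n_alphabets, n_images),
# one-hot along the alphabet axis, as loadmat would return it.
testtarget = np.array([[1, 0, 0],
                       [0, 0, 1],
                       [0, 1, 0]], dtype=np.float32)

# Same expression as in the snippet: transpose to (n_images, n_alphabets),
# then argmax over axis 1 to get one alphabet index per image.
test_alphabets = np.argmax(testtarget.astype(np.float32).T, axis=1)
print(test_alphabets)  # -> [0 2 1]

To run the full script, pass the directory that holds chardata.mat, e.g. python preprocess_omni.py --data-dir /path/to/omniglot_ns (the path here is a placeholder); it writes omni_train_val_test.pkl into that same directory.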


omnicreate.py

Source: omnicreate.py (GitHub)


import argparse
import numpy as np
import os
import pickle
from scipy.io import loadmat

parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', required=True, type=str, default=None)
args = parser.parse_args()
assert (args.data_dir is not None) and (os.path.isdir(args.data_dir))

def load():
    # load data
    file = os.path.join(args.data_dir, 'chardata.mat')
    data = loadmat(file)
    # data is in train/test split so read separately
    train_images = data['data'].astype(np.float32).T
    train_alphabets = np.argmax(data['target'].astype(np.float32).T, axis=1)
    train_characters = data['targetchar'].astype(np.float32)
    test_images = data['testdata'].astype(np.float32).T
    test_alphabets = np.argmax(data['testtarget'].astype(np.float32).T, axis=1)
    test_characters = data['testtargetchar'].astype(np.float32)
    # combine train and test data
    images = np.concatenate([train_images, test_images], axis=0)
    alphabets = np.concatenate([train_alphabets, test_alphabets], axis=0)
    characters = np.concatenate([np.ravel(train_characters),
                                 np.ravel(test_characters)], axis=0)
    data = (images, alphabets, characters)
    return data

def modify(data):
    # We don't care about alphabets, so combine all alphabets
    # into a single character ID.
    # First collect all unique (alphabet, character) pairs.
    images, alphabets, characters = data
    unique_alphabet_character_pairs = list(set(zip(alphabets, characters)))
    # Now assign each pair an ID
    ids = np.asarray([unique_alphabet_character_pairs.index((alphabet, character))
                      for (alphabet, character) in zip(alphabets, characters)])
    # Now split into train(1200)/val(323)/test(100) by character
    train_images = images[ids < 1200]
    train_labels = ids[ids < 1200]
    val_images = images[(1200 <= ids) * (ids < 1523)]
    val_labels = ids[(1200 <= ids) * (ids < 1523)]
    test_images = images[1523 <= ids]
    test_labels = ids[1523 <= ids]
    split_data = (train_images, train_labels, val_images,
                  val_labels, test_images, test_labels)
    return split_data

def save(data):
    savepath = os.path.join(args.data_dir, 'train_val_test_split.pkl')
    with open(savepath, 'wb') as file:
        pickle.dump(data, file)

def main():
    data = load()
    modified_data = modify(data)
    save(modified_data)

if __name__ == '__main__':...
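omnicreate.py loads the same chardata.mat file but splits the combined character IDs 1200/323/100 into train/val/test and pickles the result as train_val_test_split.pkl. Below is a minimal sketch of reading that split back, assuming the six-tuple layout produced by modify() and save() above (the data_dir value is a placeholder, not taken from the source):

import os
import pickle

data_dir = '/path/to/omniglot'  # placeholder: the directory passed as --data-dir

# The pickle holds the six arrays in the order modify() packed them.
with open(os.path.join(data_dir, 'train_val_test_split.pkl'), 'rb') as f:
    (train_images, train_labels,
     val_images, val_labels,
     test_images, test_labels) = pickle.load(f)

print(train_images.shape, val_images.shape, test_images.shape)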


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

