How to use the exists_file method in Airtest

Best Python code snippet using Airtest
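A note before the snippets: exists_file is not a built-in Airtest API call (Airtest's own exists() checks whether a template image appears on screen); in the examples below it is an ordinary user-defined helper for checking files on disk. A minimal sketch of the shape it takes in the first snippet (the res/pickle path layout and .pkl suffix are copied from that snippet):

import os

def exists_file(name):
    # True if the pickled artifact <name>.pkl is already on disk
    return os.path.exists(os.path.join('res', 'pickle', name + '.pkl'))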

search_data.py

Source: search_data.py (GitHub)


...
# (imports elided above: the file uses `from os import path`, `import pickle as pkl`,
#  `import string`, `from re import finditer`, and gensim's SparseMatrixSimilarity,
#  MatrixSimilarity, TfidfModel, LsiModel, Doc2Vec, TaggedDocument, simple_preprocess)
    save_data(dictionary, 'dictionary')
    save_data(bow, 'bow')
    return corpus, dictionary, bow

def exists_data_files():
    return exists_file('corpus') and exists_file('dictionary') and exists_file('bow')

def exists_file(name):
    # Cached artifacts live under res/pickle/<name>.pkl
    return path.exists('res/pickle/' + name + '.pkl')

def load_data_files():
    return load_file('corpus'), load_file('dictionary'), load_file('bow')

def save_data(data, name):
    with open('res/pickle/' + name + '.pkl', 'wb') as f:
        pkl.dump(data, f, protocol=pkl.HIGHEST_PROTOCOL)

def load_file(name):
    with open('res/pickle/' + name + '.pkl', 'rb') as f:
        return pkl.load(f)

def split_space(text):
    return text.translate(str.maketrans('', '', string.punctuation)).split(' ') if text != "" else []

def split_underscore(tokens):
    return [word for token in tokens for word in token.split('_')]

def handle_camel_case(tokens):
    words = []
    for token in tokens:
        matches = finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', token)
        words += [m.group(0) for m in matches]
    return words

def normalize_tokens(tokens):
    return [token.lower() for token in tokens]

def filter_stopwords(tokens):
    # Drop the whole token list when it contains a stop word
    for token in tokens:
        if token in ['test', 'tests', 'main']:
            return []
    return tokens

def normalize_query(query):
    return query.strip().lower().split()

def query_frequency(query, bow, dictionary):
    return filter_results(create_top_5_result_tuples(get_freq_model(bow, dictionary)[dictionary.doc2bow(query)]))

def get_freq_model(bow, dictionary):
    return load_file('model_freq') if exists_file('model_freq') else create_freq_model(bow, dictionary)

def create_freq_model(bow, dictionary):
    model = SparseMatrixSimilarity(bow, num_features=len(dictionary.token2id))
    save_data(model, 'model_freq')
    return model

def query_tfidf(query, bow, dictionary):
    model = get_tfidf_model(bow)
    matrix = get_tfidf_matrix(model, bow, dictionary)
    return filter_results(create_top_5_result_tuples(matrix[model[dictionary.doc2bow(query)]]))

def get_tfidf_model(bow):
    return load_file('model_tfidf') if exists_file('model_tfidf') else create_tfidf_model(bow)

def create_tfidf_model(bow):
    model = TfidfModel(bow)
    save_data(model, 'model_tfidf')
    return model

def get_tfidf_matrix(model, bow, dictionary):
    return load_file('matrix_tfidf') if exists_file('matrix_tfidf') else create_tfidf_matrix(model, bow, dictionary)

def create_tfidf_matrix(model, bow, dictionary):
    matrix = SparseMatrixSimilarity(model[bow], num_features=len(dictionary.token2id))
    save_data(matrix, 'matrix_tfidf')
    return matrix

def query_lsi(query, bow, dictionary):
    model = get_lsi_model(bow, dictionary)
    matrix = get_lsi_matrix(model, bow)
    vector = model[dictionary.doc2bow(query)]
    result = abs(matrix[vector])
    embedding = [[value for _, value in vector]] + \
        [[value for _, value in model[bow][i]]
         for i, value in sorted(enumerate(result), key=lambda x: x[1], reverse=True)[:5]]
    return filter_results(create_top_5_result_tuples(result)), embedding

def get_lsi_model(bow, dictionary):
    return load_file('model_lsi') if exists_file('model_lsi') else create_lsi_model(bow, dictionary)

def create_lsi_model(bow, dictionary):
    model = LsiModel(bow, id2word=dictionary, num_topics=300)
    save_data(model, 'model_lsi')
    return model

def get_lsi_matrix(model, bow):
    return load_file('matrix_lsi') if exists_file('matrix_lsi') else create_lsi_matrix(model, bow)

def create_lsi_matrix(model, bow):
    matrix = MatrixSimilarity(model[bow])
    save_data(matrix, 'matrix_lsi')
    return matrix

def create_top_5_result_tuples(arrg):
    return sorted(enumerate(arrg), key=lambda x: x[1], reverse=True)[:5]

def filter_results(tuples):
    return [i for i, v in tuples]

def query_doc2vec(query, corpus):
    model = get_doc2vec_model(get_doc2vec_corpus(corpus))
    vector = model.infer_vector(query)
    similar = model.docvecs.most_similar([vector], topn=5)
    return [index for (index, _) in similar], \
        [list(vector)] + [list(model.infer_vector(corpus[index])) for index, _ in similar]

def get_doc2vec_corpus(corpus):
    return [TaggedDocument(simple_preprocess(' '.join(element)), [index])
            for index, element in enumerate(corpus)]

def get_doc2vec_model(corpus):
    return load_file('model_doc2vec') if exists_file('model_doc2vec') else create_doc2vec_model(corpus)

def create_doc2vec_model(corpus):
    model = Doc2Vec(vector_size=300, min_count=2, epochs=77)
    model.build_vocab(corpus)
    model.train(corpus, total_examples=model.corpus_count, epochs=model.epochs)
    save_data(model, 'model_doc2vec')
    return model

def create_result_dataframe(queries_dictionary, df):
    for key, values in queries_dictionary.items():
        for index in values:
            row = df.iloc[index]
            yield [row["name"], row["file"], row["line"], row["type"], row["comment"], key]

def print_results(df):
    grouped = df.groupby(['search'])
    for key, item in grouped:
...
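The pattern worth taking from this snippet is the cache-or-build guard: each get_*_model helper calls exists_file first, loads the pickle when it is present, and otherwise builds the model and persists it. A generic version of that guard, as a sketch (the cached name and build_fn parameter are illustrative, not from the source):

import os
import pickle as pkl

def cached(name, build_fn):
    # Load a pickled artifact if it exists; otherwise build it and persist it.
    path = os.path.join('res', 'pickle', name + '.pkl')
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pkl.load(f)
    data = build_fn()
    with open(path, 'wb') as f:
        pkl.dump(data, f, protocol=pkl.HIGHEST_PROTOCOL)
    return data

# Usage, mirroring get_tfidf_model:
# model = cached('model_tfidf', lambda: TfidfModel(bow))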


test_import_particles.py

Source: test_import_particles.py (GitHub)


import os
import sys
import unittest

import numpy as np
import h5py as h5

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from import_toolkit.cluster import Cluster

np.set_printoptions(suppress=True)


class TestMixin(unittest.TestCase):

    def test_particle_group_number(self):
        # Read in celr_e | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-pear3/data/eagle/halo_00/data/particledata_029_z000p000'
        with h5.File(os.path.join(path, 'eagle_subfind_particles_029_z000p000.0.hdf5'), 'r') as f:
            hd5set = f['/PartType1/GroupNumber']
            group_number = hd5set[...]
        print(f"\n{' celr_e | halo 0 | z=0 ':-^60}")
        print(f"Particles with group number < 0: {len(np.where(group_number < 0)[0])} particles found.")
        print(f"Particles with group number = 0: {len(np.where(group_number == 0)[0])} particles found.")
        print(f"Particles with group number = 1: {len(np.where(group_number == 1)[0])} particles found.")

        # Read in celr_b | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-pear3/data/bahamas/halo_00/data/particledata_029'
        with h5.File(os.path.join(path, 'eagle_subfind_particles_029.0.hdf5'), 'r') as f:
            hd5set = f['/PartType1/GroupNumber']
            group_number = hd5set[...]
        print(f"\n{' celr_b | halo 0 | z=0 ':-^60}")
        print(f"Particles with group number < 0: {len(np.where(group_number < 0)[0])} particles found.")
        print(f"Particles with group number = 0: {len(np.where(group_number == 0)[0])} particles found.")
        print(f"Particles with group number = 1: {len(np.where(group_number == 1)[0])} particles found.")

        # Read in macsis | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-hens1/macsis/macsis_gas/halo_0000/data/particledata_022'
        with h5.File(os.path.join(path, 'eagle_subfind_particles_022.0.hdf5'), 'r') as f:
            hd5set = f['/PartType1/GroupNumber']
            group_number = hd5set[...]
        print(f"\n{' macsis | halo 0 | z=0 ':-^60}")
        print(f"Particles with group number < 0: {len(np.where(group_number < 0)[0])} particles found.")
        print(f"Particles with group number = 0: {len(np.where(group_number == 0)[0])} particles found.")
        print(f"Particles with group number = 1: {len(np.where(group_number == 1)[0])} particles found.")

        # Read in ceagle | halo 0 | z=0: this snapshot is split across numbered
        # chunk files, so keep reading until the next chunk is missing.
        path = '/cosma5/data/dp004/C-EAGLE/Complete_Sample/CE_00/data/particledata_029_z000p000'
        group_number = np.zeros(0, dtype=int)  # np.int is removed in recent NumPy
        file_index = 0
        while file_index > -1:
            try:
                with h5.File(os.path.join(path, f'eagle_subfind_particles_029_z000p000.{file_index}.hdf5'),
                             'r') as f:
                    hd5set = f['/PartType1/GroupNumber']
                    group_number = np.concatenate((group_number, hd5set[...]), axis=0)
                file_index += 1
            except OSError:  # narrowed from a bare `except`: h5py raises OSError for a missing file
                file_index = -1
        print(f"\n{' ceagle | halo 0 | z=0 ':-^60}")
        print(f"Particles with group number < 0: {len(np.where(group_number < 0)[0])} particles found.")
        print(f"Particles with group number = 0: {len(np.where(group_number == 0)[0])} particles found.")
        print(f"Particles with group number = 1: {len(np.where(group_number == 1)[0])} particles found.")

    def test_filenames(self):
        # Read in celr_e | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-pear3/data/eagle'
        exists_dir = os.path.isdir(os.path.join(path, 'halo_00/data/particledata_029_z000p000'))
        exists_file = os.path.isfile(os.path.join(path, 'halo_00/data/particledata_029_z000p000',
                                                  'eagle_subfind_particles_029_z000p000.0.hdf5'))
        print(f"\n{' celr_e | halo 0 | z=0 ':-^60}")
        print(f"Data directory exists: {exists_dir}.")
        print(f"Data file exists: {exists_file}.")

        # Read in celr_b | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-pear3/data/bahamas'
        exists_dir = os.path.isdir(os.path.join(path, 'halo_00/data/particledata_029'))
        exists_file = os.path.isfile(os.path.join(path, 'halo_00/data/particledata_029',
                                                  'eagle_subfind_particles_029.0.hdf5'))
        print(f"\n{' celr_b | halo 0 | z=0 ':-^60}")
        print(f"Data directory exists: {exists_dir}.")
        print(f"Data file exists: {exists_file}.")

        # Read in macsis | halo 0 | z=0
        path = '/cosma5/data/dp004/dc-hens1/macsis/macsis_gas'
        exists_dir = os.path.isdir(os.path.join(path, 'halo_0000/data/particledata_022'))
        exists_file = os.path.isfile(os.path.join(path, 'halo_0000/data/particledata_022',
                                                  'eagle_subfind_particles_022.0.hdf5'))
        print(f"\n{' macsis | halo 0 | z=0 ':-^60}")
        print(f"Data directory exists: {exists_dir}.")
        print(f"Data file exists: {exists_file}.")

        # Read in ceagle | halo 0 | z=0
        path = '/cosma5/data/dp004/C-EAGLE/Complete_Sample'
        exists_dir = os.path.isdir(os.path.join(path, 'CE_00/data/particledata_029_z000p000'))
        print(f"\n{' ceagle | halo 0 | z=0 ':-^60}")
        print(f"Data directory exists: {exists_dir}.")
        collection_exists_file = []
        file_index = 0
        exists_file = True
        while exists_file:
            exists_file = os.path.isfile(os.path.join(path, 'CE_00/data/particledata_029_z000p000',
                                                      f'eagle_subfind_particles_029_z000p000.{file_index}.hdf5'))
            collection_exists_file.append(exists_file)
            print(f"Data file {file_index:03d} exists: {exists_file}.")
            file_index += 1

        print(f"{' SOFTWARE TEST ':=^60}")
        for sim in ['celr_e', 'celr_b', 'macsis', 'ceagle']:
            cluster = Cluster(simulation_name=sim, clusterID=0, redshift='z000p000')
            print(f"\n {sim}{' | halo 0 | z=0 ':-^60}")
            # print("cluster.groups_filePaths", cluster.groups_filePaths(), sep='\n')
            # Check the group files exist
            for file in cluster.groups_filePaths():
                print(os.path.isfile(file), file)
            # print("cluster.partdata_filePaths", cluster.partdata_filePaths(), sep='\n')
            # Check the particle-data files exist
            for file in cluster.partdata_filePaths():
                print(os.path.isfile(file), file)


if __name__ == '__main__':
...
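Both tests rely on the same observation: a snapshot is split across numbered chunk files, so the code probes eagle_subfind_particles_*.<N>.hdf5 for N = 0, 1, 2, ... until the next file is missing, via os.path.isfile in one test and a caught OSError in the other. A compact version of that probe, as a sketch (count_chunks is an illustrative name, not from the source):

import os
from itertools import count

def count_chunks(directory, pattern):
    # Count consecutive chunk files such as 'particles.0.hdf5', 'particles.1.hdf5', ...
    for index in count():
        if not os.path.isfile(os.path.join(directory, pattern.format(index))):
            return index

# Usage:
# n = count_chunks(path, 'eagle_subfind_particles_029_z000p000.{}.hdf5')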


ImageNET.py

Source: ImageNET.py (GitHub)


from DL_img_utils import tar_unzip
from DL_img_utils import ano_img
from DL_img_utils import Annotation_folder_check
from DL_img_utils import delete_error_img
from DL_img_utils import class_update
from DL_img_utils import delete_error_ano
import os
import argparse
from DL_img_utils import File_matome
from DL_img_utils import trans_YOLO_Data
import shutil


def Img_DL(class_name, limit, mode):
    # Load the class list
    # fileobj = open("./DL_img_utils/TEST_LIST.txt", "r", encoding="utf_8")
    fileobj = open("./DL_img_utils/ANO_LIST.txt", "r", encoding="utf_8")
    exists_file = 0
    wnid_list = []
    while True:
        line = fileobj.readline()
        tmp_class_list = line.split()
        class_list = []
        # Strip commas from the class names
        for s in tmp_class_list:
            if ',' in s:
                text = s.replace(',', '')
                class_list.append(text)
            else:
                class_list.append(s)
        # class_name = class_name + " "
        # print(class_name, "//")
        if line:
            if class_name in class_list:
                wnid = line.split()[0]
                if os.path.exists("./DL_img_utils/Annotation_all/" + wnid + ".tar.gz"):
                    exists_file = 1
                    wnid_list.append(wnid)
                    print(wnid)
                    # Unpack the archive that contains the annotations
                    tar_unzip.main(wnid)
                    # Check the annotation folder for invalid files
                    Annotation_folder_check.main(wnid)
                    # Download the images
                    ano_img.main(wnid, limit=limit, verbose=False)
                    # Delete broken images
                    delete_error_img.main(wnid)
                    # Delete broken annotations
                    delete_error_ano.main(wnid)
        else:
            break
    return exists_file


# Download images for the class ID passed as an argument
def ALL_DL(wnid, class_name):
    if os.path.exists("./DL_img_utils/Annotation_all/" + wnid + ".tar.gz"):
        # Check the annotation folder for invalid files
        Annotation_folder_check.main(wnid)
        # Download the images
        ano_img.main(wnid, limit=500, verbose=False)
        # Delete broken images
        delete_error_img.main(wnid)
        # Delete broken annotations
        delete_error_ano.main(wnid)

    # Consolidate the files
    print("===========================")
    print("=== Organising files... ===")
    print("===========================")
    File_matome.main(wnid, class_name, limit=500)
    """
    fileobj = open("./DL_img_utils/Class.txt", "r", encoding="utf_8")
    # Check for duplicates
    Duplicate = False
    while True:
        line = fileobj.readline()
        if line:
            if line == class_name:
                Duplicate = True
        else:
            break
    fileobj.close()
    # Append the class to the class list
    if Duplicate == False:
        with open("./DL_img_utils/Class.txt", mode='a') as f:
            print("ImageNet_line90:class_name:", class_name)
            f.write('{}\n'.format(class_name))
    """

    trans_YOLO_Data.main(class_name)


def main():
    # Set the number of images to download
    limit = opt.limit
    # Load the class list
    fileobj = open("./DL_img_utils/TEST_LIST.txt", "r", encoding="utf_8")
    # Read the class name from the user
    class_name = input("Class name: ")
    # class_name = "remote"
    # List of wnids whose class name matches, plus a flag recording
    # whether images for the requested class were found
    exists_file = 0
    wnid_list = []
    while True:
        line = fileobj.readline()
        class_list = line.split()
        if line:
            if class_name in class_list:
                wnid = line.split()[0]
                if os.path.exists("./DL_img_utils/Annotation_all/" + wnid + ".tar.gz"):
                    exists_file = 1
                    wnid_list.append(wnid)
                    print(wnid)
                    # Unpack the archive that contains the annotations
                    tar_unzip.main(wnid)
                    # Check the annotation folder for invalid files
                    Annotation_folder_check.main(wnid)
                    # Download the images
                    ano_img.main(wnid, verbose=False)
                    # Delete broken images
                    delete_error_img.main(wnid)
                    # Delete broken annotations
                    delete_error_ano.main(wnid)
        else:
            break
    # Consolidate the files
    if opt.mode == 0 and exists_file == 1:
        print("===========================")
        print("=== Organising files... ===")
        print("===========================")
        # File_matome.main(wnid_list[0], class_name, 0)
        with open("./DL_img_utils/Class.txt", mode='a') as f:
            f.write('{}\n'.format(class_name))
        trans_YOLO_Data.main()
        # Delete files that are no longer needed
        # shutil.rmtree('./DL_img_utils/Annotations')
        # shutil.rmtree('./DL_img_utils/img')
    elif exists_file == 0:
        print("The requested image class has not been implemented yet")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--limit', type=int, default=0, help='Number of images to download')
    parser.add_argument('--mode', type=int, default=0, help='0 is download, 1 is a trial run')
    opt = parser.parse_args()
...
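In this script exists_file is just an integer flag recording whether an annotation archive was found for the requested class; the actual existence check is a plain os.path.exists on the tarball path. A small boolean helper makes that intent clearer, as a sketch (has_annotation_archive is an illustrative name, not from the source):

import os

def has_annotation_archive(wnid, root="./DL_img_utils/Annotation_all"):
    # True when the ImageNet annotation tarball for this wnid has been downloaded
    return os.path.exists(os.path.join(root, wnid + ".tar.gz"))

# Usage inside the download loop:
# if has_annotation_archive(wnid):
#     ...unpack, download, and clean the images for wnid...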


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Airtest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!!

Get 100 minutes of automation testing FREE!!

