How to use the _get_abs_path method in avocado

Best Python code snippet using avocado_python
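Both snippets below use _get_abs_path as a small private helper defined inside the project itself: it turns a relative file name into an absolute path anchored to the directory of the module that defines the helper, so file lookups do not depend on the current working directory of the process. A minimal sketch of that pattern (the body mirrors the files_utils.py snippet further down; "config/infer.json" is only an illustrative argument):

import os

def _get_abs_path(path):
    # Anchor `path` to the directory containing this module,
    # not to the process's current working directory.
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)

# Example: resolve a config file that ships next to this module.
config_path = _get_abs_path("config/infer.json")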

train.py

Source: train.py (GitHub)

...
def download_data(data_url, md5_url):
    """
    download_data
    """
    model_files = _get_abs_path("model_files.tar.gz")
    md5_files = _get_abs_path("model_files.tar.gz.md5")
    md5_files_new = _get_abs_path("model_files.tar.gz.md5.new")
    model_files_prefix = _get_abs_path("model_files")
    get_http_url(md5_url, md5_files_new)
    if os.path.exists(model_files) and os.path.exists(md5_files):
        with open(md5_files, 'r') as fr:
            md5 = fr.readline().strip('\r\n').split(' ')[0]
        with open(md5_files_new, 'r') as fr:
            md5_new = fr.readline().strip('\r\n').split(' ')[0]
        if md5 == md5_new:
            return 0
    if os.path.exists(model_files):
        os.remove(model_files)
    if os.path.exists(model_files_prefix):
        shutil.move(model_files_prefix, model_files_prefix + '.' + str(int(time.time())))
    shutil.move(md5_files_new, md5_files)
    get_http_url(data_url, model_files)
    untar(model_files, _get_abs_path("./"))
    return 1

def dataset_reader_from_params(params_dict):
    """
    :param params_dict:
    :return:
    """
    dataset_reader = DataSet(params_dict)
    dataset_reader.build()
    return dataset_reader

def model_from_params(params_dict):
    """
    :param params_dict:
    :return:
    """
    opt_params = params_dict.get("optimization", None)
    dataset_reader = params_dict.get("dataset_reader")
    num_train_examples = 0
    # compute warmup_steps according to the configuration
    if opt_params and opt_params.__contains__("warmup_steps"):
        trainers_num = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
        num_train_examples = dataset_reader.train_reader.get_num_examples()
        batch_size_train = dataset_reader.train_reader.config.batch_size
        epoch_train = dataset_reader.train_reader.config.epoch
        max_train_steps = epoch_train * num_train_examples // batch_size_train // trainers_num
        warmup_steps = opt_params.get("warmup_steps", 0)
        if warmup_steps == 0:
            warmup_proportion = opt_params.get("warmup_proportion", 0.1)
            warmup_steps = int(max_train_steps * warmup_proportion)
        logging.info("Device count: %d" % trainers_num)
        logging.info("Num train examples: %d" % num_train_examples)
        logging.info("Max train steps: %d" % max_train_steps)
        logging.info("Num warmup steps: %d" % warmup_steps)
        opt_params = {}
        opt_params["warmup_steps"] = warmup_steps
        opt_params["max_train_steps"] = max_train_steps
        opt_params["num_train_examples"] = num_train_examples
        # combine params dict
        params_dict["optimization"].update(opt_params)
    model_name = params_dict.get("type")
    model_class = RegisterSet.models.__getitem__(model_name)
    model = model_class(params_dict)
    return model, num_train_examples

def build_trainer(params_dict, dataset_reader, model, num_train_examples=0):
    """build trainer"""
    trainer_name = params_dict.get("type", "CustomTrainer")
    trainer_class = RegisterSet.trainer.__getitem__(trainer_name)
    params_dict["num_train_examples"] = num_train_examples
    trainer = trainer_class(params=params_dict, data_set_reader=dataset_reader, model_class=model)
    return trainer

class Senta(object):
    """docstring for Senta"""
    def __init__(self):
        super(Senta, self).__init__()
        self.__get_params()

    def __get_params(self):
        """
        __get_params
        """
        config_dir = _get_abs_path("config")
        param_path = os.path.join(config_dir, 'infer.json')
        param_dict = from_file(param_path)
        self._params = replace_none(param_dict)

    def __load_inference_model(self, model_path, use_gpu):
        """
        :param meta_path:
        :return:
        """
        check_cuda(use_gpu)
        config = AnalysisConfig(model_path + "/" + "model", model_path + "/" + "params")
        if use_gpu:
            config.enable_use_gpu(1024)
        else:
            config.disable_gpu()
            config.enable_mkldnn()
        inference = create_paddle_predictor(config.to_native_config())
        return inference

    def get_support_model(self):
        """
        get_support_model
        """
        pre_train_model = list(self._params.get("model_name").keys())
        return pre_train_model

    def get_support_task(self):
        """
        get_support_task
        """
        tasks = list(self._params.get("task_name").keys())
        return tasks

    def init_model(self, model_class="ernie_1.0_skep_large_ch", task="sentiment_classify", use_cuda=False):
        """
        init_model
        """
        ptm = self._params.get("model_name").get(model_class)
        ptm_id = ptm.get('type')
        task_id = self._params.get("task_name").get(task)
        model_dict = self._params.get("model_class").get(ptm_id + task_id)
        # step 1: get_init_model, if download
        data_url = model_dict.get("model_file_http_url")
        md5_url = model_dict.get("model_md5_http_url")
        is_download_data = download_data(data_url, md5_url)
        # step 2: get model_class
        register.import_modules()
        model_name = model_dict.get("type")
        self.model_class = RegisterSet.models.__getitem__(model_name)(model_dict)
        # step 3: init data params
        model_path = _get_abs_path(model_dict.get("inference_model_path"))
        data_params_path = model_path + "/infer_data_params.json"
        param_dict = from_file(data_params_path)
        param_dict = replace_none(param_dict)
        self.input_keys = param_dict.get("fields")
        # step 4: init env
        self.inference = self.__load_inference_model(model_path, use_cuda)
        # step 5: tokenizer
        tokenizer_info = model_dict.get("predict_reader").get('tokenizer')
        tokenizer_name = tokenizer_info.get('type')
        tokenizer_vocab_path = _get_abs_path(tokenizer_info.get('vocab_path'))
        tokenizer_params = None
        if tokenizer_info.__contains__("params"):
            tokenizer_params = tokenizer_info.get("params")
            bpe_v_file = tokenizer_params["bpe_vocab_file"]
            bpe_j_file = tokenizer_params["bpe_json_file"]
            tokenizer_params["bpe_vocab_file"] = _get_abs_path(bpe_v_file)
            tokenizer_params["bpe_json_file"] = _get_abs_path(bpe_j_file)
        tokenizer_class = RegisterSet.tokenizer.__getitem__(tokenizer_name)
        self.tokenizer = tokenizer_class(vocab_file=tokenizer_vocab_path,
                                         split_char=" ",
                                         unk_token="[UNK]",
                                         params=tokenizer_params)
        self.max_seq_len = 512
        self.truncation_type = 0
        self.padding_id = 1 if tokenizer_name == "GptBpeTokenizer" else 0
        self.inference_type = model_dict.get("inference_type", None)
        # step 6: label_map
        label_map_file = model_dict.get("label_map_path", None)
        self.label_map = {}
        if isinstance(label_map_file, str):
            label_map_file = _get_abs_path(label_map_file)
            with open(label_map_file, 'r') as fr:
                for line in fr.readlines():
                    line = line.strip('\r\n')
                    items = line.split('\t')
                    idx, label = int(items[1]), items[0]
                    self.label_map[idx] = label

    def predict(self, texts_, aspects=None):
        """
        the sentiment classifier's function
        :param texts: a unicode string or a list of unicode strings.
        :return: sentiment prediction results.
        """
        if isinstance(texts_, text_type):
            ...
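In train.py, _get_abs_path keeps the downloaded model archive, its cached .md5 file, and the extracted model_files directory next to the module, and download_data only re-downloads when the remote checksum differs from the cached one. A self-contained sketch of that check, using the standard library in place of the project's get_http_url helper and recomputing the local MD5 instead of caching a .md5 file (the archive name is illustrative):

import hashlib
import os
import urllib.request

def _get_abs_path(path):
    # Same helper pattern as in the snippets: resolve relative to this module.
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)

def needs_download(md5_url, archive_name="model_files.tar.gz"):
    """Return True when the cached archive is missing or its MD5 no longer matches."""
    archive = _get_abs_path(archive_name)
    if not os.path.exists(archive):
        return True
    # Remote .md5 files are usually "<hex digest>  <file name>"; keep the digest only.
    remote_md5 = urllib.request.urlopen(md5_url).read().decode().strip().split(" ")[0]
    with open(archive, "rb") as f:
        local_md5 = hashlib.md5(f.read()).hexdigest()
    return local_md5 != remote_md5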

files_utils.py

Source: files_utils.py (GitHub)

import os
import re

def _get_abs_path(path):
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), path).replace('utils/', '')

def get_cloudformation_templates(reverse=False):
    folder_templates = 'templates-cloudformation'
    cf_templates = []
    files = os.listdir(_get_abs_path(folder_templates))
    files.sort(reverse=reverse)
    for filename in files:
        path = _get_abs_path(folder_templates) + "/" + filename
        with open(path) as f:
            template_body = f.read()
        cf_template = {
            'stack_name': 'cfn-' + filename.split('.')[1],
            'template_body': template_body,
            'filename': filename
        }
        cf_templates.append(cf_template)
    ...
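Two details of this definition are easy to miss: realpath(__file__) pins the base directory to files_utils.py itself, so symlinks and the caller's working directory do not matter, and the trailing .replace('utils/', '') strips the utils/ segment so that, with the module living in a utils/ folder, paths resolve against the project root rather than against utils/. A short usage sketch (the templates-cloudformation folder comes from the snippet above; the import path is an assumption about the project layout):

# Assumes a templates-cloudformation/ directory at the project root,
# next to the utils/ package that holds files_utils.py.
from utils.files_utils import get_cloudformation_templates

for tpl in get_cloudformation_templates(reverse=True):
    print(tpl['stack_name'], '->', tpl['filename'])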

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
