How to use error_step method in grail

Best Python code snippet using grail_python

SQL_converter.py

Source: SQL_converter.py (GitHub)


# -*- coding: utf-8 -*-
import json
import os
import codecs
import pickle
import babel
import unicodedata
import re
from tqdm import tqdm
from babel.numbers import parse_decimal, NumberFormatError
from nsm import word_embeddings
from nsm import data_utils
from utils import load_jsonl, create_envs, collect_traj_for_program, FLAGS


def find_cmd_head(program, pos):
    '''
    find the command name given a position in the whole program
    '''
    if program[pos] == '(':
        return 'abort', 0
    for i in range(pos, -1, -1):
        if program[i] == '(':
            return program[i+1], (pos-i)
    return None, None


def find_entity(namespace, token, type_constraint=None):
    '''
    # first try to determine the type of the entity being numerical or string
    if type_constraint is None:
        try:
            if type(token) == int or type(token) == float:
                val = float(token)
            else:
                val = float(babel.numbers.parse_decimal(token))
            numerical = True
        except NumberFormatError:
            val = normalize(token)
            numerical = False
    else:
        if type_constraint == 'str':
            numerical = False
            val = token if type(token) is str else str(int(token))
        else:
            raise NotImplementedError
    '''
    if type(token) == unicode:
        numerical = False
        val = normalize(token)
    else:
        numerical = True
        val = float(token)
    while True:
        # now we enumerate to find the entity
        for i in range(namespace.n_var-1, -1, -1):
            variable = namespace['v'+str(i)]
            if variable['type'] == 'num_list' and numerical:
                # sanity check
                assert(len(variable['value']) == 1)
                if val == variable['value'][0]:
                    return i, numerical
            elif variable['type'] == 'string_list' and not numerical:
                # sanity check
                assert(len(variable['value']) == 1)
                if val == variable['value'][0]:
                    return i, numerical
            else:
                continue
                # return -1, numerical
        # entity not found -> try the other type
        if not numerical:
            try:
                val = float(babel.numbers.parse_decimal(token))
                numerical = True
            except NumberFormatError:
                return -1, numerical
        else:
            return -1, numerical
    # return -1 when such token is not found as an entity
    return -1, numerical


# ################## copied from preprocess.py ##############################
def normalize(x):
    if not isinstance(x, unicode):
        x = x.decode('utf8', errors='ignore')
    # Remove diacritics
    x = ''.join(c for c in unicodedata.normalize('NFKD', x)
                if unicodedata.category(c) != 'Mn')
    # Normalize quotes and dashes
    x = re.sub(ur"[‘’´`]", "'", x)
    x = re.sub(ur"[“”]", "\"", x)
    x = re.sub(ur"[‐‑‒–—−]", "-", x)
    while True:
        old_x = x
        # Remove citations
        x = re.sub(ur"((?<!^)\[[^\]]*\]|\[\d+\]|[•♦†‡*#+])*$", "", x.strip())
        # Remove details in parenthesis
        x = re.sub(ur"(?<!^)( \([^)]*\))*$", "", x.strip())
        # Remove outermost quotation mark
        x = re.sub(ur'^"([^"]*)"$', r'\1', x.strip())
        if x == old_x:
            break
    # Remove final '.'
    if x and x[-1] == '.':
        x = x[:-1]
    # Collapse whitespaces and convert to lower case
    x = re.sub(ur'\s+', ' ', x, flags=re.U).lower().strip()
    return x
# ################## copied from preprocess.py ##############################


def convert(env):
    '''
    Some explanation for the sql language:
        sel: column index (starting from 0)
        agg: aggregation index
        conds: a list of cond
        cond: column_idx, operator_idx, condition
        operator: ['=', '>', '<', 'OP']
        aggregation: ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
    '''
    # define some constants to fit the grammar of wikisql
    filters = ['filter_eq', 'filter_greater', 'filter_less', 'filter_other', 'filter_eq']
    aggregations = ['none', 'maximum', 'minimum', 'count_stub', 'sum', 'average']
    sql = env.question_annotation['sql']
    code = []
    # first select the rows according to the condition using filters
    for i, cond in enumerate(sql['conds']):
        if i == 0:
            rows = 'all_rows'
        else:
            rows = 'v' + str(env.interpreter.namespace.n_var + i - 1)
        # 1. get the column index
        column = 'v' + str(cond[0])
        # 2. try to locate the token in the entity list and get its type
        entity_var, entity_numerical = find_entity(env.interpreter.namespace, cond[2])
        if entity_var == -1:
            return None
        else:
            value = 'v' + str(entity_var)
        # 3. use the correct filter
        if cond[1] == 0:
            # equal filter, need to know the type
            fltr = filters[0] if entity_numerical else filters[4]
        else:
            fltr = filters[cond[1]]
        statement = ' '.join(['(', fltr, rows, value, column, ')'])
        code.append(statement)
    # then perform the aggregation process
    rows = 'all_rows' if len(sql['conds']) == 0 else ('v' + str(env.interpreter.namespace.n_var + len(sql['conds']) - 1))
    if sql['agg'] == 0:  # just hop to the value of the first row
        column = 'v' + str(sql['sel'])
        statement = ' '.join(['(', 'hop', rows, column, ')'])
    else:
        if sql['agg'] == 3:  # count, which has slightly different grammar than the other aggs
            statement = ' '.join(['(', 'count', rows, ')'])
        else:
            agg = aggregations[sql['agg']]
            column = 'v' + str(sql['sel'])
            statement = ' '.join(['(', agg, rows, column, ')'])
    code.append(statement)
    # add <END> to the end of the program for proper interpretation
    code.append('<END>')
    code = ' '.join(code)
    return code


def get_env_trajs(envs):
    good_oracle_envs = []
    envs_trajs = []
    envs_programs = []
    for i, env in enumerate(envs):
        program = convert(env)
        if program is not None:  # means all entities are found
            traj, error_info = collect_traj_for_program(env, program.split(' '), debug=True)
            if traj is None:
                # find the error cmd
                program = error_info[2]
                error_step = len(error_info[1])
                cmd, re_pos = find_cmd_head(program, error_step)
                error_cmd_pos = error_step - re_pos + 1
                # attempt to fix the traj if the interpreter failed to parse
                if cmd == 'filter_eq':
                    # change the entity from the num_list one to the string_list one
                    num_entity_idx = int(program[error_cmd_pos+2][1:])
                    str_entity = 'v' + str(num_entity_idx-1)
                    # see if we get the right str entity
                    if env.interpreter.namespace[str_entity]['type'] == 'string_list':
                        program[error_cmd_pos] = 'filter_eq'
                        program[error_cmd_pos+2] = str_entity
                        # see if this simple flip fixes the problem
                        traj, error_info = collect_traj_for_program(env, program, debug=True)
            if traj is not None:  # means the interpreter can successfully parse the converted code
                if traj.rewards[-1] == 1.0:  # means the code can get reward in the environment
                    good_oracle_envs.append(env)
                    envs_trajs.append(traj)
                    envs_programs.append(program)
    return good_oracle_envs, envs_trajs  # , envs_programs


data_folder = "../../data/wikisql/"
train_shard_dir = "../../data/wikisql/processed_input/preprocess_4/"
train_shard_prefix = "train_split_shard_30-"
table_file = "../../data/wikisql/processed_input/preprocess_4/tables.jsonl"
vocab_file = "../../data/wikisql/raw_input/wikisql_glove_vocab.json"
embedding_file = "../../data/wikisql/raw_input/wikisql_glove_embedding_mat.npy"
en_vocab_file = "../../data/wikisql/processed_input/preprocess_4/en_vocab_min_count_5.json"


def get_train_shard_path(i):
    return os.path.join(train_shard_dir, train_shard_prefix + str(i) + '.jsonl')


def get_envs(env_files=None):
    dataset = []
    if env_files is None:
        fns = [get_train_shard_path(i) for i in range(0, 30)]
    else:
        fns = env_files
    for fn in fns:
        dataset += load_jsonl(fn)
    tables = load_jsonl(table_file)
    table_dict = dict([(table['name'], table) for table in tables])
    # Load pretrained embeddings.
    embedding_model = word_embeddings.EmbeddingModel(vocab_file, embedding_file)
    with open(en_vocab_file, 'r') as f:
        vocab = json.load(f)
    en_vocab = data_utils.Vocab([])
    en_vocab.load_vocab(vocab)
    # Create environments.
    envs = create_envs(table_dict, dataset, en_vocab, embedding_model)
    return envs


def error_analysis():
    f = codecs.open('converter_error_list.bin', 'rb')
    error_list = pickle.load(f)
    f.close()
    # pick the ones that failed to interpret
    interpreter_error_list = [error_example for error_example in error_list if error_example[0] == 'interpreter failed to parse']
    # two sets of commands (filter/aggregation) that make the interpreter fail
    filter_list = ['filter_eq', 'filter_greater', 'filter_less', 'filter_str_contain_any']
    agg_list = ['maximum', 'minimum', 'count', 'sum', 'average']
    other_cmd_list = ['hop', 'abort']
    # see, for filters/agg, at which argument they fail to interpret
    filter_error_dict = dict()
    agg_error_dict = dict()
    other_error_dict = dict()
    for filter in filter_list:
        filter_error_dict[filter] = [0, 0, 0, 0, 0, 0]
    for agg in agg_list:
        agg_error_dict[agg] = [0, 0, 0, 0, 0]
    for cmd in other_cmd_list:
        other_error_dict[cmd] = [0, 0, 0, 0, 0]
    # see at which step (filter/aggregation) they fail
    for example in interpreter_error_list:
        program = example[1][2]
        error_step = len(example[1][1])
        cmd, re_pos = find_cmd_head(program, error_step)
        if cmd in filter_list:
            filter_error_dict[cmd][re_pos] += 1
        elif cmd in agg_list:
            agg_error_dict[cmd][re_pos] += 1
        elif cmd in other_cmd_list:
            other_error_dict[cmd][re_pos] += 1
        else:
            print(cmd, program, error_step)
            raise NotImplementedError
    print('Total %d examples can not be interpreted' % len(interpreter_error_list))
    print('%d are filter errors as %s ' % (sum(map(sum, filter_error_dict.values())), filter_error_dict))
    print('%d are aggregation errors as %s ' % (sum(map(sum, agg_error_dict.values())), agg_error_dict))
    print('%d are other errors as %s ' % (sum(map(sum, other_error_dict.values())), other_error_dict))
    # pick the ones that got the wrong answer
    answer_error_list = [error_example for error_example in error_list if error_example[0] == 'wrong answer']
    length_mismatch = 0
    correct_after_normalize = 0
    first_item_match = 0
    for _, env_answer, traj_answer in answer_error_list:
        if len(env_answer) == 0 or len(traj_answer) == 0:
            continue
        if len(env_answer) != len(traj_answer):
            length_mismatch += 1
            if isinstance(env_answer[0], unicode) and isinstance(traj_answer[0], unicode):
                if normalize(env_answer[0]) == normalize(traj_answer[0]):
                    first_item_match += 1
            else:
                if env_answer[0] == traj_answer[0]:
                    first_item_match += 1
        else:
            if all([isinstance(answer, unicode) for answer in (env_answer+traj_answer)]):
                normalized_env_answer = [normalize(answer) for answer in env_answer]
                normalized_traj_answer = [normalize(answer) for answer in traj_answer]
                if normalized_env_answer == normalized_traj_answer:
                    correct_after_normalize += 1
    print('Total %d examples got the wrong answer' % len(answer_error_list))
    print('%d are length mismatch but with %d match on the first item' % (length_mismatch, first_item_match))
    print('%d are correct after normalize' % correct_after_normalize)
    return None


def main():
    envs = get_envs()
    # error analysis
    error_list = []
    error_log_file = codecs.open('error_log.txt', 'wb', encoding='utf-8')
    success = 0
    error_1 = 0
    error_2 = 0
    error_3 = 0
    for i, env in tqdm(enumerate(envs)):
        code = convert(env)
        if code is None:
            error_1 += 1
            error_list.append(('can not find entity', None, None))
            continue
        else:
            # verify the correctness of the code
            traj, error_info = collect_traj_for_program(env, code.split(' '), debug=True)
            # '''
            if traj is None:
                # find the error cmd
                program = error_info[2]
                error_step = len(error_info[1])
                cmd, re_pos = find_cmd_head(program, error_step)
                error_cmd_pos = error_step - re_pos + 1
                # attempt to fix the traj if the interpreter failed to parse
                if cmd == 'filter_eq':
                    # change the entity from the num_list one to the string_list one
                    num_entity_idx = int(program[error_cmd_pos+2][1:])
                    str_entity = 'v' + str(num_entity_idx-1)
                    # see if we get the right str entity
                    if env.interpreter.namespace[str_entity]['type'] == 'string_list':
                        program[error_cmd_pos] = 'filter_eq'
                        program[error_cmd_pos+2] = str_entity
                        # see if this simple flip fixes the problem
                        traj, error_info = collect_traj_for_program(env, program, debug=True)
            # '''
            if traj is not None:
                if traj.rewards[-1] == 1.0:
                    success += 1
                else:
                    error_3 += 1
                    error_list.append(('wrong answer', env.answer, traj.answer))
                    err_log = '%d, expected answer %s, but got answer %s, question is \' %s \' with table %s \n' \
                              % (i, env.answer, traj.answer, env.question_annotation['question'], env.question_annotation['context'])
                    print(err_log)
                    error_log_file.write(err_log)
            else:
                error_2 += 1
                error_list.append(('interpreter failed to parse', error_info, None))
                error_step = len(error_info[1])
                error_token = error_info[2][error_step]
                cmd, re_pos = find_cmd_head(error_info[2], error_step)
                err_log = '%d, command %s step %d error token %s, full program: %s \n' % (i, cmd, error_step, error_token, error_info[2])
                print(err_log)
                error_log_file.write(err_log)
    error_log_file.close()
    print('total %d example, successful converted %d (%f), %d (%f) can not find entity, %d (%f) failed to interpret and %d (%f) got wrong answer'
          % (len(envs),
             success, float(success)/len(envs),
             error_1, float(error_1)/len(envs),
             error_2, float(error_2)/len(envs),
             error_3, float(error_3)/len(envs)))
    with codecs.open('converter_error_list.bin', 'wb') as f:
        pickle.dump(error_list, f)


def loaded_program_analysis():
    envs = get_envs()
    saved_program_file = '../../data/wikisql/processed_input/preprocess_2/all_train_saved_programs-1k_5.json'
    # saved_program_file = '../../data/wikitable/processed_input/all_train_saved_programs.json'
    with open(saved_program_file, 'r') as f:
        program_dict = json.load(f)
    non_empty_env = 0
    spurious_program_enc = 0
    avg_nonempty_env = 0
    for key in program_dict.keys():
        program_list = program_dict[key]
        if len(program_list) > 0:
            non_empty_env += 1
            avg_nonempty_env += len(program_list)
            if len(program_list) > 1:
                spurious_program_enc += 1
    avg_nonempty_env = avg_nonempty_env / float(non_empty_env)
    print '%d items in loaded programs, with %d non-empty and %d have spurious forms with avg of %f' \
        % (len(program_dict), non_empty_env, spurious_program_enc, avg_nonempty_env)
    return
    envs, env_trajs, env_programs = get_env_trajs(envs)
    # stats
    non_empty_env = 0
    match_oracle = 0
    for env, env_traj, program in zip(envs, env_trajs, env_programs):
        env_loaded_program_list = program_dict[env.name]
        if env_loaded_program_list is not None and len(env_loaded_program_list) != 0:
            non_empty_env += 1
            if program in env_loaded_program_list:
                match_oracle += 1
    print '%d items in loaded programs, with %d non-empty and %d envs have an oracle match' % (len(program_dict), non_empty_env, match_oracle)


if __name__ == '__main__':
    FLAGS.executor = 'wikisql'
    # main()
    # error_analysis()...
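The pattern that matters for error_step in the snippet above is small: when collect_traj_for_program is called with debug=True and the interpreter fails, error_info[1] holds the tokens executed before the failure and error_info[2] holds the full program, so error_step = len(error_info[1]) is simply the index of the first token the interpreter could not execute. The sketch below is a minimal, self-contained illustration of that pattern; find_cmd_head mirrors the helper above, while the program tokens and the error_info tuple are made up for the example (only indices 1 and 2 of error_info are used, matching the code above).

def find_cmd_head(program, pos):
    """Mirror of the helper above: walk left from pos to the enclosing '('."""
    if program[pos] == '(':
        return 'abort', 0
    for i in range(pos, -1, -1):
        if program[i] == '(':
            return program[i + 1], pos - i
    return None, None

# hypothetical tokens and error_info, shaped like the debug output used above
program_tokens = ['(', 'filter_eq', 'all_rows', 'v3', 'v0', ')', '(', 'hop', 'v4', 'v1', ')', '<END>']
executed_prefix = program_tokens[:3]              # tokens consumed before the failure
error_info = (None, executed_prefix, program_tokens)

error_step = len(error_info[1])                   # index of the offending token -> 3
cmd, re_pos = find_cmd_head(error_info[2], error_step)
error_cmd_pos = error_step - re_pos + 1           # index of the command token itself

print(error_step, error_info[2][error_step])      # 3 v3  (the entity argument of filter_eq)
print(cmd, error_cmd_pos)                         # filter_eq 1

From here the code above patches program[error_cmd_pos+2] (the entity argument) and re-runs collect_traj_for_program to see whether the flip repairs the trajectory.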


visualization.py

Source: visualization.py (GitHub)


import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
from io import StringIO, BytesIO
import PIL
import cv2
import rslo.utils.pose_utils_np as pun


def pltfig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    # fig.canvas.draw()
    # # Get the RGBA buffer from the figure
    # w, h = fig.canvas.get_width_height()
    # buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
    # buf.shape = (w, h, 4)
    # # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
    # buf = np.roll(buf, 3, axis=2)
    # buf = buf.astype(float)/255
    # allocate an in-memory buffer
    buffer_ = BytesIO()  # StringIO()  # using buffer, great way!
    # save into memory instead of to local disk; by default this saves the current plt content
    fig.savefig(buffer_, format='png')
    buffer_.seek(0)
    # read it back from memory with PIL (or cv2)
    dataPIL = PIL.Image.open(buffer_)
    # convert to a numpy array; the PIL conversion is fast, and data is what we need
    data = np.asarray(dataPIL)
    data = data.astype(float)/255.
    # cv2.imwrite('test.png', data)
    # release the buffer
    buffer_.close()
    plt.close(fig)
    return data


# def draw_odometry(odom_vectors, gt_vectors=None, view='bv', saving_dir=None):
def draw_trajectory(poses_pred, poses_gt=None, view='bv', saving_dir=None, figure=None, ax=None, color='b', error_step=1, odom_errors=None):
    """[summary]
    Arguments:
        poses_pred {[np.array]} -- [(N,7)]
    Keyword Arguments:
        poses_gt {[np.array]} -- [(N,7)] (default: {None})
        view {str} -- [description] (default: {'bv'})
        saving_dir {[type]} -- [description] (default: {None})
        figure {[type]} -- [description] (default: {None})
        ax {[type]} -- [description] (default: {None})
        color {str} -- [description] (default: {'b'})
    """
    assert(view in ['bv', 'front', 'side'])
    translation, rotation = poses_pred[:, :3], poses_pred[:, 3:]
    if poses_gt is not None:
        assert len(poses_pred) == len(poses_gt)
        translation_gt, rotation_gt = poses_gt[:, :3], poses_gt[:, 3:]
    if view == 'bv':
        dim0, dim1 = 0, 1
    elif view == 'front':
        dim0, dim1 = 0, 1
    elif view == 'side':
        dim0, dim1 = 0, 1
    if figure is None or ax is None:
        figure = plt.figure()
        ax = figure.add_subplot(111)
    for i in range(1, len(translation)):
        if i == 1:
            ax.plot([translation[i-1][dim0]], [translation[i-1][dim1]], '*', markersize=10, color=color)
        ax.plot([translation[i-1][dim0], translation[i][dim0]],
                [translation[i-1][dim1], translation[i][dim1]], '-', markersize=0.5, color=color)
        if poses_gt is not None:
            ax.plot([translation_gt[i-1][dim0], translation_gt[i][dim0]],
                    [translation_gt[i-1][dim1], translation_gt[i][dim1]], '-', markersize=0.5, color='r')
            if i % 50 == 0:
                # plot connection lines
                ax.plot([translation[i][dim0], translation_gt[i][dim0]],
                        [translation[i][dim1], translation_gt[i][dim1]], '-', markersize=0.03, color='gray')
    # and i%error_step==0 and i//error_step<len(errors):
    if 0:  # odom_errors is not None:
        odom_errors = odom_errors[::error_step]
        l = min(len(translation[::error_step]), len(odom_errors))
        cm = plt.cm.get_cmap('hot')
        ax.scatter(translation[::error_step, dim0][:l]+10, translation[::error_step, dim1][:l]+10,
                   marker='o', c=odom_errors[:, 0], cmap=cm, vmin=np.min(odom_errors[:, 0]), vmax=np.max(odom_errors[:, 0]), linewidths=0.01)
        ax.scatter(translation[::error_step, dim0][:l]-10, translation[::error_step, dim1][:l]-10,
                   marker='x', c=odom_errors[:, 1], cmap=cm, vmin=np.min(odom_errors[:, 1]), vmax=np.max(odom_errors[:, 1]), linewidths=0.01)

    if saving_dir is not None:
        figure.savefig(saving_dir)
    return figure, ax


def draw_odometry(odom_vectors, view='bv', saving_dir=None, figure=None, ax=None, color='b'):
    """[draw odometry]
    Args:
        odom_vectors ([numpy arrays of size (N,7)]): quaternion+translation
        gt (as the same as odom_vectors, optional): Defaults to None.
        view([str], optional): The view to draw
    """
    assert(view in ['bv', 'front', 'side'])
    translation, rotation = odom_vectors[:, :3], odom_vectors[:, 3:]
    if view == 'bv':
        # translation = translation[:, [0, 2]]
        dim0, dim1 = 0, 1
        # translation = translation[:, [0, 1]]
    elif view == 'front':
        dim0, dim1 = 0, 1
        # translation = translation[:, [0, 1]]
    elif view == 'side':
        dim0, dim1 = 0, 1
        # translation = translation[:, [1, 2]]
    if figure is None or ax is None:
        figure = plt.figure()
        ax = figure.add_subplot(111)
    # lines = np.stack([starts, ends], axis=1)
    # lc = mc.LineCollection(lines, linewidths=0.3)
    # ax.add_collection(lc)
    t_prev = translation[0:1]
    r_prev = rotation[0:1]
    for i in range(1, len(translation)):
        r_cur = pun.qmult(r_prev, rotation[i:i+1])
        t_cur = t_prev + pun.rotate_vec_by_q(translation[i:i+1], r_prev)
        # t_cur = translation[i]
        if i == 1:
            ax.plot([t_prev[0][dim0], t_cur[0][dim0]], [t_prev[0][dim1], t_cur[0][dim1]], '*', markersize=10, color=color)
        ax.plot([t_prev[0][dim0], t_cur[0][dim0]], [t_prev[0][dim1], t_cur[0][dim1]], '-', markersize=0.5, color=color)
        t_prev = t_cur
        r_prev = r_cur
    if saving_dir is not None:
        figure.savefig(saving_dir)
        ...
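In draw_trajectory above, error_step is a subsampling stride: odom_errors is expected to hold one error sample per error_step poses, and the trajectory is sliced with the same stride before the two are plotted together. The sketch below is a minimal, numpy-only illustration of that alignment; the poses, the stride, and the error values are synthetic, and the commented call at the end assumes the rslo visualization module above is importable.

import numpy as np

poses_pred = np.zeros((500, 7))                     # (N, 7) poses: translation + quaternion
poses_pred[:, 0] = np.linspace(0.0, 100.0, 500)     # a straight synthetic track

error_step = 10                                     # one error sample every 10 poses
odom_errors = np.random.rand(500 // error_step, 2)  # two error components per sample (made up)

translation = poses_pred[:, :3]
sampled = translation[::error_step]                 # poses at which errors were measured
l = min(len(sampled), len(odom_errors))             # same length guard as in the snippet above
print(sampled[:l].shape, odom_errors[:l].shape)     # (50, 3) (50, 2)

# with the module above importable, the call would be roughly:
# figure, ax = draw_trajectory(poses_pred, view='bv', error_step=error_step, odom_errors=odom_errors)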


model.py

Source: model.py (GitHub)


from typing import List, Tuple
import config
import random

# Basic simulation parameters
TRACK_LENGTH = 1.0
DEFAULT_VEL = 1.0
ERROR_STEP = config.ERROR_STEP
MAX_ERROR = config.MAX_ERROR
# The randomness factor slightly perturbs each ball's velocity,
# so the balls can no longer trivially meet again after a long time
RANDOMNESS_FACTOR = ERROR_STEP/3
assert MAX_ERROR <= TRACK_LENGTH
assert ERROR_STEP < MAX_ERROR
DEFAULT_NUM = round(MAX_ERROR/ERROR_STEP)
# Compute the individual velocity of every ball
CELLS_VEL = [DEFAULT_VEL + ERROR_STEP*i + RANDOMNESS_FACTOR * random.uniform(-1, 1) for i in range(-DEFAULT_NUM, DEFAULT_NUM + 1)]
CELLS_VEL[DEFAULT_NUM] = DEFAULT_VEL
CELLS = len(CELLS_VEL)
# All computed ball parameters are stored here
Cache: List[Tuple[int, float, int]] = [(0, 0, 0) for _ in range(CELLS)]

# Compute, in order, a ball's direction, position and number of bounces given time and velocity
def get_relative_ball_info(t: float, v: float):
    pre = (t * v) % (2 * TRACK_LENGTH)
    pre_nob = int((t * v) // (2 * TRACK_LENGTH))
    if pre >= TRACK_LENGTH:
        return (-1, 2 * TRACK_LENGTH - pre, 2*pre_nob + 1)
    else:
        return (1, pre, 2*pre_nob)

# Update the stored information about the balls
def update_pos(t: float):
    global Cache
    for i in range(CELLS):
        Cache[i] = get_relative_ball_info(t, CELLS_VEL[i])

# Get the minimum and maximum ball positions for the current state of the Cache array
def get_min_max_x():
    min_x = 1.0
    max_x = 0.0
    for (_, x, _) in Cache:
        min_x = min(x, min_x)
        max_x = max(x, max_x)
    return (min_x, max_x)

# Compute the direction, position and bounce errors.
# Bounces are still counted with the old method that I dropped for the position error,
# but I decided bounces are not important enough to update as well.
def calculate_error():
    (min_x, max_x) = get_min_max_x()
    delta_x = (max_x-min_x)
    delta_b = 0
    known_dir = True
    for (dir, _, b) in Cache:
        delta_b = max(abs(b-Cache[DEFAULT_NUM][2]), delta_b)
        known_dir = known_dir and (dir == Cache[DEFAULT_NUM][0])
        ...
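model.py above reads ERROR_STEP and MAX_ERROR from a config module it does not show. Below is a minimal, hypothetical config.py satisfying the module's asserts (ERROR_STEP < MAX_ERROR <= TRACK_LENGTH, with TRACK_LENGTH = 1.0); the concrete values and the usage lines are illustrative only, not taken from the original project.

# config.py (hypothetical values)
ERROR_STEP = 0.01   # velocity increment between neighbouring balls
MAX_ERROR = 0.1     # largest velocity deviation from DEFAULT_VEL

# With such a config on the path, typical use of the module would be roughly:
# import model
# model.update_pos(t=12.5)               # recompute every ball's direction/position/bounces
# min_x, max_x = model.get_min_max_x()   # spread of the ball positions at time t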


