How to use _patch_dict method in autotest

Best Python code snippet using autotest_python

update.py

Source:update.py Github

copy

Full Screen

1import copy2import datetime3from collections import defaultdict4from pathlib import Path5from warnings import warn6class Updater():7 def __init__(self):8 tag_list = 'mp ls ne za_pred za_ante dp_label dp_head sr_pred sr_args cr'.split(' ')9 self.col_idx = {v:i+3 for i,v in zip(range(len(tag_list)), tag_list)}10 def config(self, tsv_path, comment, log):11 self.tsv_path = Path(tsv_path)12 self.comment_file, self.comment_enc = Path(comment.name), comment.encoding13 self.log_file, self.log_enc = Path(log.name), log.encoding14 comment.close(); log.close()15 self.log_file.unlink()16 self.comment_file.unlink()17 def _rec_ddict(self):18 return defaultdict(self._rec_ddict)19 def load_prepatch(self, ppatch_file):20 self.patch = []21 self.comment_list = []22 # define pre_patch23 self.prepatch = self._rec_ddict()24 lines = [line.strip('\n') for line in ppatch_file]25 ppatch_file.close()26 for line in lines:27 one_ppatch = line.strip('\n').split('\t')28 if not len(one_ppatch) == 4:29 raise Exception(f"Line not composed of 4 columns, current line: {line}, number of columns : {len(one_ppatch)}")30 #check if comment31 if not one_ppatch[-1] == '':32 self.comment_list.append('\t'.join(one_ppatch))33 doc_id = '-'.join(one_ppatch[0].split('-')[:2])34 self.prepatch[doc_id][one_ppatch[0]][one_ppatch[1]] = one_ppatch[2]35 def make_patch(self):36 self._patch_dict = self._rec_ddict()37 for doc_id, gwid_items in self.prepatch.items():38 tsv_file = self.tsv_path/f'{doc_id}.unified.min.tsv'39 #TODO: need refactoring, repeated structure.... 
1) tsv load and check 2) writable check40 if tsv_file.exists():41 with tsv_file.open(encoding = 'utf8') as f: lines = f.readlines()42 #to check if writable43 copied_tsv = copy.copy(lines)44 for line_idx, line in enumerate(lines):45 tsv_line= line.strip('\n').split('\t')46 # if matched line exists in prepatch47 if tsv_line[0] in self.prepatch[doc_id].keys():48 self._cp_patchline = copy.copy(self.prepatch[doc_id][tsv_line[0]])49 for field, after in self.prepatch[doc_id][tsv_line[0]].items():50 after = after.strip('\n')51 # CAUTION, shallow copys, be sure not to make change in nested object52 del self._cp_patchline[field]53 # fix morpheme unit in case [mp, ls, en]54 if field.split('.')[0] in ['mp', 'ls', 'ne'] and '.' in field:55 field_name, sub_field = field.split('.')56 field_idx = self.col_idx[field_name]57 before = tsv_line[field_idx].split(' + ')[int(sub_field)-1]58 if not before == after:59 # --- from write60 sub_fields = tsv_line[field_idx].split(' + ')61 field_name, sub_idx = field.split('.')62 sub_fields[int(sub_idx)-1] = after63 tsv_line[field_idx] = ' + '.join(sub_fields)64 lines[line_idx] = '\t'.join(tsv_line).strip('\n') + '\n'65 # ---66 self._patch_dict[doc_id][tsv_line[0]][field] = after67 self.patch.append([tsv_line[0], field, before, after, ''])68 else:69 field_idx = self.col_idx[field]70 before = tsv_line[field_idx].strip('\n')71 if not tsv_line[field_idx] == after:72 73 # --- from write74 field_idx = self.col_idx[field]75 tsv_line[field_idx] = after76 lines[line_idx] = '\t'.join(tsv_line).strip('\n') + '\n'77 # --- 78 self._patch_dict[doc_id][tsv_line[0]][field] = after79 self.patch.append([tsv_line[0], field, before, after, ''])80 # error Not all prepatchs reviewed line field has not existing gwid81 if self._cp_patchline:82 raise Exception(f"Some prepatch lines not checked, gwid: {self._cp_patchline.keys()}, field: {tsv_line[0]}")83 self._cp_patchline = {}84 85 if not len(lines) == len(copied_tsv):86 raise Exception(f"{tsv_file.name} tsv patch 
before/after line number not matched. before: {len(copied_tsv)}, after: {len(lines)}")87 else:88 raise Exception(f"corresponding tsv file doesn't exist, given prepatch document id: {doc_id}")89 self.patch.sort()90 def write(self):91 self.datenow = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")92 if self._patch_dict:93 for doc_id, gwid_items in self._patch_dict.items():94 tsv_file = self.tsv_path/f'{doc_id}.unified.min.tsv'95 with tsv_file.open(encoding = 'utf8') as f: lines = f.readlines()96 copied_original = copy.copy(lines)97 for line_idx, line in enumerate(lines):98 tsv_line= line.split('\t')99 if tsv_line[0] in self._patch_dict[doc_id].keys():100 for field, after in self._patch_dict[doc_id][tsv_line[0]].items(): 101 if field.split('.')[0] in ['mp', 'ls', 'ne'] and '.' in field:102 field_name, sub_idx = field.split('.')103 field_idx = self.col_idx[field_name]104 sub_fields = tsv_line[field_idx].split(' + ')105 sub_fields[int(sub_idx)-1] = after106 tsv_line[field_idx] = ' + '.join(sub_fields)107 lines[line_idx] = '\t'.join(tsv_line).strip('\n') + '\n'108 109 else:110 field_idx = self.col_idx[field]111 tsv_line[field_idx] = after112 lines[line_idx] = '\t'.join(tsv_line).strip('\n') + '\n'113 if not copied_original == lines:114 with tsv_file.open('w') as f: print(''.join(lines).strip('\n'), file=f)115 if self.comment_list:116 with self.comment_file.open('w', encoding = self.comment_enc) as f:117 for log in self.comment_list:118 line = log.split('\t')[:2] + [self.datenow] + [log.split('\t')[-1]]119 print('\t'.join(line).strip('\n'), file = f)120 if self.patch:121 with self.log_file.open('w', encoding = self.log_enc) as f:122 for log in self.patch:123 line = log[:2] + [self.datenow] + log[2:4]...

Full Screen

Full Screen

conftest.py

Source:conftest.py Github

copy

Full Screen

@pytest.fixture
def patch_dict():
    """Fixture returning a recursive patch helper for nested dict/list data.

    The returned callable applies ``updates`` onto ``orig`` in place (and
    returns ``orig``).  Key prefixes control the merge:
      - ``-key`` deletes ``key`` from the target,
      - ``!key`` replaces the value outright instead of merging,
      - otherwise dicts and lists are merged recursively and scalars
        are overwritten.
    """

    def _merge_mapping(target, updates):
        # Merge *updates* (a dict) into dict *target*; mutates and returns it.
        for key, value in updates.items():
            prefix = str(key)[:1]
            if prefix in ("-", "!"):
                key = key[1:]
                if prefix == "-":
                    del target[key]
                    continue
            if key not in target or prefix == "!":
                target[key] = value
            elif isinstance(target[key], dict) and isinstance(value, dict):
                _merge_mapping(target[key], value)
            elif isinstance(target[key], list) and isinstance(value, (list, dict)):
                _merge_sequence(target[key], value)
            else:
                target[key] = value
        return target

    def _merge_sequence(target, updates):
        # Merge *updates* (a list, or a dict of index -> value) into list
        # *target*; mutates and returns it.  Prefixed keys are decoded to
        # integer indices; out-of-range assignments pad with None.
        if isinstance(updates, list):
            updates = dict(enumerate(updates))
        for key, value in updates.items():
            prefix = str(key)[:1]
            if prefix in ("-", "!"):
                key = int(key[1:])
                if prefix == "-":
                    del target[key]
                    continue
            if key >= len(target) or prefix == "!":
                # Grow the list up to *key* (no-op when already long enough).
                target += [None] * (key - len(target) + 1)
                target[key] = value
            elif isinstance(target[key], list) and isinstance(value, (list, dict)):
                _merge_sequence(target[key], value)
            elif isinstance(target[key], dict) and isinstance(value, dict):
                _merge_mapping(target[key], value)
            else:
                target[key] = value
        return target

    return _merge_mapping

Full Screen

Full Screen

test_acid_diff.py

Source:test_acid_diff.py Github

copy

Full Screen

# NOTE(review): this snippet also relies on names imported above the captured
# excerpt (random, REMOVE, Grid) — presumably from the same test package.
from .test_acid import gen_random_grid, gen_random_scalar, gen_random_str

# Tuning knobs for the randomized diff generator: generation count, then
# patch / column-move / value-add / row-duplicate probabilities in percent.
GENERATION_NUMBER, PERCENT_PATCH, PERCENT_MOVE_COL, PERCENT_ADD_VAL, PERCENT_DUPLICATE = (10, 30, 5, 10, 5)


class RefuseRemove(BaseException):
    """Signals that a dict carrying the REMOVE sentinel must not be patched."""
    pass


def _patch_dict(a_dict, cols=None):
    """Return a randomly mutated shallow copy of *a_dict*.

    Args:
        a_dict: mapping to mutate (the original is left untouched).
        cols: optional column mapping whose keys may be randomly added.

    Raises:
        RefuseRemove: if any value of *a_dict* is the REMOVE sentinel.
    """
    patched = a_dict.copy()
    max_rand = int(len(patched) * (PERCENT_PATCH / 100))
    keys = list(patched.keys())
    # Refuse dicts already flagged for removal.
    if any(val is REMOVE for val in patched.values()):
        raise RefuseRemove()
    if keys:
        for _ in range(random.randint(0, max_rand) + 1):
            idx = random.randint(0, len(keys) - 1)
            key = keys[idx]
            if key != 'id':
                # Re-draw until the generated scalar is not the REMOVE sentinel.
                while True:
                    patched[key] = gen_random_scalar()
                    if patched[key] is not REMOVE:
                        break
    # Occasionally introduce a brand-new key taken from the column set.
    if cols and random.randint(0, 100) < PERCENT_ADD_VAL:
        key = list(cols.keys())[random.randint(0, len(cols) - 1)]
        if key != 'id':
            patched[key] = gen_random_str()
    return patched


def gen_diff_metadata(metadata):
    """Return a randomly patched copy of grid-level *metadata*."""
    return _patch_dict(metadata)


def gen_diff_meta_cols(cols):
    """Return a copy of *cols* with patched metadata and, sometimes, one column moved."""
    cols = cols.copy()
    for name in cols:
        cols[name] = gen_diff_metadata(cols[name])
    # Occasionally relocate one column to a random position.
    if random.randint(0, 100) < PERCENT_MOVE_COL:
        src = random.randint(0, len(cols) - 1)
        dst = random.randint(0, len(cols) - 1)
        key = cols.at(src)
        moved = cols.pop_at(src)
        cols.add_item(key, moved, index=dst)
    return cols


def gen_new_row(grid):
    """Yield each row of *grid* after two rounds of random patching.

    The first pass may add keys drawn from ``grid.column``; the second pass
    patches values only.
    """
    for row in grid:
        patched = _patch_dict(row, grid.column)
        yield _patch_dict(patched)


def gen_diff(orig):
    """Build and return a new Grid that randomly diverges from *orig*."""
    new_metadata = gen_diff_metadata(orig.metadata)
    new_meta_cols = gen_diff_meta_cols(orig.column)
    grid = Grid(orig.version, metadata=new_metadata, columns=new_meta_cols)
    for row in gen_new_row(orig):
        grid.append(row)
        # Rows without an id may be duplicated to exercise de-dup handling.
        if "id" not in row and random.randint(0, 100) < PERCENT_DUPLICATE:
            grid.append(row.copy())
    return grid

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful