How to use the _load_data_file method in gabbi

Best Python code snippet using gabbi_python

oprules.py

Source:oprules.py Github

copy

Full Screen

...14 yield entry.name15class OperationFactory(object):16 def __init__(self, data_file):17 self.data = None18 self._load_data_file(data_file)19 self.err = list()20 def _load_data_file(self, file):21 pass22 def op(self, _line):23 """仅仅处理已经存在一行数据"""24 return _line25 def patch(self, file):26 return None27 def log(self, _msg, _level=1):28 # define info 1,warn 2,error,329 if _level == 3:30 self.err.append(_msg)31 print(_msg)32class RemoveUseless(OperationFactory):33 """ oprules.py -o RemoveUseless\n oprules.py --operate=RemoveUseless34 remove has no content rules,remove any to any rules,remove ip icmp rules"""35 def __init__(self, data_file):36 super().__init__(data_file)37 def op(self, _line):38 if _line.strip(' ').startswith('#') or len(_line.strip('\n\r')) == 0:39 return _line40 if 'content:' not in _line:41 self.log('No Content remove {}'.format(_line))42 return '# {}'.format(_line)43 r = re.search(r'alert (tcp|udp) (.*) any -> (.*) any ', _line)44 if r:45 self.log('any to any remove {}'.format(_line))46 return '# {}'.format(_line)47 r = re.search(r'alert (icmp|ip) ', _line)48 if r:49 self.log('icmp or ip type remove {}'.format(_line))50 return '# {}'.format(_line)51 return _line52class PatchRules(OperationFactory):53 """ oprules.py -o PatchRules -p patchfile\n oprules.py --operate=PatchRules --patchfile=patchfile54 patch file indicate file:rules"""55 def __init__(self, data_file):56 self.sids = list()57 super().__init__(data_file)58 def _load_data_file(self, file):59 self.data = dict()60 pattern = r'(.*?).rules:(.*)'61 with open(file) as fp:62 for line in fp:63 r = re.match(pattern, line)64 if r:65 path = '{}.rules'.format(r.group(1).strip(' '))66 filename = os.path.basename(path)67 # abspath = os.path.abspath(path)68 # print(filename)69 # print(abspath)70 rule = r.group(2).strip(' ')71 s = re.findall(r'sid:(\d+);', rule)72 if len(s) == 1:73 self.sids.append(s[0])74 if filename in self.data.keys():75 self.data[filename].append(rule)76 else:77 self.data[filename] = 
[rule]78 def patch(self, file):79 """如果存在增加新规则的情况,需要额外的对每一条新增加的规则进行处理80 其他仅仅是对规则本身进行处理,不涉及规则的增加81 """82 if file in self.data.keys():83 for r in self.data[file]:84 yield '{}\n'.format(r)85 def op(self, _line):86 # 如果patch文件中增加或者修改的规则sid已经存在,应当把这些规则先注释停用87 s = re.findall(r'sid:(\d+);', _line)88 sid = None89 if len(s) == 1:90 if s[0] in self.sids:91 sid = s[0]92 if sid:93 if not _line.startswith('#'):94 # print('[#] {}'.format(sid))95 return '# {}'.format(_line)96 return _line97class EnableRules(OperationFactory):98 """ oprules.py -o EnableRules -s sidfile -e enable/disable\n oprules.py --operate=EnableRules --sidfile=sidfile99 --enable=enable/disable enable or disable rules sid file and enable flag should be present """100 def __init__(self, data_file):101 super().__init__(data_file)102 # self._load_data_file(data_file)103 def op(self, _line):104 sid = None105 r = re.findall(r'sid:(\d+);', _line)106 _line = _line.strip(' \n\r')107 if len(r) == 1:108 sid = r[0]109 if sid and int(sid) in self.data['enable']:110 self.log("{} in enable".format(sid), 1)111 if _line.startswith('#'):112 # disable status113 self.log('[+] {}'.format(sid), 1)114 # disable -> enable115 _line = _line[1:]116 _line = _line.strip(' ')117 else:118 self.log('[=] {}'.format(sid), 1)119 if sid and int(sid) in self.data['disable']:120 self.log("{} in disable".format(sid), 1)121 if _line.startswith('alert'):122 self.log('[-] {}'.format(sid), 1)123 # enable -> disable124 _line = '# {}'.format(_line)125 else:126 self.log('[=] {}'.format(sid), 1)127 return '{}\n'.format(_line)128 def _load_data_file(self, file):129 pattern = r'(#)*\s*(\d+)'130 self.data = {'enable': list(), 'disable': list()}131 with open(file) as fp:132 for _line in fp:133 r = re.match(pattern, _line)134 if r:135 print(r.group(0))136 if r.group(1) is None:137 self.data['enable'].append(int(r[2]))138 if r.group(1) == '#':139 self.data['disable'].append(int(r[2]))140class ChangePopularity(OperationFactory):141 """ oprules.py -o 
ChangePopularity -f popfile\n oprules.py --operate=ChangePriority --popfile=popfile142 according to popfile to change the popularity of rules."""143 def __init__(self, data_file):144 super().__init__(data_file)145 def op(self, _line):146 sid = None147 pattern = r'sid\s*:\s*(\d+);'148 r = re.search(pattern, _line)149 if r:150 sid = r.group(1)151 if sid not in self.data.keys():152 return _line153 else:154 raise (KeyError('Not find Sid'))155 pattern = r'reference\s*:\s*pop,(\d);'156 r = re.search(pattern, _line)157 if r:158 # find pop content159 # modify old value and return160 _line = re.sub(pattern, 'reference:pop,{};'.format(161 self.data[sid]), _line, count=1)162 self.log(f'[O] {sid} change popularity as {self.data[sid]}')163 self.log(f'New:{_line}')164 return _line165 pattern = r'reference:(.*?);'166 r = re.search(pattern, _line)167 if r:168 # insert new pop content before first 'reference' and return169 _line = re.sub(pattern, '{} reference:pop,{};'.format(170 r[0], self.data[sid]), _line, count=1)171 self.log(f'[O] {sid} add popularity as {self.data[sid]}')172 self.log(f'New:{_line}')173 else:174 self.log('[x] Reference can not be found:{}'.format(_line), 3)175 return _line176 def _load_data_file(self, file):177 """ Demo: 10086:1 """178 pattern = r'(\d+):([1-3]{1})'179 self.data = dict()180 with open(file) as fp:181 for _line in fp:182 if _line.startswith('#'):183 continue184 r = re.match(pattern, _line)185 if r:186 self.data[r.group(1).strip(' ')] = int(r.group(2))187class ChangePriority(OperationFactory):188 """ oprules.py -o ChangePriority -c classfile\n oprules.py --operate=ChangePriority --classfile=classfile189 according to classification change the priority of rules,classification should be present"""190 def __init__(self, data_file):191 super().__init__(data_file)192 def op(self, _line):193 if 'priority:' in _line:194 # self.log('[!] 
{} has priority key word'.format(_line))195 return _line196 pattern = r'classtype\s*:\s*([a-z\-]+);'197 # if _line.startswith('#'):198 # return _line199 r = re.search(pattern, _line)200 if r:201 if r[1] in self.data.keys():202 # replace and set priority value203 _line = re.sub(pattern, '{} priority:{};'.format(204 r[0], self.data[r[1]]), _line)205 else:206 self.log('[x] classtype: {} can not be found'.format(r[1]), 3)207 else:208 self.log('[x] classtype can not be found:{}'.format(_line), 3)209 return _line210 def _load_data_file(self, file):211 """ Demo: config classification: successful-admin,Successful Administrator Privilege Gain,1"""212 pattern = r'config classification: ([a-z\-]+),([a-zA-Z\-\s]+),(\d+)'213 self.data = dict()214 with open(file) as fp:215 for _line in fp:216 if _line.startswith('#'):217 continue218 r = re.match(pattern, _line)219 if r:220 self.data[r.group(1).strip(' ')] = int(r.group(3))221 # else:222 # print('Error:{}'.format(_line))223def process_rules(path, op_instance):224 files = get_rules_list(os.path.join(RULES_DIR, path))...

Full Screen

Full Screen

timit.py

Source:timit.py Github

copy

Full Screen

...40 assert not np.any(np.isnan(self.y))41 assert not np.any(np.isnan(self.sentence_ids))42 if preprocessor:43 preprocessor.apply(self)44 def _load_data_file(self, name):45 data = np.load(name)46 return data['X'], data['Y'], data['sentence_ids'].ravel()47 def _load_phone_index(self):48 file_name = os.path.join(49 string_utils.preprocess("${PYLEARN2_DATA_PATH}"),50 "timit",51 "timit_phone_index_table.txt")52 phones = []53 with open(file_name) as timit_file:54 for line in timit_file:55 idx, label = line.split(",")56 idx = int(idx) - 1 # -1 because 1-based indexing57 phone, junk = label.strip().split(" ")58 if phone not in phones:59 phones.append(phone)60 return phones61 def _load_train(self):62 n_batches = 563 Xs = []64 Ys = []65 sentence_ids = []66 for b in xrange(1, n_batches+1):67 fname = os.path.join(68 string_utils.preprocess("${PYLEARN2_DATA_PATH}"),69 "timit",70 "timit_train_b" + str(b) + ".npz")71 X, Y, ids = self._load_data_file(fname)72 Xs.append(X)73 Ys.append(Y)74 sentence_ids.append(ids)75 X = np.concatenate(Xs, axis=0)76 Y = np.concatenate(Ys, axis=0)77 sentence_ids = np.concatenate(sentence_ids)78 return X, Y, sentence_ids79 def _load_test(self):80 fname = os.path.join(81 string_utils.preprocess("${PYLEARN2_DATA_PATH}"),82 "timit",83 "timit_test.npz")84 return self._load_data_file(fname)85 def _load_valid(self):86 fname = os.path.join(87 string_utils.preprocess("${PYLEARN2_DATA_PATH}"),88 "timit",89 "timit_valid.npz")90 return self._load_data_file(fname)91 def _get_fold_table(self):92 # Source (KFL): http://repository.cmu.edu/cgi/viewcontent.cgi?article=2768&context=compsci93 #94 # The fold table maps name -> folded name95 # Names not in the fold_table aren't changed by folding.96 return {97 # Folding rules from the folding table in KFL.98 'ux': 'uw',99 'axr': 'er',100 'em': 'm',101 'nx': 'n',102 'eng': 'ng',103 'hv': 'hh',104 # Folding rules for closures and silence....

Full Screen

Full Screen

load_raw_data.py

Source:load_raw_data.py Github

copy

Full Screen

...20from .file_schemas import FoodNutrient21from .file_schemas import FoodUpdateLogEntry22from .file_schemas import Nutrient23from .file_schemas import RawData24def _load_data_file(data_dir, filename, data_cls):25 """Load a data file, returning a list of dicts.26 Loads a data file in the USDA FDC data dump format. Each27 file contains a header row, which is used to convert every28 other row into a dict. This function returns a list of dicts,29 representing the rows of the file.30 Args:31 data_dir: The directory containing USDA data.32 data_cls: The namedtuple for this data file.33 Returns:34 A list of dicts, whose keys are the column names.35 """36 print('loading file: %s' % filename)37 with open(os.path.join(data_dir, filename)) as f:38 reader = csv.reader(f)39 header_row = next(reader)40 print (header_row)41 print (data_cls._fields)42 # Verify the header rows match the fields43 assert header_row == list(data_cls._fields)44 # For each row after the header row, convert from a45 # list to an instance of data_cls.46 return list(map(data_cls._make, reader))47def load_raw_data(data_dir):48 """Load raw CSV data.49 Loads raw CSV data, converting to RawData. RawData is a50 namedtuple where each field represents a CSV file. 
Each51 file is represented as a list of `namedtuple`s, where the52 `namedtuple` for each entry has fields that have the same53 names as the column names.54 The raw data format represents data parsed from CSV files55 available for download from56 https://fdc.nal.usda.gov/download-datasets.html57 For efficiency it's best to download just the Branded Food58 data, unzip it, then also download the59 "Supporting data for Downloads" and copy the needed files60 from there into the directory containing Branded Food data.61 Currently from the supporting data, only nutrient.csv is62 used.63 """64 return RawData(65 branded_foods=_load_data_file(66 data_dir, 'branded_food.csv', BrandedFood),67 food_nutrients=_load_data_file(68 data_dir, 'food_nutrient.csv', FoodNutrient),69 food_attributes=_load_data_file(70 data_dir, 'food_attribute.csv', FoodAttribute),71 food_update_log_entries=_load_data_file(72 data_dir, 'food_update_log_entry.csv', FoodUpdateLogEntry),73 foods=_load_data_file(74 data_dir, 'food.csv', Food),75 nutrients=_load_data_file(...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites for running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run gabbi automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful