How to use the generated_content method in tox

Best Python code snippets using tox_python

protocol_analysis.py

Source: protocol_analysis.py (GitHub)

class Protocol:
    """
    A generic way to parse network protocols such as TLS, where the usual
    format is (content length (a fixed number of bytes) + content).
    Last modified 2022-02-11
    """
    def __init__(self, struct: dict, content: bytes = None, strict=False):
        if content:  # content given: parse it
            # return the parsed content and the detailed attributes of the content
            self.parsed_content, self._parsed_content = self._parse(struct, content, strict)
        else:  # no content: generate a byte string from struct
            self.generated_content = self.generate(struct)
            self.parsed_content = None  # must not be omitted; tells parsing apart from generating
        self.__prefix = ""  # used by __repr__ for pretty-printing

    def __bool__(self):
        return bool(self.parsed_content)

    @classmethod
    def generate(cls, struct):
        """
        Generate a byte string from struct.
        :param struct: the input dict
        :return: the output byte string
        """
        generated_content = b""  # the byte string being built
        # number of bytes used to encode the length of the content that follows;
        # None means no length field is pending
        length_num = None
        for k, v in struct.items():
            if isinstance(v, tuple):
                num, content = v  # the byte count and the actual content
                if content is None:
                    if not isinstance(num, int):
                        raise TypeError(f"Type error: the first item of the tuple for {k} must be an integer limiting the byte count")
                    # the length of the following content is unknown yet; record
                    # the byte count and fill the length field in later
                    length_num = num
                elif isinstance(content, bytes):  # content is bytes, so validate num
                    if num is not None:
                        if isinstance(num, int):
                            if len(content) != num:
                                # num is not None, so the content has a fixed length; check it
                                raise ValueError(
                                    f"Value error: {k} (second item of the tuple) should be {num} bytes, got {len(content)} bytes")
                        else:  # num is neither None nor int
                            raise TypeError(f"Type error: the byte-count limit for {k} must be an integer")
                    if length_num:  # prepend the length of this content, encoded in length_num bytes
                        if len(content) > 256 ** length_num - 1:  # content too long to fit in length_num bytes
                            raise ValueError(
                                f"Content exceeds limit: {k} (second item of the tuple) may be at most {256 ** length_num - 1} bytes, got {len(content)}")
                        else:
                            generated_content += len(content).to_bytes(length_num, "big")  # prepend the content length
                            length_num = None  # the pending length field is now settled
                    generated_content += content  # append the content itself
                else:  # must be bytes or None
                    raise TypeError(f"Type error: the value for {k} (second item of the tuple) should be bytes or None, got {type(content)}")
            elif isinstance(v, dict):  # the value is a nested dict
                content = cls.generate(v)
                if length_num:  # fill in the pending length field
                    if len(content) > 256 ** length_num - 1:  # content too long to fit in length_num bytes
                        raise ValueError(f"Content exceeds limit: {k} may be at most {256 ** length_num - 1} bytes, got {len(content)}")
                    else:
                        generated_content += len(content).to_bytes(length_num, "big")  # prepend the content length
                        length_num = None  # the pending length field is now settled
                generated_content += content  # append the content itself
            else:
                raise TypeError(f"Type error: the value for {k} should be a tuple or a dict, got {type(v)}")
        return generated_content

    @classmethod
    def _parse(cls, struct, content, strict=False, lastone=True):
        """
        Parse a byte string (content) into a dict with the structure given by
        struct; using match() is recommended.
        :param struct: dict describing the protocol structure
        :param content: the byte string
        :param strict: in strict mode, values are checked against their reference values
        :param lastone: at the top level only the needed results are returned;
                        below it, extra values are returned for the caller's use
        :return: the parsed dict structure, plus a dict structure with detailed attributes
        """
        try:
            if isinstance(content, bytes):
                parsed_content = {}  # the parsed dict structure that is returned
                _parsed_content = {}  # detailed attributes; less convenient, mainly for display
                _length = hex(len(content))[2:]  # the content length in hexadecimal
                if len(_length) % 2 != 0:  # a byte string always has an even number of hex digits
                    _length = "0" + _length  # pad with a 0 to form valid hex
                _length = bytes.fromhex(_length)  # the length as a byte string
                for k, v in struct.items():
                    if isinstance(v, tuple):
                        # the byte count and the reference value; if strict is True,
                        # the content is checked against the reference value
                        _num, _content_reference = v
                        if _num is None:  # the length is not fixed but given by the previous field
                            _length = int.from_bytes(_length, "big")  # _length holds the previous field's value: this content's length
                            parsed_content[k] = content[:_length]
                            _parsed_content[k] = (content[:_length], _length)
                            content = content[_length:]
                        elif isinstance(_num, int):
                            _length = _temp_content = content[:_num]  # may be the next segment's length, so keep it in _length
                            if strict and _content_reference is not None:
                                # strict mode with a reference value: check the content
                                if isinstance(_content_reference, list):  # the value must be one of the listed references
                                    if _temp_content not in _content_reference:
                                        raise ValueError(
                                            f"Content error: {k} should be one of {_content_reference}, got {_temp_content}")
                                elif isinstance(_content_reference, bytes):  # a single byte-string reference
                                    if _temp_content != _content_reference:
                                        raise ValueError(f"Content error: {k} should be {_content_reference}, got {_temp_content}")
                                else:
                                    raise TypeError(f"Structure error: the reference value for {k} should be a list or bytes, got {type(_content_reference)}")
                            parsed_content[k] = _temp_content  # record the content
                            _parsed_content[k] = (_temp_content, _num)  # record the content and its length
                            content = content[_num:]
                        else:
                            raise TypeError(f"Structure error: the length limit for {k} must be an integer or None, got {type(_num)}")
                    elif isinstance(v, dict):  # a nested protocol, or the content covered by the previous length field
                        _length = int.from_bytes(_length, "big")  # the segment length saved earlier, as an integer
                        _pc, __pc, content = cls._parse(v, content[:_length], lastone=False)
                        parsed_content[k] = _pc
                        _parsed_content[k] = (__pc, _length)
                    elif isinstance(v, list):  # variable-length elements, always at the very end
                        list_parsed_content = []
                        _list_parsed_content = {}
                        _ele_num = 0  # counts the parsed elements, starting from 0
                        _clength = _length = len(content)
                        if len(v) != 1 or not isinstance(v[0], dict):
                            raise TypeError(f"Structure error: the reference list for {k} should contain exactly one element, and it must be a dict")
                        while True:
                            _pc, __pc, content = cls._parse(v[0], content, lastone=False)
                            ll = len(content)
                            list_parsed_content.append(_pc)
                            _list_parsed_content[str(_ele_num)] = (__pc, _length - ll)
                            _length = ll
                            _ele_num += 1
                            if not content:
                                break
                        parsed_content[k] = list_parsed_content
                        _parsed_content[k] = (_list_parsed_content, _clength)
                    else:
                        raise TypeError(f"Structure error: the reference value for {k} should be a dict, tuple, or list, got {type(v)}")
                if lastone:
                    return (parsed_content, _parsed_content)
                else:
                    return (parsed_content, _parsed_content, content)
            else:
                raise TypeError(f"content should be bytes, got {type(content)}")
        except Exception:
            return (None, None)

    @classmethod
    def match(cls, struct, content, strict=False):
        """
        Check whether a byte string (content) matches the structure given by struct.
        :param struct: dict describing the protocol structure
        :param content: the byte string
        :param strict: in strict mode, values are checked against their reference values
        :return: the parsed dict on a match, False on failure
        """
        try:
            parsed_content = cls._parse(struct, content, strict)[0]
            return parsed_content
        except Exception:
            return False

    def _to_string(self, _parsed_content):
        repr_str = ""
        self.__prefix += "  "
        for k, v in _parsed_content.items():
            repr_str += f"\n{self.__prefix}{k}:"
            if isinstance(v[0], dict):
                repr_str += f"({v[1]}) {self._to_string(v[0])}"
            else:
                repr_str += f"({v[1]}) {v[0]}"
        self.__prefix = self.__prefix[2:]
        return repr_str

    def __repr__(self):
        return self._to_string(self._parsed_content)

    def __bytes__(self):
        ...
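For a sense of how the struct convention works in practice, here is a minimal, hypothetical sketch (the field names and byte values below are invented for illustration, not taken from the original repository): a tuple (n, bytes) declares a fixed n-byte field, while (n, None) followed by (None, bytes) declares an n-byte length prefix that generate() computes from the content after it.

# Hypothetical struct: a fixed 2-byte version plus a length-prefixed payload.
struct = {
    "version": (2, b"\x03\x03"),   # fixed content, exactly 2 bytes
    "payload_len": (2, None),      # 2-byte length prefix, filled in by generate()
    "payload": (None, b"hello"),   # content whose length goes into the prefix
}

data = Protocol.generate(struct)
print(data)  # b'\x03\x03\x00\x05hello'

# For parsing, the second tuple item is a reference value (None accepts anything).
parse_struct = {
    "version": (2, b"\x03\x03"),
    "payload_len": (2, None),
    "payload": (None, None),
}
print(Protocol.match(parse_struct, data, strict=True))
# {'version': b'\x03\x03', 'payload_len': b'\x00\x05', 'payload': b'hello'}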

metagenerate.py

Source: metagenerate.py (GitHub)

from presenter import *
import wiki
import sys
from outbuffer import *
from visitor import *
from entities import SpecificEnabler, DeprecatedSpecificEnabler, Application, PrettyPrinter
import logging
from fidoc import FIdoc


def generate_page(dw, outpage, meta):
    # out = FileBuffer(outfile)
    out = PageBuffer(dw, outpage)
    out << dw.heading(1, "Generated output from FIcontent's Meta-Structure")

    generated_content = []

    pp = PrettyPrinter()

    # Overall timeline of experiments
    #######################################

    generated_content += [
        ("Timeline of Experiments", ExperimentTimelinePresenter()),
    ]

    # Experiments per site
    #######################################

    sites = ["Zurich", "Brittany", "Lancaster", "Cologne", "Berlin", "Barcelona"]
    generated_content += [
        ("Experiments in %s" % s, ExperimentTimelinePresenter(s)) for s in sites
    ]

    # All tested scenarios
    #######################################

    generated_content += [
        ("All Tested Scenarios", ListPresenter(TestedScenariosVisitor(), pp.print_Scenario)),
    ]

    # All SEs and their relations
    #######################################

    generated_content += [(
        "Relations of %s SE" % se.get_name(),
        SEGraphPresenter(se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their descriptions
    #######################################

    generated_content += [(
        "Description of %s SE" % se.get_name(),
        PropertyPresenter(se, '/spec/documentation/what-it-does')
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their resources
    #######################################

    generated_content += [(
        "Resources of %s SE" % se.get_name(),
        ResourcesPresenter(dw, se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # All SEs and their release cycle
    #######################################

    generated_content += [(
        "Release cycle of %s SE" % se.get_name(),
        ReleaseCyclePresenter(dw, se, pp.dispatch)
    ) for se in meta.get_specific_enablers()
    ]

    # Dependencies per scenario
    #######################################

    # v = ExperimentsVisitor()
    # v.visit(meta_structure)

    # experiments = list(set([(e.scenario, e.site) for e in v.result]))

    # Dependencies per scenario (only actual usage)
    # generated_content += [
    #     ('Scenario "%s" on Site %s - USES' % e, DependencyPresenter(e[0], e[1], ['USES'])) for e in experiments
    # ]

    # Dependencies per scenario (actual and planned usage)
    # relations = ['USES', 'WILL USE', 'MAY USE']
    # generated_content += [
    #     ('Scenario "%s" on Site %s - ALL' % e, DependencyPresenter(e[0], e[1], relations)) for e in experiments
    # ]

    # Enablers used in experiments
    # niceenabler = lambda e: e.identifier + ' ' + e.entity

    # experiments = v.result  # [e for e in v.result if (e.site == "Barcelona") and (e.application.identifier == "Smart City Guide (Android App)")]
    # generated_content += [(
    #     'Enablers tested in Scenario "%s" on Site %s at %s' % (e.scenario, e.site, e.date),
    #     ListPresenter(
    #         EnablersTestedVisitor(e.application, ts = e.date),
    #         niceenabler
    #     )
    # ) for e in experiments
    # ]

    # GE Utilization
    #######################################

    generated_content += [(
        "Utilization of %s GE" % ge.get_name(),
        ListPresenter(UsedByVisitor(
            ge,
            follow_relations = ['USES'],
            collect_entities = [SpecificEnabler, DeprecatedSpecificEnabler, Application]
        ), pp.dispatch)
    ) for ge in meta.get_generic_enablers()
    ]

    # Overall Uptake of Generic Enablers
    #######################################

    generated_content += [
        ("Overall Uptake of Generic Enablers", UptakePresenter(pp.dispatch, hideunused=True))
    ]

    # FI-PPP SEis Usage and General Information
    #######################################

    col_fippp = ['name', 'owner', 'product', 'open-source', 'mode', 'last-update', 'next-update', 'assets', 'catalog']
    col_overview = ['name', 'owner', 'final-release']
    generated_content += [
        ("FI-PPP SEis Usage and General Information", CockpitPresenter(col_fippp, pp.dispatch)),
        ("Overview of FIcontent SEs", CockpitPresenter(col_overview, pp.dispatch, sort = ['name']))
    ]

    # SE Discovery Summary
    #######################################

    generated_content += [
        ("SE Discovery Summary", SummaryPresenter())
    ]

    # Incomplete/invalid SEis
    #######################################

    generated_content += [
        ("Incomplete and/or invalid SEs", ListPresenter(InvalidEntitiesVisitor('SE'), pp.dispatch))
    ]

    # GE Validation Survey
    #######################################
    # generated_content += [
    #     ("GE Validation Survey", GESurveyPresenter())
    # ]

    # Roadmap Releases
    #######################################
    # releases = set([rel.get_name() for rel in meta.get_releases()])
    roadmaps = ['socialtv', 'smartcity', 'gaming', 'common']

    for rel in meta.get_releases():
        generated_content += [(
            "Roadmap %s - %s" % (road, rel.get_name()),
            RoadmapPresenter(dw, road, rel)
        ) for road in roadmaps
        ]

    #######################################
    # main generation loop
    #######################################

    for h, p in generated_content:
        logging.info('Generating -> %s ...' % h)
        p.present(meta)
        out << dw.heading(2, h)
        p.dump(out)
        out << ''

    logging.info("Flushing generated content ...")
    out.flush()


def generate_meta_information(fidoc, generatedpage):
    dw = fidoc.get_wiki()
    meta = fidoc.get_meta_structure()
    # pub = fidoc.get_publisher()

    if meta is None:
        logging.fatal("Invalid meta structure.")

    generate_page(dw, generatedpage, meta)


if __name__ == "__main__":

    import wikiconfig

    metapage = ":FIcontent:private:meta:"
    if len(sys.argv) > 1:
        metapage = sys.argv[1]

    generatedpage = ":FIcontent:private:meta:generated"
    if len(sys.argv) > 2:
        generatedpage = sys.argv[2]

    try:
        logging.info("Connecting to remote DokuWiki at %s" % wikiconfig.url)
        # dw = wiki.DokuWikiLocal(url, 'pages', 'media')
        dw = wiki.DokuWikiRemote(wikiconfig.url, wikiconfig.user, wikiconfig.passwd)

        skipchecks = [
            # tv
            # 'Content Similarity', 'Audio Fingerprinting',
            # city
            # 'Local Information', 'Recommendation Services',
            # gaming
            # 'Visual Agent Design', 'Augmented Reality - Marker Tracking', 'Networked Virtual Character',
            # common
            # 'POI Storage', 'Content Sharing'
        ]

        logging.info("Loading FIdoc object ...")
        fidoc = FIdoc(dw, skipchecks)

        generate_meta_information(fidoc, generatedpage)

        logging.info("Finished")

    except logging.FatalError:
        pass
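Every entry appended to generated_content pairs a heading with a presenter object, and the main generation loop relies on just two methods: present(meta) to gather data from the meta structure and dump(out) to write it to the page buffer. As a rough sketch of that contract (the class below is a made-up stand-in, not one of the project's real presenters):

# Made-up presenter illustrating the present()/dump() contract the loop expects.
class SECountPresenter:
    def __init__(self):
        self.lines = []

    def present(self, meta):
        # collect whatever is needed from the meta structure
        ses = list(meta.get_specific_enablers())
        self.lines = ["Number of SEs: %d" % len(ses)]

    def dump(self, out):
        # write the prepared output to the page buffer
        for line in self.lines:
            out << line

# It would then be registered like any other entry:
# generated_content += [("SE Count", SECountPresenter())]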

batch_functions.py

Source: batch_functions.py (GitHub)

import itertools

import numpy as np
from PIL import Image
from tensorflow.keras.utils import Sequence


class OCR_generator(Sequence):
    """Generator for the input data to the OCR model. We're also preparing
    arrays for the CTC loss which are related to the output dimensions"""

    def __init__(self, base_generator, batch_size, char_to_lbl_dict,
                 img_h, keras_augmentor, epoch_size=500, validation=False):
        """Inputs
        base_generator: the base trdg generator
        batch_size: number of examples fed to the NN simultaneously
        char_to_lbl_dict: mapping from character to its label (int number)
        img_h: we assume that the input here is already scaled to the correct height
        keras_augmentor: Keras augmentor for extra augmentation the current base
            generator doesn't provide, for example zoom, translate etc."""
        self.base_generator = base_generator
        self.batch_size = batch_size
        self.char_to_lbl_dict = char_to_lbl_dict
        self.img_h = img_h
        self.epoch_size = epoch_size
        self.validation = validation
        self.keras_augmentor = keras_augmentor
        # total number of unique characters
        self.num_chars = len(char_to_lbl_dict)

    def __len__(self):
        """Denotes the number of batches per epoch
        :return: number of batches per epoch"""
        return self.epoch_size

    def __getitem__(self, index):
        """Generate one batch of data"""
        # stores the length (number of characters) of each word in a batch
        label_lens = np.zeros((self.batch_size), dtype=np.float32)
        # generate content for the batch as a list of lists
        generated_content = list(list(tup) for tup in itertools.islice(self.base_generator, self.batch_size))
        # preprocess the batch content
        generated_content, img_w, max_word_len_batch = \
            self.preprocess_batch_imgs(generated_content)
        # allocate the vectors for batch labels (integers for each character in a word)
        # and the padded + preprocessed images
        batch_labels = np.zeros((self.batch_size, max_word_len_batch), dtype=np.float32)
        batch_imgs = np.zeros((self.batch_size, img_w, self.img_h, 3), dtype=np.float32)
        # the number of time-distributed values, in other words the length of the
        # time axis in the output, or equivalently the width of the image after
        # convolutions; needed as an input to the CTC loss.
        # Each maxpooling halves the width dimension, so in our model the scaling
        # is 1/4 with 2 maxpoolings.
        t_dist_dim = int(img_w / 4)
        # we need to give it for every entry
        input_length = np.full((self.batch_size), t_dist_dim, dtype=np.float32)
        # fill the batch
        for batch_ind in range(self.batch_size):
            # get a new image and the content word for it
            img_arr, word = generated_content[batch_ind]
            batch_imgs[batch_ind, :, :] = img_arr
            # the labels for each word: even if the max number of characters is, say, 10
            # and the word is just 5 characters, the first 5 positions are filled by the
            # character labels and the rest are arbitrary (zeros in our implementation);
            # the real loss ignores them because of the label_length input
            labels_arr = np.array([self.char_to_lbl_dict[char] for char in word])
            batch_labels[batch_ind, 0:len(labels_arr)] = labels_arr
            label_lens[batch_ind] = len(word)
        # now the hacky part:
        # Keras requires y_pred and y_true in the loss function to have the same shape,
        # but the CTC loss uses y_pred of shape (batch_size, t_dist_dim, num_chars) from
        # the NN, and batch_labels, input_length, label_lens are the "y_true" but have
        # different dimensions, so pack them into (batch_size, t_dist_dim, num_chars)
        # and later unpack in the loss to stop the whining.
        y_true = np.zeros((self.batch_size, t_dist_dim, self.num_chars), dtype=np.float32)
        y_true[:, 0:max_word_len_batch, 0] = batch_labels
        y_true[:, 0, 1] = label_lens
        y_true[:, 0, 2] = input_length
        if self.validation:
            # for validation we return slightly different things so we can do
            # fancy stuff in callbacks
            return batch_imgs, batch_labels, input_length, label_lens
        else:  # return x, y for the model
            return batch_imgs, y_true

    def preprocess_batch_imgs(self, generated_content):
        """Function to do augmentations, pad images, return the longest word length etc."""
        # check the largest image width and word length in the batch
        pil_images = [img for img, word in generated_content]
        max_width = max([img.size[0] for img in pil_images])
        max_word_len_batch = max([len(word) for img, word in generated_content])
        # expand the image width to a multiple of 4 so that the maxpoolings result
        # in a well-defined integer length for the mapped t_dist dimension ("new width")
        if max_width % 4 == 0:
            img_w = max_width
        else:
            img_w = max_width + 4 - (max_width % 4)
        # augment batch images
        for batch_ind in range(self.batch_size):
            # pad the image width to the largest (fixed) image width
            pil_img = pil_images[batch_ind]
            width, height = pil_img.size
            new_img = Image.new(pil_img.mode, (img_w, self.img_h), (255, 255, 255))
            new_img.paste(pil_img, ((img_w - width) // 2, 0))
            # convert to numpy array
            img_arr = np.array(new_img)

            # some additional augmentation
            img_arr = self.keras_augmentor.random_transform(img_arr)
            # scale by 255 so that the values are between 0 and 1, and save to
            # the batch; also transpose because the "time axis" is the width
            generated_content[batch_ind][0] = img_arr.transpose((1, 0, 2)) / 255
        ...
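A minimal sketch of how this generator might be wired up, assuming the truncated preprocess_batch_imgs ends by returning (generated_content, img_w, max_word_len_batch) as its call site expects; the dummy base generator below is a stand-in for trdg and is not part of the original code:

from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator

def dummy_base_generator():
    # stand-in for a trdg generator: yields (PIL image, word) pairs forever
    while True:
        yield Image.new("RGB", (64, 32), (255, 255, 255)), "hello"

char_to_lbl_dict = {c: i for i, c in enumerate("ehlo")}
augmentor = ImageDataGenerator()  # no extra augmentation for this sketch

train_gen = OCR_generator(
    base_generator=dummy_base_generator(),
    batch_size=4,
    char_to_lbl_dict=char_to_lbl_dict,
    img_h=32,
    keras_augmentor=augmentor,
)

batch_imgs, y_true = train_gen[0]  # one (x, y) batch, e.g. for model.fit(train_gen, ...)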
