Best Python code snippet using yandex-tank
requirement.py
Source:requirement.py  
1# Licensed under the Apache License, Version 2.0 (the "License"); you may2# not use this file except in compliance with the License. You may obtain3# a copy of the License at4#5#      http://www.apache.org/licenses/LICENSE-2.06#7# Unless required by applicable law or agreed to in writing, software8# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT9# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the10# License for the specific language governing permissions and limitations11# under the License.12"""Requirements handling."""13# This module has no IO at all, and none should be added.14import collections15import re16import pkg_resources17# A header for the requirements file(s).18# TODO(lifeless): Remove this once constraints are in use.19_REQS_HEADER = [20    '# The order of packages is significant, because pip processes '21    'them in the order\n',22    '# of appearance. Changing the order has an impact on the overall '23    'integration\n',24    '# process, which may cause wedges in the gate later.\n',25]26class Requirement(collections.namedtuple('Requirement',27                                         ['package', 'location', 'specifiers',28                                          'markers', 'comment', 'extras'])):29    def __new__(cls, package, location, specifiers, markers, comment,30                extras=None):31        return super(Requirement, cls).__new__(32            cls, package, location, specifiers, markers, comment,33            frozenset(extras or ()))34Requirements = collections.namedtuple('Requirements', ['reqs'])35url_re = re.compile(36    '^(?P<url>\s*(?:-e\s)?\s*(?:(?:git+)?https|http|file)://[^#]*)'37    '#egg=(?P<name>[-\.\w]+)')38def canonical_name(req_name):39    """Return the canonical form of req_name."""40    return pkg_resources.safe_name(req_name).lower()41def parse(content, permit_urls=False):42    return to_dict(to_reqs(content, permit_urls=permit_urls))43def parse_line(req_line, 
permit_urls=False):44    """Parse a single line of a requirements file.45    requirements files here are a subset of pip requirements files: we don't46    try to parse URL entries, or pip options like -f and -e. Those are not47    permitted in global-requirements.txt. If encountered in a synchronised48    file such as requirements.txt or test-requirements.txt, they are illegal49    but currently preserved as-is.50    They may of course be used by local test configurations, just not51    committed into the OpenStack reference branches.52    :param permit_urls: If True, urls are parsed into Requirement tuples.53        By default they are not, because they cannot be reflected into54        setuptools kwargs, and thus the default is conservative. When55        urls are permitted, -e *may* be supplied at the start of the line.56    """57    end = len(req_line)58    hash_pos = req_line.find('#')59    if hash_pos < 0:60        hash_pos = end61    # Don't find urls that are in comments.62    if '://' in req_line[:hash_pos]:63        if permit_urls:64            # We accept only a subset of urls here - they have to have an egg65            # name so that we can tell what project its for without doing66            # network access. 
Egg markers use a fragment, so we need to pull67            # out url from the entire line.68            m = url_re.match(req_line)69            name = m.group('name')70            location = m.group('url')71            parse_start = m.end('name')72            hash_pos = req_line[parse_start:].find('#')73            if hash_pos < 0:74                hash_pos = end75            else:76                hash_pos = hash_pos + parse_start77        else:78            # Trigger an early failure before we look for ':'79            pkg_resources.Requirement.parse(req_line)80    else:81        parse_start = 082        location = ''83    semi_pos = req_line.find(';', parse_start, hash_pos)84    colon_pos = req_line.find(':', parse_start, hash_pos)85    marker_pos = max(semi_pos, colon_pos)86    if marker_pos < 0:87        marker_pos = hash_pos88    markers = req_line[marker_pos + 1:hash_pos].strip()89    if hash_pos != end:90        comment = req_line[hash_pos:]91    else:92        comment = ''93    req_line = req_line[parse_start:marker_pos]94    extras = ()95    if parse_start:96        # We parsed a url before97        specifier = ''98    elif req_line:99        # Pulled out a requirement100        parsed = pkg_resources.Requirement.parse(req_line)101        name = parsed.project_name102        extras = parsed.extras103        specifier = str(parsed.specifier)104    else:105        # Comments / blank lines etc.106        name = ''107        specifier = ''108    return Requirement(name, location, specifier, markers, comment, extras)109def to_content(reqs, marker_sep=';', line_prefix='', prefix=True):110    lines = []111    if prefix:112        lines += _REQS_HEADER113    for req in reqs.reqs:114        comment_p = ' ' if req.package else ''115        comment = (comment_p + req.comment if req.comment else '')116        marker = marker_sep + req.markers if req.markers else ''117        package = line_prefix + req.package if req.package else ''118        location = req.location 
+ '#egg=' if req.location else ''119        lines.append('%s%s%s%s%s\n' % (120            location, package, req.specifiers, marker, comment))121    return u''.join(lines)122def to_dict(req_sequence):123    reqs = dict()124    for req, req_line in req_sequence:125        if req is not None:126            key = canonical_name(req.package)127            reqs.setdefault(key, []).append((req, req_line))128    return reqs129def _pass_through(req_line, permit_urls=False):130    """Identify unparsable lines."""131    if permit_urls:132        return (req_line.startswith('http://tarballs.openstack.org/') or133                req_line.startswith('-f'))134    else:135        return (req_line.startswith('http://tarballs.openstack.org/') or136                req_line.startswith('-e') or137                req_line.startswith('-f'))138def to_reqs(content, permit_urls=False):139    for content_line in content.splitlines(True):140        req_line = content_line.strip()141        if _pass_through(req_line, permit_urls=permit_urls):142            yield None, content_line143        else:..._update_metadata.py
Source:_update_metadata.py  
#!/usr/bin/env python
"""Rebuild pgn_index.json from the PGN files found in the pgn/ directory.

Scans every *.pgn file, extracts game tag metadata (movetext is kept but
not interpreted), and writes a JSON index used by the front end.
"""
import glob
import json
import re
from os.path import basename, dirname, join, realpath

pgn_dir = 'pgn'
index_file = 'pgn_index.json'

# PGN tag names, as per PGN spec
EVENT_TAG = 'Event'

# PGN parsing states
PARSE_START = 0
PARSE_TAGS = 1
PARSE_MOVES = 2

# FIX: compile once at module level instead of on every parse_tag() call.
_TAGLINE_RE = re.compile('^(.+) "(.+)"$')


def main():
    global index_file, pgn_dir
    normalize_paths()
    # FIX: print() calls — the original used Python-2-only print statements.
    print('Updating game metadata...')
    all_metadata = []
    for pgn_file_path in glob.glob(join(pgn_dir, '*.pgn')):
        pgn_file = basename(pgn_file_path)
        print('Processing pgn file: ' + pgn_file)
        pgn_games_data = parse_pgn(pgn_file_path)
        add_file_metadata(all_metadata, pgn_file, pgn_games_data)
    write_metadata(all_metadata, index_file)
    print('Updated index file [' + index_file + ']')


def parse_pgn(pgn_file):
    """Parse one PGN file into a list of per-game metadata dicts.

    Each dict maps tag name -> tag value, plus a 'movetext' entry holding
    the first movetext line (the moves themselves are not interpreted).
    Malformed input is reported via parse_error() and recovery is best-effort.
    """
    with open(pgn_file, 'r') as f:
        state = PARSE_START
        pgn_data = []
        # FIX: initialise so a malformed first line cannot hit an unbound name.
        curr_game = {}
        i_line = 0
        for line in f:
            line = line.strip()
            if line.startswith('['):
                if state == PARSE_START:
                    curr_game = {}
                    state = PARSE_TAGS
                elif state == PARSE_MOVES:
                    # Tag while reading moves: missing blank separator.
                    parse_error(pgn_file, i_line)
                    add_game_metadata(pgn_data, curr_game)
                    curr_game = {}
                    state = PARSE_TAGS
                # else: continuing tags for the current game.
                tag_name, tag_value = parse_tag(line)
                curr_game[tag_name] = tag_value
            elif line.startswith('1. '):  # TODO: make movetext parsing more robust
                if state == PARSE_START or state == PARSE_TAGS:
                    # Movetext arrived without the expected blank separator.
                    parse_error(pgn_file, i_line)
                    curr_game = {}
                    state = PARSE_MOVES
                # We only need metadata, but keep the first movetext line.
                curr_game["movetext"] = line
            elif line == '':
                if state == PARSE_TAGS:
                    state = PARSE_MOVES
                elif state == PARSE_MOVES:
                    add_game_metadata(pgn_data, curr_game)
                    state = PARSE_START
                # PARSE_START: blank lines between games are fine.
            else:
                parse_error(pgn_file, i_line)
                add_game_metadata(pgn_data, curr_game)
                state = PARSE_START
            i_line = i_line + 1
        # FIX: flush the last game at EOF — the original dropped it when the
        # file did not end with a blank line.
        if state != PARSE_START:
            add_game_metadata(pgn_data, curr_game)
    return pgn_data


def parse_tag(line):
    """Split a '[Name "Value"]' PGN tag line into (name, value)."""
    m = _TAGLINE_RE.match(line[1:-1])
    return m.group(1), m.group(2)


def normalize_paths():
    """Make the module-level paths absolute, relative to this script."""
    global index_file, pgn_dir
    index_file = join(dirname(realpath(__file__)), index_file)
    pgn_dir = join(dirname(realpath(__file__)), pgn_dir)


def parse_error(pgn_file, i_line):
    """Report a parse problem; parsing continues (best-effort)."""
    print('!! Parse error at line ' + str(i_line) + ' of file ' + pgn_file)


def add_game_metadata(pgn_data, curr_game):
    """Append curr_game if it is a non-empty dict."""
    if curr_game:
        pgn_data.append(curr_game)


def add_file_metadata(all_metadata, pgn_file, games_data):
    """Append one index entry for pgn_file, labelled by its first Event tag."""
    pgn_label = pgn_file
    # Use event name as label if present.
    for g in games_data:
        if EVENT_TAG in g:
            pgn_label = g[EVENT_TAG]
            break
    pgn_data = {}
    pgn_data['file'] = 'pgn/' + pgn_file
    pgn_data['label'] = pgn_label
    pgn_data['games'] = games_data
    all_metadata.append(pgn_data)


def dump_games(pgn_data):
    """Debug helper: print every game's tags."""
    for g in pgn_data:
        print('------')
        for k in g:
            print(k + ':', g[k])


def write_metadata(pgn_data, index_file):
    """Serialise the index to index_file as JSON."""
    with open(index_file, 'w') as f:
        json.dump(pgn_data, f)


if __name__ == '__main__':
    main()
The LambdaTest Learning Hub compiles step-by-step guides — from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios — to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.
You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.
Get 100 minutes of automation testing free!
