Best Python code snippet using pytest-bdd_python
TemplatedPathPlugin.js
Source:TemplatedPathPlugin.js  
...
		const length = arg && parseInt(arg, 10);
		if (length && handler) {
			result = handler(length);
		} else {
			const hash = replacer(match, arg, input);
			result = length ? hash.slice(0, length) : hash;
		}
		if (assetInfo) {
			assetInfo.immutable = true;
			if (Array.isArray(assetInfo[hashName])) {
				assetInfo[hashName] = [...assetInfo[hashName], result];
			} else if (assetInfo[hashName]) {
				assetInfo[hashName] = [assetInfo[hashName], result];
			} else {
				assetInfo[hashName] = result;
			}
		}
		return result;
	};
	return fn;
};
const replacer = (value, allowEmpty) => {
	const fn = (match, arg, input) => {
		if (typeof value === "function") {
			value = value();
		}
		if (value === null || value === undefined) {
			if (!allowEmpty) {
				throw new Error(
					`Path variable ${match} not implemented in this context: ${input}`
				);
			}
			return "";
		} else {
			return `${value}`;
		}
	};
	return fn;
};
const deprecationCache = new Map();
const deprecatedFunction = (() => () => {})();
const deprecated = (fn, message, code) => {
	let d = deprecationCache.get(message);
	if (d === undefined) {
		d = util.deprecate(deprecatedFunction, message, code);
		deprecationCache.set(message, d);
	}
	return (...args) => {
		d();
		return fn(...args);
	};
};
/**
 * @param {string | function(PathData, AssetInfo=): string} path the raw path
 * @param {PathData} data context data
 * @param {AssetInfo} assetInfo extra info about the asset (will be written to)
 * @returns {string} the interpolated path
 */
const replacePathVariables = (path, data, assetInfo) => {
	const chunkGraph = data.chunkGraph;
	/** @type {Map<string, Function>} */
	const replacements = new Map();
	// Filename context
	//
	// Placeholders
	//
	// for /some/path/file.js?query#fragment:
	// [file] - /some/path/file.js
	// [query] - ?query
	// [fragment] - #fragment
	// [base] - file.js
	// [path] - /some/path/
	// [name] - file
	// [ext] - .js
	if (typeof data.filename === "string") {
		// check that filename is data uri
		let match = data.filename.match(/^data:([^;,]+)/);
		if (match) {
			const ext = mime.extension(match[1]);
			const emptyReplacer = replacer("", true);
			replacements.set("file", emptyReplacer);
			replacements.set("query", emptyReplacer);
			replacements.set("fragment", emptyReplacer);
			replacements.set("path", emptyReplacer);
			replacements.set("base", emptyReplacer);
			replacements.set("name", emptyReplacer);
			replacements.set("ext", replacer(ext ? `.${ext}` : "", true));
			// Legacy
			replacements.set(
				"filebase",
				deprecated(
					emptyReplacer,
					"[filebase] is now [base]",
					"DEP_WEBPACK_TEMPLATE_PATH_PLUGIN_REPLACE_PATH_VARIABLES_FILENAME"
				)
			);
		} else {
			const { path: file, query, fragment } = parseResource(data.filename);
			const ext = extname(file);
			const base = basename(file);
			const name = base.slice(0, base.length - ext.length);
			const path = file.slice(0, file.length - base.length);
			replacements.set("file", replacer(file));
			replacements.set("query", replacer(query, true));
			replacements.set("fragment", replacer(fragment, true));
			replacements.set("path", replacer(path, true));
			replacements.set("base", replacer(base));
			replacements.set("name", replacer(name));
			replacements.set("ext", replacer(ext, true));
			// Legacy
			replacements.set(
				"filebase",
				deprecated(
					replacer(base),
					"[filebase] is now [base]",
					"DEP_WEBPACK_TEMPLATE_PATH_PLUGIN_REPLACE_PATH_VARIABLES_FILENAME"
				)
			);
		}
	}
	// Compilation context
	//
	// Placeholders
	//
	// [fullhash] - data.hash (3a4b5c6e7f)
	//
	// Legacy Placeholders
	//
	// [hash] - data.hash (3a4b5c6e7f)
	if (data.hash) {
		const hashReplacer = hashLength(
			replacer(data.hash),
			data.hashWithLength,
			assetInfo,
			"fullhash"
		);
		replacements.set("fullhash", hashReplacer);
		// Legacy
		replacements.set(
			"hash",
			deprecated(
				hashReplacer,
				"[hash] is now [fullhash] (also consider using [chunkhash] or [contenthash], see documentation for details)",
				"DEP_WEBPACK_TEMPLATE_PATH_PLUGIN_REPLACE_PATH_VARIABLES_HASH"
			)
		);
	}
	// Chunk Context
	//
	// Placeholders
	//
	// [id] - chunk.id (0.js)
	// [name] - chunk.name (app.js)
	// [chunkhash] - chunk.hash (7823t4t4.js)
	// [contenthash] - chunk.contentHash[type] (3256u3zg.js)
	if (data.chunk) {
		const chunk = data.chunk;
		const contentHashType = data.contentHashType;
		const idReplacer = replacer(chunk.id);
		const nameReplacer = replacer(chunk.name || chunk.id);
		const chunkhashReplacer = hashLength(
			replacer(chunk instanceof Chunk ? chunk.renderedHash : chunk.hash),
			"hashWithLength" in chunk ? chunk.hashWithLength : undefined,
			assetInfo,
			"chunkhash"
		);
		const contenthashReplacer = hashLength(
			replacer(
				data.contentHash ||
					(contentHashType &&
						chunk.contentHash &&
						chunk.contentHash[contentHashType])
			),
			data.contentHashWithLength ||
				("contentHashWithLength" in chunk && chunk.contentHashWithLength
					? chunk.contentHashWithLength[contentHashType]
					: undefined),
			assetInfo,
			"contenthash"
		);
		replacements.set("id", idReplacer);
		replacements.set("name", nameReplacer);
		replacements.set("chunkhash", chunkhashReplacer);
		replacements.set("contenthash", contenthashReplacer);
	}
	// Module Context
	//
	// Placeholders
	//
	// [id] - module.id (2.png)
	// [hash] - module.hash (6237543873.png)
	//
	// Legacy Placeholders
	//
	// [moduleid] - module.id (2.png)
	// [modulehash] - module.hash (6237543873.png)
	if (data.module) {
		const module = data.module;
		const idReplacer = replacer(() =>
			prepareId(
				module instanceof Module ? chunkGraph.getModuleId(module) : module.id
			)
		);
		const moduleHashReplacer = hashLength(
			replacer(() =>
				module instanceof Module
					? chunkGraph.getRenderedModuleHash(module, data.runtime)
					: module.hash
			),
			"hashWithLength" in module ? module.hashWithLength : undefined,
			assetInfo,
			"modulehash"
		);
		const contentHashReplacer = hashLength(
			replacer(data.contentHash),
			undefined,
			assetInfo,
			"contenthash"
		);
		replacements.set("id", idReplacer);
		replacements.set("modulehash", moduleHashReplacer);
		replacements.set("contenthash", contentHashReplacer);
		replacements.set(
			"hash",
			data.contentHash ? contentHashReplacer : moduleHashReplacer
		);
		// Legacy
		replacements.set(
			"moduleid",
			deprecated(
				idReplacer,
				"[moduleid] is now [id]",
				"DEP_WEBPACK_TEMPLATE_PATH_PLUGIN_REPLACE_PATH_VARIABLES_MODULE_ID"
			)
		);
	}
	// Other things
	if (data.url) {
		replacements.set("url", replacer(data.url));
	}
	if (typeof data.runtime === "string") {
		replacements.set(
			"runtime",
			replacer(() => prepareId(data.runtime))
		);
	} else {
		replacements.set("runtime", replacer("_"));
	}
	if (typeof path === "function") {
		path = path(data, assetInfo);
	}
	path = path.replace(REGEXP, (match, content) => {
		if (content.length + 2 === match.length) {
			const contentMatch = /^(\w+)(?::(\w+))?$/.exec(content);
			if (!contentMatch) return match;
			const [, kind, arg] = contentMatch;
			const replacer = replacements.get(kind);
			if (replacer !== undefined) {
				return replacer(match, arg, path);
			}
		} else if (match.startsWith("[\\") && match.endsWith("\\]")) {
			return `[${match.slice(2, -2)}]`;
		}
		return match;
	});
	return path;
};
const plugin = "TemplatedPathPlugin";
class TemplatedPathPlugin {
	/**
	 * Apply the plugin
	 * @param {Compiler} compiler the compiler instance
	 * @returns {void}
...
replacers.py
Source:replacers.py  
import re, csv, yaml, enchant
from nltk.corpus import wordnet
from nltk.metrics import edit_distance
##################################################
## Replacing Words Matching Regular Expressions ##
##################################################
replacement_patterns = [
	(r'won\'t', 'will not'),
	(r'can\'t', 'cannot'),
	(r'i\'m', 'i am'),
	(r'ain\'t', 'is not'),
	(r'(\w+)\'ll', '\g<1> will'),
	(r'(\w+)n\'t', '\g<1> not'),
	(r'(\w+)\'ve', '\g<1> have'),
	(r'(\w+)\'s', '\g<1> is'),
	(r'(\w+)\'re', '\g<1> are'),
	(r'(\w+)\'d', '\g<1> would'),
]
class RegexpReplacer(object):
	""" Replaces regular expression in a text.
	>>> replacer = RegexpReplacer()
	>>> replacer.replace("can't is a contraction")
	'cannot is a contraction'
	>>> replacer.replace("I should've done that thing I didn't do")
	'I should have done that thing I did not do'
	"""
	def __init__(self, patterns=replacement_patterns):
		self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]

	def replace(self, text):
		s = text

		for (pattern, repl) in self.patterns:
			s = re.sub(pattern, repl, s)

		return s
####################################
## Replacing Repeating Characters ##
####################################
class RepeatReplacer(object):
	""" Removes repeating characters until a valid word is found.
	>>> replacer = RepeatReplacer()
	>>> replacer.replace('looooove')
	'love'
	>>> replacer.replace('oooooh')
	'ooh'
	>>> replacer.replace('goose')
	'goose'
	"""
	def __init__(self):
		self.repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
		self.repl = r'\1\2\3'
	def replace(self, word):
		if wordnet.synsets(word):
			return word

		repl_word = self.repeat_regexp.sub(self.repl, word)

		if repl_word != word:
			return self.replace(repl_word)
		else:
			return repl_word
######################################
## Spelling Correction with Enchant ##
######################################
class SpellingReplacer(object):
	""" Replaces misspelled words with a likely suggestion based on shortest
	edit distance.
	>>> replacer = SpellingReplacer()
	>>> replacer.replace('cookbok')
	'cookbook'
	"""
	def __init__(self, dict_name='en', max_dist=2):
		self.spell_dict = enchant.Dict(dict_name)
		self.max_dist = max_dist

	def replace(self, word):
		if self.spell_dict.check(word):
			return word

		suggestions = self.spell_dict.suggest(word)

		if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:
			return suggestions[0]
		else:
			return word
class CustomSpellingReplacer(SpellingReplacer):
	""" SpellingReplacer that allows passing a custom enchant dictionary, such
	a DictWithPWL.
	>>> d = enchant.DictWithPWL('en_US', 'mywords.txt')
	>>> replacer = CustomSpellingReplacer(d)
	>>> replacer.replace('nltk')
	'nltk'
	"""
	def __init__(self, spell_dict, max_dist=2):
		self.spell_dict = spell_dict
		self.max_dist = max_dist
########################
## Replacing Synonyms ##
########################
class WordReplacer(object):
	""" WordReplacer that replaces a given word with a word from the word_map,
	or if the word isn't found, returns the word as is.
	>>> replacer = WordReplacer({'bday': 'birthday'})
	>>> replacer.replace('bday')
	'birthday'
	>>> replacer.replace('happy')
	'happy'
	"""
	def __init__(self, word_map):
		self.word_map = word_map

	def replace(self, word):
		return self.word_map.get(word, word)
class CsvWordReplacer(WordReplacer):
	""" WordReplacer that reads word mappings from a csv file.
	>>> replacer = CsvWordReplacer('synonyms.csv')
	>>> replacer.replace('bday')
	'birthday'
	>>> replacer.replace('happy')
	'happy'
	"""
	def __init__(self, fname):
		word_map = {}

		for line in csv.reader(open(fname)):
			word, syn = line
			word_map[word] = syn

		super(CsvWordReplacer, self).__init__(word_map)
class YamlWordReplacer(WordReplacer):
	""" WordReplacer that reads word mappings from a yaml file.
	>>> replacer = YamlWordReplacer('synonyms.yaml')
	>>> replacer.replace('bday')
	'birthday'
	>>> replacer.replace('happy')
	'happy'
	"""
	def __init__(self, fname):
		word_map = yaml.load(open(fname))
		super(YamlWordReplacer, self).__init__(word_map)
#######################################
## Replacing Negations with Antonyms ##
#######################################
class AntonymReplacer(object):
	def replace(self, word, pos=None):
		""" Returns the antonym of a word, but only if there is no ambiguity.
		>>> replacer = AntonymReplacer()
		>>> replacer.replace('good')
		>>> replacer.replace('uglify')
		'beautify'
		>>> replacer.replace('beautify')
		'uglify'
		"""
		antonyms = set()

		for syn in wordnet.synsets(word, pos=pos):
			for lemma in syn.lemmas():
				for antonym in lemma.antonyms():
					antonyms.add(antonym.name())

		if len(antonyms) == 1:
			return antonyms.pop()
		else:
			return None

	def replace_negations(self, sent):
		""" Try to replace negations with antonyms in the tokenized sentence.
		>>> replacer = AntonymReplacer()
		>>> replacer.replace_negations(['do', 'not', 'uglify', 'our', 'code'])
		['do', 'beautify', 'our', 'code']
		>>> replacer.replace_negations(['good', 'is', 'not', 'evil'])
		['good', 'is', 'not', 'evil']
		"""
		i, l = 0, len(sent)
		words = []

		while i < l:
			word = sent[i]

			if word == 'not' and i+1 < l:
				ant = self.replace(sent[i+1])

				if ant:
					words.append(ant)
					i += 2
					continue

			words.append(word)
			i += 1

		return words
class AntonymWordReplacer(WordReplacer, AntonymReplacer):
	""" AntonymReplacer that uses a custom mapping instead of WordNet.
	Order of inheritance is very important, this class would not work if
	AntonymReplacer comes before WordReplacer.
	>>> replacer = AntonymWordReplacer({'evil': 'good'})
	>>> replacer.replace_negations(['good', 'is', 'not', 'evil'])
	['good', 'is', 'good']
	"""
	pass
if __name__ == '__main__':
	import doctest
...
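The replacer classes above are independent of one another, but in practice they are usually chained: contractions are expanded on the raw string first, then individual tokens are cleaned up. The sketch below is illustrative glue code, not part of replacers.py; it assumes replacers.py is importable, that NLTK's punkt and wordnet data are downloaded, and that a pyenchant 'en' dictionary is installed. The function name normalize, the isalpha guard, and the ordering of the steps are assumptions made for the example.

# pipeline.py - hypothetical glue code, not part of replacers.py.
# Assumes replacers.py is on the path, NLTK's punkt and wordnet data are
# downloaded, and a pyenchant 'en' dictionary is installed.
from nltk.tokenize import word_tokenize

from replacers import RegexpReplacer, RepeatReplacer, SpellingReplacer


def normalize(text):
	"""Expand contractions, collapse repeated letters, then spell-correct."""
	regexp_replacer = RegexpReplacer()
	repeat_replacer = RepeatReplacer()
	spelling_replacer = SpellingReplacer()

	# Contractions are expanded on the raw string, before tokenization,
	# so "can't" becomes "cannot" instead of the tokens ['ca', "n't"].
	tokens = word_tokenize(regexp_replacer.replace(text))

	# Repeats are collapsed before spell checking so the checker sees a
	# plausible candidate ('looooove' -> 'love') rather than the raw token.
	# Punctuation and other non-alphabetic tokens are passed through as-is.
	return [
		spelling_replacer.replace(repeat_replacer.replace(token))
		if token.isalpha() else token
		for token in tokens
	]


if __name__ == '__main__':
	print(normalize("I looooove this book, can't wait to read it"))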
chapter2.py
Source:chapter2.py
1"""2==============3Stemming Words4==============5>>> from nltk.stem import PorterStemmer6>>> stemmer = PorterStemmer()7>>> stemmer.stem('cooking')8'cook'9>>> stemmer.stem('cookery')10'cookeri'11>>> from nltk.stem import LancasterStemmer12>>> stemmer = LancasterStemmer()13>>> stemmer.stem('cooking')14'cook'15>>> stemmer.stem('cookery')16'cookery'17>>> from nltk.stem import RegexpStemmer18>>> stemmer = RegexpStemmer('ing')19>>> stemmer.stem('cooking')20'cook'21>>> stemmer.stem('cookery')22'cookery'23>>> stemmer.stem('ingleside')24'leside'25>>> from nltk.stem import SnowballStemmer26>>> SnowballStemmer.languages27('danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', 'spanish', 'swedish')28>>> spanish_stemmer = SnowballStemmer('spanish')29>>> spanish_stemmer.stem('hola')30'hol'31==============================32Lemmatising Words with WordNet33==============================34>>> from nltk.stem import WordNetLemmatizer35>>> lemmatizer = WordNetLemmatizer()36>>> lemmatizer.lemmatize('cooking')37'cooking'38>>> lemmatizer.lemmatize('cooking', pos='v')39'cook'40>>> lemmatizer.lemmatize('cookbooks')41'cookbook'42>>> from nltk.stem import PorterStemmer43>>> stemmer = PorterStemmer()44>>> stemmer.stem('believes')45'believ'46>>> lemmatizer.lemmatize('believes')47'belief'48>>> stemmer.stem('buses')49'buse'50>>> lemmatizer.lemmatize('buses')51'bus'52>>> stemmer.stem('bus')53'bu'54============================================55Replacing Words Matching Regular Expressions56============================================57>>> from replacers import RegexpReplacer58>>> replacer = RegexpReplacer()59>>> replacer.replace("can't is a contraction")60'cannot is a contraction'61>>> replacer.replace("I should've done that thing I didn't do")62'I should have done that thing I did not do'63>>> from nltk.tokenize import word_tokenize64>>> from replacers import RegexpReplacer65>>> replacer = RegexpReplacer()66>>> word_tokenize("can't is a contraction")67['ca', "n't", 'is', 'a', 'contraction']68>>> word_tokenize(replacer.replace("can't is a contraction"))69['can', 'not', 'is', 'a', 'contraction']70=============================71Removing Repeating Characters72=============================73>>> from replacers import RepeatReplacer74>>> replacer = RepeatReplacer()75>>> replacer.replace('looooove')76'love'77>>> replacer.replace('oooooh')78'ooh'79>>> replacer.replace('goose')80'goose'81================================82Spelling Correction with Enchant83================================84>>> from replacers import SpellingReplacer85>>> replacer = SpellingReplacer()86>>> replacer.replace('cookbok')87'cookbook'88>>> import enchant89>>> d = enchant.Dict('en')90>>> d.suggest('languege')91['language', 'languages', 'languor', "language's"]92>>> from nltk.metrics import edit_distance93>>> edit_distance('language', 'languege')94195>>> edit_distance('language', 'languor')96397>>> enchant.list_languages()98['en', 'en_CA', 'en_GB', 'en_US']99>>> dUS = enchant.Dict('en_US')100>>> dUS.check('theater')101True102>>> dGB = enchant.Dict('en_GB')103>>> dGB.check('theater')104False105>>> us_replacer = SpellingReplacer('en_US')106>>> us_replacer.replace('theater')107'theater'108>>> gb_replacer = SpellingReplacer('en_GB')109>>> gb_replacer.replace('theater')110'theatre'111>>> d = enchant.Dict('en_US')112>>> d.check('nltk')113False114>>> d = enchant.DictWithPWL('en_US', 'mywords.txt')115>>> d.check('nltk')116True117>>> from replacers import 
CustomSpellingReplacer118>>> d = enchant.DictWithPWL('en_US', 'mywords.txt')119>>> replacer = CustomSpellingReplacer(d)120>>> replacer.replace('nltk')121'nltk'122=================================123Replacing Negations with Antonyms124=================================125>>> from replacers import AntonymReplacer126>>> replacer = AntonymReplacer()127>>> replacer.replace('good')128>>> replacer.replace('uglify')129'beautify'130>>> sent = ["let's", 'not', 'uglify', 'our', 'code']131>>> replacer.replace_negations(sent)132["let's", 'beautify', 'our', 'code']133>>> from replacers import AntonymWordReplacer134>>> replacer = AntonymWordReplacer({'evil': 'good'})135>>> replacer.replace_negations(['good', 'is', 'not', 'evil'])136['good', 'is', 'good']137"""138if __name__ == '__main__':139	import doctest...helpers.js
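chapter2.py is cut off above immediately after import doctest. A conventional ending for a doctest-only module like this is a doctest.testmod() call; the runner below is a sketch of that pattern under that assumption, not the truncated original. The same examples can also be collected with pytest via pytest --doctest-modules chapter2.py.

# run_doctests.py - hypothetical runner; chapter2.py is truncated above right
# after "import doctest", so the testmod() call here is an assumed,
# conventional ending rather than the original source.
import doctest

import chapter2  # the module whose docstring holds the examples above


if __name__ == '__main__':
	# testmod returns a TestResults(failed, attempted) tuple.
	results = doctest.testmod(chapter2, verbose=False)
	print('%d examples attempted, %d failed' % (results.attempted, results.failed))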
helpers.js
Source:helpers.js
import { trimEnd, trimStart } from 'lodash';
/**
 * Override the editor css
 * @param  {[type]} block [description]
 * @return {[type]}       [description]
 */
export function getBlockStyle() {
  return null;
}
export function getBlockContent(style) {
  switch (style) {
    case 'IMG':
      return {
        innerContent: 'link',
        endReplacer: ')',
        startReplacer: '',
...
        startReplacer: '[text](',
      };
    default:
      return {
        innerContent: '',
        endReplacer: '',
        startReplacer: '',
      };
  }
}
export const getDefaultSelectionOffsets = (
  content,
  startReplacer,
  endReplacer,
  initPosition = 0,
) => ({
  anchorOffset: initPosition + content.length - trimStart(content, startReplacer).length,
  focusOffset: initPosition + trimEnd(content, endReplacer).length,
});
/**
 * Get the start and end offset
 * @param  {Object} selection
 * @return {Object}
 */
export function getOffSets(selection) {
  return {
    end: selection.getEndOffset(),
    start: selection.getStartOffset(),
  };
}
export function getKeyCommandData(command) {
  let content;
  let style;
  switch (command) {
    case 'bold':
      content = '**textToReplace**';
      style = 'BOLD';
      break;
    case 'italic':
      content = '*textToReplace*';
      style = 'ITALIC';
      break;
    case 'underline':
      content = '__textToReplace__';
      style = 'UNDERLINE';
      break;
    default:
      content = '';
      style = '';
  }
  return { content, style };
...