How to use the raise_error method in localstack

Best Python code snippet using localstack_python
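All three snippets below follow the same pattern: a class keeps an optional errorHandler and forwards messages to its raise_error method, so error reporting can be swapped (raise, log, or collect) without touching the parsing code. As a rough sketch of the kind of handler these classes expect (the ErrorHandler name and its error-collecting behaviour are illustrative assumptions, not taken from the snippets):

# Illustrative sketch only: the snippets below assume nothing more than an object
# exposing raise_error(message, data=None); collecting errors in a list and the
# optional escalation to an exception are hypothetical choices.
class ErrorHandler:
    def __init__(self, raise_exceptions=False):
        self.errors = []                     # collected (message, data) pairs
        self.raise_exceptions = raise_exceptions

    def raise_error(self, message, data=None):
        # Record the error; optionally escalate it as a real exception.
        self.errors.append((message, data))
        if self.raise_exceptions:
            raise ValueError(str(message) + repr(data))


handler = ErrorHandler()
handler.raise_error('No form provided in an exception description: ', {'name': 'except'})
print(handler.errors)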

lexeme.py

Source: lexeme.py (GitHub)



...
            if not rxTest.perform(item.stem):
                return False
        elif rxTest.field == 'paradigm':
            if errorHandler is not None:
                errorHandler.raise_error('Paradigm names cannot be subject to '
                                         'regex tests.')
            return False
        elif not checkWordform and rxTest.field in Lexeme.propertyFields:
            searchField = rxTest.field
            if searchField == 'lex':
                searchField = 'lemma'
            if not rxTest.perform(item.lex.__dict__[searchField]):
                return False
        elif checkWordform and rxTest.field in wordform.Wordform.propertyFields:
            searchField = rxTest.field
            if searchField == 'lex':
                searchField = 'lemma'
            if not rxTest.perform(item.__dict__[searchField]):
                return False
        else:
            if not checkWordform:
                testResults = [rxTest.perform(d[1])
                               for d in item.lex.otherData
                               if d[0] == rxTest.field]
            else:
                testResults = [rxTest.perform(d[1])
                               for d in item.otherData
                               if d[0] == rxTest.field]
            if len(testResults) <= 0 or not all(testResults):
                return False
    return True


class SubLexeme:
    """
    A class that describes a part of a lexeme with a single
    stem and a single paradigm link. Each lexeme is deconstructed
    into one or several sublexemes.
    """
    def __init__(self, numStem, stem, paradigm, gramm, gloss, lex,
                 noIncorporation=False):
        self.numStem = numStem  # the number of the stem
        # (If several stems are equal, store them as one SubLexeme.
        # {-1} means the stem can only be incorporated)
        if type(self.numStem) == int:
            self.numStem = {self.numStem}
        self.stem = stem
        self.paradigm = paradigm
        self.gramm = gramm
        self.gloss = gloss
        self.lex = lex  # the Lexeme object this SubLexeme is a part of
        self.noIncorporation = noIncorporation

    def make_stem(self, flexInTable):
        """
        Insert the inflexion parts from the (middle) inflexion
        into the current stem and return the result,
        or None if the inflexion and the stem aren't compatible.
        If the stem starts or ends with a dot, those are deleted.
        The function is intended for future use.
        """
        if not check_compatibility(self, flexInTable.afx):
            return None
        middleStem = self.stem
        if middleStem.startswith('.'):
            middleStem = middleStem[1:]
        if middleStem.endswith('.'):
            middleStem = middleStem[:-1]
        wf, wfGlossed, gloss = wordform.join_stem_flex(middleStem,
                                                       self.gloss,
                                                       flexInTable.afx,
                                                       bStemStarted=True)
        return wf, wfGlossed, gloss

    def __repr__(self):
        res = '<SubLexeme>\n'
        res += 'stem: ' + self.stem + '\n'
        res += 'paradigm: ' + self.paradigm + '\n'
        res += 'gramm: ' + self.gramm + '\n'
        res += 'gloss: ' + self.gloss + '\n'
        res += '</SubLexeme>\n'
        return res


class ExceptionForm:
    """
    A class that describes an irregular wordform.
    """
    def __init__(self, dictDescr, errorHandler=None):
        self.form = ''
        self.gramm = ''
        self.coexist = False  # whether the same combination of grammatical
        # values has a regular equivalent
        self.errorHandler = errorHandler
        try:
            self.gramm = dictDescr['value']
            if dictDescr['content'] is not None:
                for obj in dictDescr['content']:
                    if obj['name'] == 'coexist':
                        if obj['value'] == 'yes':
                            self.coexist = True
                        elif obj['value'] == 'no':
                            self.coexist = False
                        elif self.errorHandler is not None:
                            self.errorHandler.raise_error('The coexist field must '
                                                          'have yes or no as its value: ',
                                                          dictDescr)
                    elif obj['name'] == 'form':
                        self.form = obj['value']
        except KeyError:
            if self.errorHandler is not None:
                self.errorHandler.raise_error('Exception description error: ', dictDescr)
            return
        if len(self.form) <= 0 and self.errorHandler is not None:
            self.errorHandler.raise_error('No form provided in an exception description: ',
                                          dictDescr)

    def __eq__(self, other):
        if type(other) != ExceptionForm:
            return False
        if other.form == self.form and other.gramm == self.gramm and\
                other.coexist == self.coexist:
            return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)


class Lexeme:
    """
    A class that describes one lexeme.
    """
    obligFields = {'lex', 'stem', 'paradigm'}
    propertyFields = {'lex', 'stem', 'paradigm', 'gramm', 'gloss',
                      'lexref', 'stem-incorp', 'gramm-incorp',
                      'gloss-incorp'}
    defaultGlossFields = ['transl_en', 'transl_ru']  # properties whose values are used
    # as the stem gloss by default if no gloss is provided

    def __init__(self, dictDescr, errorHandler=None):
        self.lemma = ''
        self.lexref = ''
        self.stem = ''
        self.stemIncorp = ''
        self.paradigms = []
        self.gramm = ''
        self.grammIncorp = ''
        self.gloss = ''
        self.glossIncorp = ''
        self.subLexemes = []
        self.exceptions = {}  # set of tags -> ExceptionForm object
        self.otherData = []  # list of tuples (name, value)
        self.key2func = {'lex': self.add_lemma, 'lexref': self.add_lexref,
                         'stem': self.add_stem, 'paradigm': self.add_paradigm,
                         'gramm': self.add_gramm, 'gloss': self.add_gloss,
                         'except': self.add_except, 'stem-incorp': self.add_stem_incorp,
                         'gramm-incorp': self.add_gramm_incorp,
                         'gloss-incorp': self.add_gloss_incorp}
        self.errorHandler = errorHandler
        try:
            keys = set(obj['name'] for obj in dictDescr['content'])
        except KeyError:
            self.raise_error('No content in a lexeme: ', dictDescr)
            return
        if len(Lexeme.obligFields & keys) < len(Lexeme.obligFields):
            self.raise_error('No obligatory fields in a lexeme: ',
                             dictDescr['content'])
            return
        for obj in sorted(dictDescr['content'], key=self.fields_sorting_key):
            try:
                self.key2func[obj['name']](obj)
            except KeyError:
                self.add_data(obj)
        self.check_gloss()
        self.generate_sublexemes()

    def raise_error(self, message, data=None):
        if self.errorHandler is not None:
            self.errorHandler.raise_error(message, data)

    @staticmethod
    def fields_sorting_key(obj):
        if type(obj) != dict or 'name' not in obj:
            return ''
        key = obj['name']
        try:
            order = ['lex', 'lexref', 'stem', 'paradigm', 'gramm',
                     'gloss'].index(key)
            return '!' + str(order)
        except ValueError:
            return key

    def num_stems(self):
        """Return the number of different stem numbers."""
        if len(self.subLexemes) <= 0:
            return 0
        stemNums = set()
        for i in range(len(self.subLexemes)):
            stemNums |= self.subLexemes[i].numStem
        return len(stemNums)

    def add_lemma(self, obj):
        lemma = obj['value']
        if type(lemma) != str or len(lemma) <= 0:
            self.raise_error('Wrong lemma: ', lemma)
            return
        if len(self.lemma) > 0:
            self.raise_error('Duplicate lemma: ' + lemma)
        self.lemma = lemma

    def add_lexref(self, obj):
        lexref = obj['value']
        if type(lexref) != str or len(lexref) <= 0:
            self.raise_error('Wrong lexical reference: ', lexref)
            return
        if len(self.lexref) > 0:
            self.raise_error('Duplicate lexical reference: ' +
                             lexref + ' in ' + self.lemma)
        self.lexref = lexref

    def add_stem(self, obj):
        stem = obj['value']
        if type(stem) != str or len(stem) <= 0:
            self.raise_error('Wrong stem in ' + self.lemma + ': ', stem)
            return
        if len(self.stem) > 0:
            self.raise_error('Duplicate stem in ' + self.lemma + ': ', stem)
        self.stem = stem

    def add_stem_incorp(self, obj):
        stemIncorp = obj['value']
        if type(stemIncorp) != str or len(stemIncorp) <= 0:
            self.raise_error('Wrong incorporated stem in ' + self.lemma + ': ', stemIncorp)
            return
        if len(self.stemIncorp) > 0:
            self.raise_error('Duplicate incorporated stem in ' + self.lemma + ': ', stemIncorp)
        self.stemIncorp = stemIncorp

    def add_gramm(self, obj):
        gramm = obj['value']
        if type(gramm) != str or len(gramm) <= 0:
            self.raise_error('Wrong gramtags in ' + self.lemma + ': ', gramm)
            return
        if len(self.gramm) > 0:
            self.raise_error('Duplicate gramtags: ' + gramm +
                             ' in ' + self.lemma)
        self.gramm = gramm

    def add_gramm_incorp(self, obj):
        grammIncorp = obj['value']
        if type(grammIncorp) != str or len(grammIncorp) <= 0:
            self.raise_error('Wrong incorporated gramtags in ' + self.lemma +
                             ': ', grammIncorp)
            return
        if len(self.grammIncorp) > 0:
            self.raise_error('Duplicate incorporated gramtags: ' + grammIncorp +
                             ' in ' + self.lemma)
        self.grammIncorp = grammIncorp

    def add_gloss(self, obj):
        gloss = obj['value']
        if type(gloss) != str or len(gloss) <= 0:
            self.raise_error('Wrong gloss in ' + self.lemma + ': ', gloss)
            return
        if len(self.gloss) > 0:
            self.raise_error('Duplicate gloss: ' + gloss +
                             ' in ' + self.lemma)
        self.gloss = gloss

    def add_gloss_incorp(self, obj):
        glossIncorp = obj['value']
        if type(glossIncorp) != str or len(glossIncorp) <= 0:
            self.raise_error('Wrong incorporated gloss in ' + self.lemma + ': ', glossIncorp)
            return
        if len(self.glossIncorp) > 0:
            self.raise_error('Duplicate incorporated gloss: ' + glossIncorp +
                             ' in ' + self.lemma)
        self.glossIncorp = glossIncorp

    def check_gloss(self):
        """
        Check if there is a gloss associated with the lexeme,
        otherwise use the English translation (if any) or another
        default property as a gloss. If none are found, use 'STEM'
        for a gloss.
        """
        if len(self.gloss) <= 0:
            for field in self.defaultGlossFields:
                defaultValue = self.get_data(field)
                if len(defaultValue) > 0:
                    self.gloss = defaultValue[0]
                    break
        if len(self.gloss) <= 0:
            self.gloss = 'STEM'

    def add_paradigm(self, obj):
        paradigm = obj['value']
        if type(paradigm) != str or len(paradigm) <= 0:
            self.raise_error('Wrong paradigm in ' + self.lemma +
                             ': ', paradigm)
            return
        self.paradigms.append(paradigm)

    def add_except(self, obj):
        ex2add = ExceptionForm(obj, self.errorHandler)
        tagSet = frozenset(ex2add.gramm.split(','))
        try:
            if all(ex != ex2add for ex in self.exceptions[tagSet]):
                self.exceptions[tagSet].append(ex2add)
        except KeyError:
            self.exceptions[tagSet] = [ex2add]

    def add_data(self, obj):
        try:
            self.otherData.append((obj['name'], obj['value']))
        except KeyError:
            self.raise_error('Wrong key-value pair in ' + self.lemma +
                             ': ', obj)

    def get_data(self, field):
        return [d[1] for d in self.otherData if d[0] == field]

    def generate_sublexemes(self):
        self.subLexemes = []
        stems = self.separate_parts(self.stem)
        paradigms = [self.separate_parts(p) for p in self.paradigms]
        grams = self.separate_parts(self.gramm)
        glosses = self.separate_parts(self.gloss)
        # Add conversion links from the descriptions of the paradigms:
        for pGroup in paradigms:
            for p in pGroup:
                for pVariant in p:
                    try:
                        newStemConversionLinks = grammar.Grammar.paradigms[pVariant].conversion_links
                        for cl in newStemConversionLinks:
                            self.otherData.append(['conversion-link', cl])
                    except KeyError:
                        pass
        self.generate_stems(stems)

        if len(grams) not in [1, len(stems)]:
            self.raise_error('Wrong number of gramtags (' + self.gramm +
                             ') in ' + self.lemma)
            return
        if len(glosses) not in [0, 1, len(stems)]:
            self.raise_error('Wrong number of glosses (' + self.gloss +
                             ') in ' + self.lemma)
            return
        for p in paradigms:
            if len(p) not in [1, len(stems)]:
                self.raise_error('Wrong number of paradigms in ' +
                                 self.lemma + ': ', p)
                return
        noIncorporation = False  # whether ordinary stems can be incorporated
        if len(self.stemIncorp) > 0:
            noIncorporation = True
            curGloss, curGramm = '', ''
            if self.glossIncorp is not None:
                curGloss = self.glossIncorp
            elif len(glosses) == 1:
                curGloss = glosses[0][0]  # no variants for glosses
            elif len(glosses) > 1:
                curGloss = glosses[-1][0]
            if self.grammIncorp is not None:
                curGramm = self.grammIncorp
            elif len(grams) == 1:
                curGramm = grams[0][0]  # no variants for grams either
            elif len(grams) > 1:
                curGramm = grams[-1][0]
            self.append_sublexeme(-1, self.stemIncorp, '',
                                  curGramm, curGloss, False)
        for iStem in range(len(stems)):
            curGloss, curGramm = '', ''
            if len(glosses) == 1:
                curGloss = glosses[0][0]  # no variants for glosses
            elif len(glosses) > 1:
                curGloss = glosses[iStem][0]
            if len(grams) == 1:
                curGramm = grams[0][0]  # no variants for grams either
            elif len(grams) > 1:
                curGramm = grams[iStem][0]
            curParadigms = []
            for p in paradigms:
                if len(p) == 1:
                    curParadigms += p[0]
                else:
                    curParadigms += p[iStem]
            for curStem in stems[iStem]:
                for curParadigm in curParadigms:
                    self.append_sublexeme(iStem, curStem, curParadigm,
                                          curGramm, curGloss, noIncorporation)

    def append_sublexeme(self, iStem, curStem, curParadigm, curGramm, curGloss, noIncorporation):
        for sl in self.subLexemes:
            if (sl.stem == curStem and sl.paradigm == curParadigm
                    and sl.gramm == curGramm and sl.gloss == curGloss
                    and sl.noIncorporation == noIncorporation):
                sl.numStem.add(iStem)
                return
        self.subLexemes.append(SubLexeme(iStem, curStem, curParadigm,
                                         curGramm, curGloss, self,
                                         noIncorporation=noIncorporation))

    @staticmethod
    def separate_parts(s, sepParts='|', sepVars='//'):
        return [part.split(sepVars) for part in s.split(sepParts)]

    def generate_stems(self, stems):
        """Fill in the gaps in the stems description with the help of
        automatic stem conversion."""
        stemConversionNames = set(t[1] for t in self.otherData
                                  if t[0] == 'conversion-link')
        for scName in stemConversionNames:
            try:
                grammar.Grammar.stemConversions[scName].convert(stems)
            except KeyError:
                self.raise_error('No stem conversion named ' + scName)

    def generate_redupl_paradigm(self):
        """Create new paradigms with reduplicated parts of this particular
        lexeme, or change the references if they already exist."""
        if len(grammar.Grammar.paradigms) <= 0:
            self.raise_error('Paradigms must be loaded before lexemes.')
            return
        for sl in self.subLexemes:
            if sl.paradigm not in grammar.Grammar.paradigms:
                self.raise_error('No paradigm named ' + sl.paradigm)
                continue
            paraReduplName = grammar.Grammar.paradigms[sl.paradigm].fork_redupl(sl)
            sl.paradigm = paraReduplName

    def generate_regex_paradigm(self):
        """Create new paradigms where all inflexions with regexes that
        don't match the particular stem of this lexeme are deleted,
        or change the references if they already exist."""
        if len(grammar.Grammar.paradigms) <= 0:
            self.raise_error('Paradigms must be loaded before lexemes.')
            return
        for sl in self.subLexemes:
            if sl.paradigm not in grammar.Grammar.paradigms:
                self.raise_error('No paradigm named ' + sl.paradigm)
                continue
            paraRegexName = grammar.Grammar.paradigms[sl.paradigm].fork_regex(sl)
            sl.paradigm = paraRegexName

    def generate_wordforms(self):
        """Generate a list of all possible wordforms with this lexeme."""
        if len(grammar.Grammar.paradigms) <= 0:
            self.raise_error('Paradigms must be loaded before lexemes.')
            return
        wordforms = []
        for sl in self.subLexemes:
            if sl.paradigm not in grammar.Grammar.paradigms:
                self.raise_error('No paradigm named ' + sl.paradigm)
                continue
            for flex in grammar.Grammar.paradigms[sl.paradigm].flex:
                wf = wordform.Wordform(sl, flex, self.errorHandler)
                if wf.wf is None:
                    continue
                # TODO: exceptions
                wordforms.append(wf)
        return wordforms

    def add_derivations(self):
        """Add sublexemes with links to derivations."""
        subLexemes2add = []
        for sl in self.subLexemes:
            derivName = '#deriv#paradigm#' + sl.paradigm
            if derivName in grammar.Grammar.paradigms:
...
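Note that Lexeme.raise_error only delegates: if no errorHandler was passed to the constructor, errors are silently swallowed and the method simply returns. A minimal usage sketch, assuming the hypothetical ErrorHandler from the first example and the 'name'/'value' dictionary convention visible in the snippet, with the obligatory 'stem' and 'paradigm' fields deliberately missing:

# Hypothetical usage sketch; only the 'lex' field is supplied, so the
# constructor reports the missing obligatory fields and returns early.
descr = {'content': [{'name': 'lex', 'value': 'run'}]}
handler = ErrorHandler()
lex = Lexeme(descr, errorHandler=handler)   # triggers raise_error via the handler
print(handler.errors)
# -> [('No obligatory fields in a lexeme: ', [{'name': 'lex', 'value': 'run'}])]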


clitic.py

Source: clitic.py (GitHub)



...
        self.errorHandler = errorHandler
        try:
            keys = set(obj['name'] for obj in dictDescr['content'])
        except KeyError:
            self.raise_error('No content in a clitic: ', dictDescr)
            return
        if len(Clitic.obligFields & keys) < len(Clitic.obligFields):
            self.raise_error('No obligatory fields in a clitic: ',
                             dictDescr['content'])
            return
        # print(dictDescr['content'])
        for obj in sorted(dictDescr['content'], key=self.fields_sorting_key):
            try:
                self.key2func[obj['name']](obj)
            except KeyError:
                if obj['name'].startswith('regex-'):
                    self.add_regex_test(obj)
                else:
                    self.add_data(obj)
        if self.stem is None:
            self.stem = self.lemma

    def raise_error(self, message, data=None):
        if self.errorHandler is not None:
            self.errorHandler.raise_error(message, data)

    @staticmethod
    def fields_sorting_key(obj):
        try:
            key = obj['name']
        except KeyError:
            return ''
        try:
            order = ['lex', 'lexref', 'stem', 'paradigm', 'gramm',
                     'gloss'].index(key)
            return '!' + str(order)
        except ValueError:
            return key

    def add_lemma(self, obj):
        lemma = obj['value']
        if type(lemma) != str or len(lemma) <= 0:
            self.raise_error('Wrong lemma: ', lemma)
            return
        if len(self.lemma) > 0:
            self.raise_error('Duplicate lemma: ' + lemma)
        self.lemma = lemma

    def add_lexref(self, obj):
        lexref = obj['value']
        if type(lexref) != str or len(lexref) <= 0:
            self.raise_error('Wrong lexical reference: ', lexref)
            return
        if len(self.lexref) > 0:
            self.raise_error('Duplicate lexical reference: ' +
                             lexref + ' in ' + self.lemma)
        self.lexref = lexref

    def add_stem(self, obj):
        stem = obj['value']
        if type(stem) != str or len(stem) <= 0:
            self.raise_error('Wrong stem in ' + self.lemma + ': ', stem)
            return
        if self.stem is not None:
            self.raise_error('Duplicate stem in ' + self.lemma + ': ', stem)
        self.stem = stem

    def add_gramm(self, obj):
        gramm = obj['value']
        if type(gramm) != str or len(gramm) <= 0:
            self.raise_error('Wrong gramtags in ' + self.lemma + ': ', gramm)
            return
        if len(self.gramm) > 0:
            self.raise_error('Duplicate gramtags: ' + gramm +
                             ' in ' + self.lemma)
        self.gramm = gramm

    def add_gloss(self, obj):
        gloss = obj['value']
        if type(gloss) != str or len(gloss) <= 0:
            self.raise_error('Wrong gloss in ' + self.lemma + ': ', gloss)
            return
        if len(self.gloss) > 0:
            self.raise_error('Duplicate gloss: ' + gloss +
                             ' in ' + self.lemma)
        self.gloss = gloss

    def add_paradigm(self, obj):
        paradigm = obj['value']
        if type(paradigm) != str or len(paradigm) <= 0:
            self.raise_error('Wrong paradigm in ' + self.lemma +
                             ': ', paradigm)
            return
        self.paradigms.append(paradigm)

    def add_side(self, obj):
        side = obj['value']
        if type(side) != str or len(side) <= 0 or\
                side not in ('pro', 'en'):
            self.raise_error('Wrong type in ' + self.lemma + ': ', side)
            return
        if side == 'pro':
            self.side = SIDE_PROCLITIC
        elif side == 'en':
            self.side = SIDE_ENCLITIC

    def add_data(self, obj):
        try:
            self.otherData.append((obj['name'], obj['value']))
        except KeyError:
            self.raise_error('Wrong key-value pair in ' + self.lemma +
                             ': ', obj)

    def add_regex_test(self, obj):
        if not obj['name'].startswith('regex-'):
            return
        self.regexTests.append(reduplication.RegexTest(obj['name'][6:], obj['value'],
                                                       self.errorHandler))

    def get_data(self, field):
        return [v for k, v in self.otherData if k == field]

    def separate_parts(self, s, sepParts='|', sepVars='//'):
        return [part.split(sepVars) for part in s.split(sepParts)]

    def generate_stems(self, stems):
        """
        Fill in the gaps in the stems description with the help of
        automatic stem conversion.
        """
        stemConversionNames = {t[1] for t in self.otherData
                               if t[0] == 'conversion-link'}
        for scName in stemConversionNames:
            try:
                grammar.Grammar.stemConversions[scName].convert(stems)
            except KeyError:
                self.raise_error('No stem conversion named ' + scName)

    def is_compatible_str(self, strWf):
        """
        Check if the clitic is compatible with the given host word.
        """
        if len(strWf) <= 0:
            return False
        for rxTest in self.regexTests:
            if rxTest.field == 'wf' and not rxTest.perform(strWf):
                return False
        return True

    def is_compatible(self, wf, errorHandler=None):
        """
        Check if the clitic is compatible with the given Wordform.
        """
...


modulatorfunctions.py

Source: modulatorfunctions.py (GitHub)



...
        if self.types == 'sustain':
            y = 0*t + 1
            return y
        else:
            self.raise_error()

    def linear(self, t, t0):
        if self.types == 'attack':
            y = t/t0
            return y
        else:
            self.raise_error()

    def invlinear(self, t, t0):
        if self.types == 'sustain' or self.types == 'decay':
            if (1 - (t/t0)) <= 0:
                y = 0
            elif (1 - (t/t0)) > 0:
                y = 1 - (t/t0)
            return y
        else:
            self.raise_error()

    def sin(self, t, a, f):
        if self.types == 'sustain':
            y = 1 + a*np.sin(2*np.pi*f*t)
            return y
        else:
            self.raise_error()

    def exp(self, t, t0):
        if self.types == 'attack':
            y = np.exp((5*(t - t0))/t0)
            return y
        else:
            self.raise_error()

    def invexp(self, t, t0):
        if self.types == 'sustain' or self.types == 'decay':
            y = np.exp((-5*t)/t0)
            return y
        else:
            self.raise_error()

    def quartcos(self, t, t0):
        if self.types == 'sustain' or self.types == 'decay':
            y = np.cos((np.pi*t)/(2*t0))
            return y
        else:
            self.raise_error()

    def quartsin(self, t, t0):
        if self.types == 'attack':
            y = np.sin((np.pi*t)/(2*t0))
            return y
        else:
            self.raise_error()

    def halfcos(self, t, t0):
        if self.types == 'sustain' or self.types == 'decay':
            y = (1 + np.cos((np.pi*t)/t0))/2
            return y
        else:
            self.raise_error()

    def halfsin(self, t, t0):
        if self.types == 'attack':
            y = (1 + np.cos((np.pi*t)/t0))/2
            return y
        else:
            self.raise_error()

    def log(self, t, t0):
        if self.types == 'attack':
            y = np.log10(((9*t)/t0) + 1)
            return y
        else:
            self.raise_error()

    def invlog(self, t, t0):
        if self.types == 'sustain' or self.types == 'decay':
            if t.all() < t0:
                y = np.log10(((-9*t)/t0) + 10)
            elif t.all() >= t0:
                y = 0
            return y
        else:
            self.raise_error()

    def tri(self, t, t0, t1, a1):
        if self.types == 'attack':
            if t0 in t:
                ta = t[:(t.index(t0))]
                tb = t[(t.index(t0) + 1):]

                y1 = (ta*a1)/t1
                y2 = ((tb - t1)/(t1 - t0)) + a1
                y = y1 + y2
                return y
        else:
            self.raise_error()

    def pulses(self, t, t0, t1, a1):
        # sustain
        if self.types == 'sustain':
            t_prima = (t/t0)
        else:
            raise ArithmeticError

        """
        t_prima = t/t0 - mod(t/t0)
        f(t_prima) = min{ mod(((1-a1)/t1)*(t_prima - t0 + t1)) + a1}
        """

    def raise_error(self):
        print("This type of function cannot be used with this amplitude modulator")
...
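In this file, raise_error is not a delegating wrapper at all: it just prints a warning, so a shape method called with a mismatched envelope type falls through and returns None. A small sketch of what that means for callers; the Modulator class name and its constructor are assumptions here, since the class header sits outside the excerpt:

import numpy as np

# Hypothetical stand-in reproducing only the behaviour visible in the snippet:
# 'types' selects the envelope stage, and mismatches go through raise_error().
class Modulator:
    def __init__(self, types):
        self.types = types

    def linear(self, t, t0):
        if self.types == 'attack':
            return t / t0
        else:
            self.raise_error()          # prints a warning; the method returns None

    def raise_error(self):
        print("This type of function cannot be used with this amplitude modulator")


t = np.linspace(0, 1, 5)
print(Modulator('attack').linear(t, 1.0))    # [0.   0.25 0.5  0.75 1.  ]
print(Modulator('sustain').linear(t, 1.0))   # prints the warning, then None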


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!

