How to use the findWord method in fMBT

Best Python code snippets using fMBT_python

GetTense.py

Source: GetTense.py (GitHub)


...
                nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG"):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Present Perfect')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            PresentPerfect.append(1)

        # Present Progressive
        elif ((TypeWord[0][1] == "VBG" and words[i - 1] in ["am", "are", "is", "isn't"] and words[i + 1] != 'to')
              or (TypeWord[0][1] == "VBG" and words[i - 1] in ["am", "are", "is"] and words[i - 2] == "not"
                  and words[i + 1] != 'to')
              or (TypeWord[0][1] == "VBG"
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["NN", "PRP"]
                  and words[i + 1] != "to"
                  and words[i - 2] in ["am", "is", "are"]
                  and words[i - 2] not in ["was", "were"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Present Progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            PresentProgressive.append(1)

        # S-Present (Simple Present)
        elif ((TypeWord[0][1] in ["VBZ", "VBP", "VB"]
               and words[i - 2] not in ["wouldn't", "would", "will", "did", "didn't"]
               and words[i - 1] not in ["to", "will", "won't", "would", "wouldn't"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["PRP", "NN"]
               and words[i + 1] != "not")
              or (TypeWord[0][1] in ["VBZ", "VBP"] and words[i - 1] in ["doesn't", "don't"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"])
              or (TypeWord[0][1] in ["VBZ", "VBP"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN"]
                  and words[i - 2] in ["do", "does"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Simple Present')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            SimplePresent.append(1)

        # present perfect progressive
        elif ((TypeWord[0][1] == "VBG" and words[i - 1] == 'been'
               and words[i - 2] in ['have', 'has', "hasn't", "haven't"]
               and words[i - 3] not in ["will", "won't", "would", "wouldn't"]
               and words[i - 4] not in ["wouldn't", "would", "will", "won't"])
              or (TypeWord[0][1] == "VBG" and words[i - 1] == 'been'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['has', 'have', "hasn't", "haven't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Present perfect progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            presentPerfectProgressive.append(1)

        # Past perfect
        elif ((TypeWord[0][1] in ["VBN", "VBD"] and words[i - 1] in ['had', "hadn't"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG")
              or (TypeWord[0][1] in ["VBN", "VBD"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 2] in ['had', "hadn't"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG")):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Past perfect')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            PastPerfect.append(1)

        # simple past
        elif ((TypeWord[0][1] in ["VBN", "VBD"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["NN", "PRP", "VBN", "VBD", "VB", "VBG"]
               and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] not in ["VBN", "VBD", "VB"]
               and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["NN", "PRP"])
              or (TypeWord[0][1] == "VB" and words[i - 1] == "didn't")
              or (TypeWord[0][1] == "VB"
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 2] in ["didn't", 'did'])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Simple Past')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            simplePast.append(1)

        # Past Progressive
        elif ((TypeWord[0][1] == "VBG" and words[i - 1] in ["was", "were", "wasn't", "weren't"])
              or (TypeWord[0][1] == "VBG"
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 2] in ["was", "were", "wasn't", "weren't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('past Progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            PastProgressive.append(1)

        # Past perfect progressive
        elif ((TypeWord[0][1] == "VBG" and words[i - 2] in ['had', "hadn't"] and words[i - 1] == 'been')
              or (TypeWord[0][1] == 'VBG' and words[i - 1] == 'been'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['had', "hadn't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('Past perfect progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            PastPerfectProgressive.append(1)

        # will future
        elif ((TypeWord[0][1] in ["VBP", "VB", "VBZ"] and words[i - 1] in ['will', "won't"])
              or (TypeWord[0][1] == 'VB'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 2] in ['will', "won't"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["VBG", "VBD", "VBN"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('will future')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            future.append(1)

        # going to future
        elif ((TypeWord[0][1] == 'VB' and words[i - 1] == 'to' and words[i - 2] == 'going'
               and words[i - 3] == 'not' and words[i - 4] in ['am', 'are', 'is'])
              or (TypeWord[0][1] == 'VB' and words[i - 1] == 'to' and words[i - 2] == 'going'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 3]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 4] in ['am', 'is', 'are'])
              or (TypeWord[0][1] == 'VB' and words[i - 1] == 'to' and words[i - 2] == 'going'
                  and words[i - 3] in ['am', 'are', 'is'])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('going to future')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            future.append(1)

        # future progressive
        elif ((TypeWord[0][1] == 'VBG' and words[i - 1] == 'be' and words[i - 2] in ['will', "won't"])
              or (TypeWord[0][1] == 'VBG' and words[i - 1] == 'be'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['will', "won't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('future progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            futureProgressive.append(1)

        # future perfect
        elif ((TypeWord[0][1] in ["VBN", "VBD"] and words[i - 1] == 'have'
               and words[i - 2] in ['will', "won't"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG")
              or (TypeWord[0][1] in ["VBN", "VBD"] and words[i - 1] == 'have'
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG"
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['will', "won't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('future perfect')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            futurePerfect.append(1)

        # future perfect progressive
        elif ((TypeWord[0][1] == 'VBG' and words[i - 1] == 'been' and words[i - 2] == 'have'
               and words[i - 3] in ['will', "won't", "'ll", 'will not'])
              or (TypeWord[0][1] == 'VBG' and words[i - 1] == 'been' and words[i - 2] == 'have'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 3]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 4] in ['will', "won't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('future perfect progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            futurePerfectProgressive.append(1)

        # conditional simple
        elif ((TypeWord[0][1] == 'VB' and words[i - 1] in ['would', "wouldn't"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["VBG", "VBD", "VBN"])
              or (TypeWord[0][1] == 'VB'
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] not in ["VBG", "VBN", "VBD"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 1]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 2] in ['would', "wouldn't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('conditional simple')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            conditionalSimple.append(1)

        # conditional progressive
        elif ((TypeWord[0][1] == 'VBG' and words[i - 1] == 'be' and words[i - 2] in ['would', "wouldn't"])
              or (TypeWord[0][1] == 'VBG' and words[i - 1] == 'be'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['would', "wouldn't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('conditional progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            conditionalProgressive.append(1)

        # conditional perfect
        elif ((TypeWord[0][1] in ['VBN', "VBD"] and words[i - 1] == 'have'
               and words[i - 2] in ['would', "wouldn't"]
               and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG")
              or (TypeWord[0][1] in ["VBN", "VBD"] and words[i - 1] == 'have'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 2]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 3] in ['would', "wouldn't"]
                  and nltk.pos_tag(nltk.word_tokenize(words[i + 1]))[0][1] != "VBG")):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('conditional perfect')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            conditionalPerfect.append(1)

        # conditional perfect progressive
        elif ((TypeWord[0][1] == 'VBG' and words[i - 1] == 'been' and words[i - 2] == 'have'
               and words[i - 3] in ['would', "wouldn't"])
              or (TypeWord[0][1] == 'VBG' and words[i - 1] == 'been' and words[i - 2] == 'have'
                  and nltk.pos_tag(nltk.word_tokenize(words[i - 3]))[0][1] in ["PRP", "NN", "NNS", "NNP", "NNPS"]
                  and words[i - 4] in ['would', "wouldn't"])):
            a = TypeWord[0][0]
            lemmatizer = WordNetLemmatizer()
            x = lemmatizer.lemmatize(a, 'v')
            WordLevel = FindWord.findWord(x)
            TenseLevel = FindWord.findWord('conditional perfect progressive')
            if levels.index(WordLevel) <= levels.index(TenseLevel):
                Tense.append(TenseLevel)
            else:
                Tense.append(WordLevel)
            conditionalPerfectProgressive.append(1)

    return Tense
except:
    ...
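
The snippet above leans on two names it does not define: a levels list that orders difficulty labels, and a local FindWord module whose findWord function maps both vocabulary items and tense names such as 'Present Perfect' to one of those labels. A minimal sketch of such a helper is shown below; the CEFR-style level names, the sample dictionary entries, and the fallback to the lowest level are assumptions for illustration, not code from the original project.

# findword_sketch.py -- hypothetical stand-in for the FindWord module used in GetTense.py.
# Level names, dictionary contents and the default level are illustrative assumptions.

levels = ['A1', 'A2', 'B1', 'B2', 'C1', 'C2']

# Tense labels and vocabulary share one table so that levels.index(findWord(x))
# works whether x is a lemmatized verb or a tense name.
_word_levels = {
    'Present Perfect': 'A2',
    'Present Progressive': 'A1',
    'Simple Present': 'A1',
    'go': 'A1',
    'negotiate': 'B2',
}

def findWord(word):
    # Unknown words fall back to the lowest level so levels.index() never fails.
    return _word_levels.get(word, levels[0])

if __name__ == '__main__':
    WordLevel = findWord('negotiate')           # 'B2'
    TenseLevel = findWord('Present Perfect')    # 'A2'
    # Same comparison GetTense.py makes: keep whichever level is harder.
    if levels.index(WordLevel) <= levels.index(TenseLevel):
        print(TenseLevel)
    else:
        print(WordLevel)                        # prints B2

Keeping words and tense names in a single lookup is what lets the snippet compare WordLevel and TenseLevel with one levels.index call.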


reducer14.py

Source: reducer14.py (GitHub)


#!/usr/bin/python
## reducer13.py
## Author: Prabhakar Gundugola
## Description: reducer code for HW1.4
import sys
import math

def isspam(true_class):
    if true_class == 1:
        return 'SPAM'
    else:
        return 'HAM'

filenames = sys.argv[1:]
spam_email_count = 0
ham_email_count = 0
spam_word_count = 0
ham_word_count = 0
spam_findword = {}
ham_findword = {}
total_cases = 0
correct_cases = 0

for filename in filenames:
    with open(filename, "r") as myfile:
        for line in myfile:
            tokens = line.split('\t')
            doc_id = tokens[0]
            true_class = int(tokens[1])
            #findword = tokens[3]
            #findword_count = int(tokens[4])
            word_count = int(tokens[2])

            if true_class == 1:
                spam_email_count += 1
                spam_word_count += word_count
            else:
                ham_email_count += 1
                ham_word_count += word_count

            if len(tokens) > 3:
                for i in range(3, len(tokens), 2):
                    findword = tokens[i]
                    findword_count = int(tokens[i+1])

                    if true_class == 1:
                        if findword not in spam_findword:
                            spam_findword[findword] = findword_count
                        else:
                            spam_findword[findword] += findword_count
                    else:
                        if findword not in ham_findword:
                            ham_findword[findword] = findword_count
                        else:
                            ham_findword[findword] += findword_count

print "spam_email_count: ", spam_email_count
print "ham_email_count: ", ham_email_count
print "spam_word_count: ", spam_word_count
print "ham_word_count: ", ham_word_count

spam_prior = math.log((1.0*spam_email_count)/(spam_email_count + ham_email_count))
ham_prior = math.log((1.0*ham_email_count)/(ham_email_count + spam_email_count))
spam_findword_prob = {}
ham_findword_prob = {}
print "spam_prior: ", spam_prior
print "ham_prior: ", ham_prior

for word in spam_findword:
    if spam_findword[word] > 0:
        spam_findword_prob[word] = math.log((1.0*spam_findword[word]/spam_word_count))
    else:
        spam_findword_prob[word] = float('-inf')
for word in ham_findword:
    if ham_findword[word] > 0:
        ham_findword_prob[word] = math.log((1.0*ham_findword[word]/ham_word_count))
    else:
        ham_findword_prob[word] = float('-inf')

# Naive Bayes classification
for filename in filenames:
    with open(filename, "r") as myfile:
        for line in myfile:
            total_cases += 1
            tokens = line.split('\t')
            doc_id = tokens[0]
            true_class = int(tokens[1])
            vocab = {}
            if len(tokens) > 3:
                for i in range(3, len(tokens), 2):
                    findword = tokens[i]
                    findword_count = int(tokens[i+1])
                    vocab[findword] = findword_count

            spam_doc_prob, ham_doc_prob = 0.0, 0.0
            for key, value in vocab.iteritems():
                if spam_findword_prob[key] == float('-inf'):
                    if value == 0:
                        spam_doc_prob += 0
                    else:
                        spam_doc_prob += float('-inf')
                else:
                    spam_doc_prob += spam_findword_prob[key]*value
            for key, value in vocab.iteritems():
                if ham_findword_prob[key] == float('-inf'):
                    if value == 0:
                        ham_doc_prob += 0
                    else:
                        ham_doc_prob += float('-inf')
                else:
                    ham_doc_prob += ham_findword_prob[key]*value

            spam_doc_prob += spam_prior
            ham_doc_prob += ham_prior

            result = doc_id.ljust(30) + '\t\t' + isspam(true_class) + '\t\t'
            if spam_doc_prob > ham_doc_prob:
                predicted = 1
            else:
                predicted = 0
            result += isspam(predicted)
            print result
            if true_class == predicted:
                correct_cases += 1

accuracy = 100.0*correct_cases/total_cases
print "-----------------------"
print "Accuracy: " + str(accuracy) + '%'
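
reducer14.py never states its expected input format, but its parsing logic implies one tab-separated record per email: a document id, the true class (1 for spam, 0 for ham), the document's word count, and then alternating findword/count pairs. The sketch below builds a small input file in that assumed layout (the file name, document ids and words are made up) and notes how the reducer would be invoked; every word here appears in both classes, so the probability lookups in the classification pass never hit a missing key.

# make_sample_input.py -- hypothetical helper that fabricates input for reducer14.py.
# Layout inferred from the reducer's parsing:
#   doc_id \t true_class \t word_count \t word1 \t count1 \t word2 \t count2 ...
rows = [
    ("spam.0001", 1, 120, [("free", 5), ("offer", 3)]),
    ("ham.0001", 0, 80, [("free", 1), ("offer", 1)]),
]

with open("part-00000", "w") as out:
    for doc_id, true_class, word_count, pairs in rows:
        fields = [doc_id, str(true_class), str(word_count)]
        for word, count in pairs:
            fields.extend([word, str(count)])
        out.write("\t".join(fields) + "\n")

# The reducer uses Python 2 syntax (print statements, dict.iteritems), so it would
# be run as:
#   python reducer14.py part-00000 part-00001 ...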


binarysearch.py

Source: binarysearch.py (GitHub)


'''
Binary search operates on a sorted array of size n. It searches for elements
by splitting at the n/2th element, and checks whether the element to find
is larger or smaller than the checked element. It then splits the bottom
(when element is smaller) or top (when element is bigger) in half again,
thereby eliminating the other half. It repeats this process until it finds the
element. Runs in O(log n).
'''
import random

alphabet = "abcdefghijklmnopqrstuvwxyz"
words = []
# seed a list of random 12-letter words
for num in range(0, 1000):
    word = ''
    sample = random.sample([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                            15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25], 12)
    for nr in sample:
        word += alphabet[nr]
    words.append(word)

findword = "xfdskjfsdhjk"
words.append(findword)
sortedwords = sorted(words)

def binarysearch(array, findword):
    if len(array) <= 0:
        print 'array contains no elements'
        return
    high = len(array) - 1
    low = 0
    currentlocation = ((high-low)/2) + low
    numberofsearches = 0
    found = False
    while not found:
        numberofsearches += 1
        print 'testing at location:', currentlocation
        if array[currentlocation] == findword:
            print 'found the word: ', findword, ' at location: ', currentlocation, ' in ', numberofsearches, ' searches.'
            break
        elif array[currentlocation] < findword:
            low = currentlocation
            currentlocation = ((high-low)/2) + low
        elif array[currentlocation] > findword:
            high = currentlocation
            currentlocation = ((high-low)/2) + low

def recursivebinarysearch(array, findword, upperbound, lowerbound):
    if len(array) <= 0:
        print 'array contains no elements'
        return
    if upperbound <= 0:
        print 'item:', findword, 'not found in array'
        return
    currentlocation = ((upperbound-lowerbound)/2)+lowerbound
    print 'testing at location:', currentlocation, upperbound, lowerbound
    if array[currentlocation] == findword:
        print 'found the word: ', findword, ' at location: ', currentlocation
        return currentlocation
    elif array[currentlocation] < findword:
        return recursivebinarysearch(array, findword, upperbound, currentlocation)
    elif array[currentlocation] > findword:
        return recursivebinarysearch(array, findword, currentlocation, lowerbound)

# passes an array that is constantly split until its search completes
def recursivesplitbinarysearch(array, findword):
    if len(array) <= 0:
        print 'array contains no elements, findword:', findword, 'not in array'
        return -1
    location = len(array)/2
    print 'array is now size:', len(array), ' testing at location: ', location, array[location]
    if findword == array[location]:
        print 'found the word ', findword
        return True
    elif findword > array[location]:
        return recursivesplitbinarysearch(array[location:], findword)
    elif findword < array[location]:
        return recursivesplitbinarysearch(array[:location], findword)

binarysearch(sortedwords, findword)
recursivebinarysearch(sortedwords, findword, len(sortedwords), 0)
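
Since the search functions above only print a trace, a quick way to cross-check their result is the standard library's bisect module, which performs the same O(log n) lookup on a sorted list. The sketch below rebuilds a comparable word list (variable names chosen here for illustration, not taken from the snippet) and confirms that bisect_left lands on the planted word.

# bisect_check.py -- independent check of the binary search result using the stdlib.
import bisect
import random
import string

# Build a sorted list of random 12-letter words, mirroring binarysearch.py.
random.seed(0)
words = [''.join(random.sample(string.ascii_lowercase, 12)) for _ in range(1000)]
findword = "xfdskjfsdhjk"
words.append(findword)
sortedwords = sorted(words)

# bisect_left returns the index at which findword sits if it is present.
index = bisect.bisect_left(sortedwords, findword)
found = index < len(sortedwords) and sortedwords[index] == findword
print("bisect located %s at index %d -> %s" % (findword, index, found))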


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

