How to use test_background method in pytest-bdd

Best Python code snippets using pytest-bdd
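Note that in the snippets on this page, test_background is simply a variable name used inside the example projects; it is not part of the pytest-bdd API. In pytest-bdd itself, a Gherkin Background section is handled with ordinary step definitions that run before every scenario in the feature file. The sketch below is a minimal, hypothetical example (the feature file path, step texts, and function names are placeholders) showing how such a background step is typically wired up:

# features/app.feature (hypothetical feature file, shown here as a comment for illustration)
#
# Feature: Example feature
#   Background:
#     Given the application is running
#
#   Scenario: A scenario that reuses the background
#     When I open the dashboard
#     Then I see the dashboard

# test_app.py
from pytest_bdd import scenarios, given, when, then

# Collect every scenario in the feature file; the Background step runs before each one.
scenarios("features/app.feature")

@given("the application is running")
def application_running():
    # shared setup for all scenarios in the feature; executed once per scenario
    pass

@when("I open the dashboard")
def open_dashboard():
    pass

@then("I see the dashboard")
def see_dashboard():
    pass

The two snippets that follow are real-world sources indexed for the term test_background; in both, it is a background data set or background image rather than a pytest-bdd construct.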

eval_frames.py

Source: eval_frames.py (GitHub)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from gensim.models import Word2Vec, KeyedVectors
import argparse
from parse_frames import do_counts, words_to_pmi, seeds_to_real_lex, get_words_to_cut
from data_iters import FrameAnnotationsIter, BackgroundIter, get_sentence_level_test, load_json_as_list, load_codes, get_random_split, code_to_short_form, get_per_frame_split, FrameHardSoftIter
import os
from collections import Counter, defaultdict
import pickle
from random import shuffle
import operator
from scipy import spatial
import glob
from params import Params
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.append("''")
stop_words.append('``')
stop_words.append('--')
stop_words.append("'s")
stop_words.append("n't")
stop_words.append("said")
import string

def get_top_words(input_file):
    dir_name = os.path.basename(os.path.dirname(os.path.dirname(input_file)))
    base_name = os.path.join("cache", dir_name + ".counter")
    if (os.path.isfile(base_name)):
        return pickle.load(open(base_name, "rb"))
    c = Counter()
    num_articles = 0
    article_counter = Counter()
    for words in BackgroundIter(glob.iglob(input_file), sent_level=False):
        c.update(words)
        num_articles += 1
        article_counter.update(set(words))
    pickle.dump((c, num_articles, article_counter), open(base_name, "wb"))
    return c, num_articles, article_counter

# Theory that some frames we model better than others because they are less relevant to
# test text. Count how frequent each frame is in input text
def count_frames(code_to_str, frame_iter):
    frame_counter = Counter()
    text_count = 0
    for text, frames, _ in frame_iter:
        for frame in frames:
            frame_counter[frame] += 1
        text_count += 1
    for f in sorted(frame_counter):
        print(code_to_str[f], ";", frame_counter[f])

# NEW WAY -- train on top of NYT model
def get_wv_nyt_name(input_file, split_type, params):
    if split_type == "random" or split_type == 'kfold' or split_type == 'dcard' or split_type == 'dcard_tune':
        base_name = params.ENGLISH_MFC_MODEL
    else:
        base_name = os.path.join("cache", split_type + ".nyt.model")
    nyt_model = params.ENGLISH_BASE_MODEL
    if (os.path.isfile(base_name)):
        return base_name
    sentence_iter = BackgroundIter(glob.iglob(input_file), verbose=False)
    base_model = Word2Vec.load(nyt_model)
    count = 0
    for x in sentence_iter:
        count += 1
    base_model.train(sentence_iter, total_examples=count, epochs=base_model.epochs)
    fp = open(base_name, "wb")
    base_model.wv.save(fp)
    fp.close()
    return base_name

class frameTracker():
    def __init__(self):
        self.correct_positive = 0
        self.marked_positive = 0
        self.true_positive = 0
        self.marked_correct = 0

    def get_metrics(self, total):
        # return precision, recall, accuracy
        return self.correct_positive / max(float(self.marked_positive), 0.0001), \
            self.correct_positive / float(self.true_positive), \
            self.marked_correct / float(total)

def test_primary_frame(code_to_lex, code_to_str, text_iter, do_print=True):
    total = 0
    correct = 0
    del code_to_lex[15.0]  # Don't guess Other
    for text, _, true_frame in text_iter:
        if true_frame == "null" or true_frame is None:
            continue
        total += 1
        text_counter = Counter(text)
        sums = []
        for f in code_to_lex:
            sums.append((f, sum([text_counter[w] for w in code_to_lex[f]])))
        # we shuffle so that ties are randomly broken
        shuffle(sums)
        frame, word_count = max(sums, key=operator.itemgetter(1))
        # Mark as "Other" if it doesn't belong to any other frame
        if word_count < 4:
            frame = 15.0
        if frame == true_frame:
            correct += 1
    if do_print:
        print (float(correct) / float(total), total)
    return float(correct) / float(total)

# Find center of a set of vectors (unnormalized)
# by summing the vectors
def get_center(words, wv):
    embed_size = 0
    for w in words:
        if not w in wv:
            continue
        embed_size = len(wv[w])
        break
    center = [0 for i in range(0, embed_size)]
    for w in words:
        if not w in wv:
            continue
        center = [x + y for x, y in zip(center, wv[w])]
    return center

# First find center of context_words vectors, then return similarity between keyword and center
def get_mean_similarity(keywords, context_words, wv):
    context_center = get_center(context_words, wv)
    keywords_center = get_center(keywords, wv)
    return 1 - spatial.distance.cosine(context_center, keywords_center)

def test_primary_frame_wv(code_to_lex, code_to_str, text_iter, wv):
    total = 0
    correct = 0
    for text, _, true_frame in text_iter:
        if true_frame == "null" or true_frame is None:
            continue
        total += 1
        sums = []
        for f in code_to_lex:
            sums.append((f, get_mean_similarity(text, code_to_lex[f], wv)))
        # we shuffle so that ties are randomly broken
        shuffle(sums)
        frame, word_count = max(sums, key=operator.itemgetter(1))
        if frame == true_frame:
            correct += 1
    print (float(correct) / float(total), total)

def max_index(l):
    index, value = max(enumerate(l), key=operator.itemgetter(1))
    return str(index)

def test_sentence_annotations(code_to_lex, code_to_str, frame_to_contains, frame_to_doesnt):
    for f in sorted(code_to_lex):
        frame_tracker = frameTracker()
        total = 0
        for contains in frame_to_contains[f]:
            total += 1
            frame_tracker.true_positive += 1
            text_counter = Counter(contains)
            applies_frame = sum([text_counter[w] for w in code_to_lex[f]]) >= 1
            if applies_frame:
                frame_tracker.marked_correct += 1
                frame_tracker.marked_positive += 1
                frame_tracker.correct_positive += 1
        for doesnt in frame_to_doesnt[f]:
            total += 1
            text_counter = Counter(doesnt)
            applies_frame = sum([text_counter[w] for w in code_to_lex[f]]) >= 1
            if applies_frame:
                frame_tracker.marked_positive += 1
            else:
                frame_tracker.marked_correct += 1
        assert (frame_tracker.true_positive == len(frame_to_contains[f]))
        assert (total == len(frame_to_contains[f]) + len(frame_to_doesnt[f]))
        p, r, a = frame_tracker.get_metrics(total)
        if (p + r) == 0:
            print(code_to_str[f], "VERY BAD")
            continue
        print (code_to_str[f], ";",
            p, ";",
            r, ";",
            (2 * (p * r) / (p + r)), ";",
            a, ";")

def test_annotations(code_to_lex, code_to_str, frame_iter, lex_count=3, do_print=True):
    code_to_frame_tracker = {}
    for c in code_to_lex:
        code_to_frame_tracker[c] = frameTracker()
    total = 0
    for text, frames, _ in frame_iter:
        total += 1
        text_counter = Counter(text)
        for c in code_to_lex:
            applies_frame = (sum([text_counter[w] for w in code_to_lex[c]]) >= lex_count)
            gold_applies_frame = (c in frames)
            if applies_frame == gold_applies_frame:
                code_to_frame_tracker[c].marked_correct += 1
            if applies_frame:
                code_to_frame_tracker[c].marked_positive += 1
                if gold_applies_frame:
                    code_to_frame_tracker[c].correct_positive += 1
            if gold_applies_frame:
                code_to_frame_tracker[c].true_positive += 1
    code_to_f1 = {}
    average_f1 = 0
    for c in sorted(code_to_frame_tracker):
        p, r, a = code_to_frame_tracker[c].get_metrics(total)
        if (p + r) == 0:
            code_to_f1[c] = 0
            continue
        code_to_f1[c] = (2 * (p * r) / (p + r))
        if do_print:
            print (code_to_str[c], ";",
                p, ";",
                r, ";",
                (2 * (p * r) / (p + r)), ";",
                a, ";")
        if code_to_str[c] == "Other":
            continue
        average_f1 += (2 * (p * r) / (p + r))
    if do_print:
        print ("AVERAGE", average_f1 / (len(code_to_frame_tracker) - 1))
    return code_to_f1

def test_hard_annotations(code_to_lex, code_to_str, frame_iter, lex_count=3):
    code_to_frame_tracker = {}
    for c in code_to_lex:
        code_to_frame_tracker[c] = frameTracker()
    total = 0
    for text, frame_to_all, frame_to_any in frame_iter:
        total += 1
        text_counter = Counter(text)
        for c in code_to_lex:
            applies_frame = (sum([text_counter[w] for w in code_to_lex[c]]) >= lex_count)
            # Check hard, it's only in doc if all annotators think it's in doc
            gold_applies_frame = frame_to_all[c]
            if applies_frame == gold_applies_frame:
                code_to_frame_tracker[c].marked_correct += 1
            if applies_frame:
                code_to_frame_tracker[c].marked_positive += 1
                if gold_applies_frame:
                    code_to_frame_tracker[c].correct_positive += 1
            if gold_applies_frame:
                code_to_frame_tracker[c].true_positive += 1
    for c in sorted(code_to_frame_tracker):
        p, r, a = code_to_frame_tracker[c].get_metrics(total)
        if (p + r) == 0:
            print ("VERY BAD")
            return
        print (code_to_str[c], ";",
            p, ";",
            r, ";",
            (2 * (p * r) / (p + r)), ";",
            a, ";")

def get_data_split(split_type, params, frame=None):
    immigration = os.path.join(params.MFC_PATH, "immigration.json")
    tobacco = os.path.join(params.MFC_PATH, "tobacco.json")
    samesex = os.path.join(params.MFC_PATH, "samesex.json")
    full_background = os.path.join(params.MFC_RAW_PATH, "*/json/*.json")
    if split_type == 'tobacco':
        train_files = [immigration, samesex]
        test_files = tobacco
        test_background = os.path.join(params.MFC_RAW_PATH, "smoking/json/*.json")
    elif split_type == 'immigration':
        train_files = [tobacco, samesex]
        test_files = immigration
        test_background = os.path.join(params.MFC_RAW_PATH, "immigration/json/*.json")
    elif split_type == 'samesex':
        train_files = [tobacco, immigration]
        test_files = samesex
        test_background = os.path.join(params.MFC_RAW_PATH, "samesex/json/*.json")
    elif split_type == 'kfold':
        train_files = [tobacco, immigration, samesex]
        test_background = full_background
        assert(frame is not None)
        test_data, train_data = get_per_frame_split(train_files, frame)
        return train_data, test_data, test_background
    elif split_type == 'dcard':
        train_files = [immigration]
        test_background = full_background
        test_data, train_data = get_random_split(train_files, num_folds=10, filter_tone=True)
        return train_data, test_data, test_background
    elif split_type == 'dcard_tune':
        train_files = [immigration]
        test_background = full_background
        test_data, train_data = get_random_split(train_files, num_folds=50, filter_tone=True)
        return train_data, test_data, test_background
    else:
        assert (split_type == "random")
        # train_files = [tobacco, immigration, samesex]
        train_files = [immigration]
        test_background = full_background
        test_data, train_data = get_random_split(train_files)
        return train_data, test_data, test_background
    train_data = load_json_as_list(train_files)
    test_data = load_json_as_list([test_files])
    return train_data, test_data, test_background

def count_all_frames():
    params = Params()  # standalone helper; mirrors the Params() instance built in main()
    immigration = os.path.join(params.MFC_PATH, "immigration.json")
    tobacco = os.path.join(params.MFC_PATH, "tobacco.json")
    samesex = os.path.join(params.MFC_PATH, "samesex.json")
    codes = os.path.join(params.MFC_PATH, "codes.json")
    train_files = [immigration, tobacco, samesex]
    code_to_str = load_codes(codes)
    train_data = load_json_as_list(train_files)
    doc_level_iter = FrameAnnotationsIter(train_data)
    count_frames(code_to_str, doc_level_iter)

def do_all(args, train_data, test_data, test_background, code_to_str, params, target_frame=None, do_print=True):
    wv_name = get_wv_nyt_name(test_background, args.split_type, params)
    corpus_counter, code_to_counter, word_to_article_count, total_article_count = do_counts(train_data)
    # Sometimes (kfold) we only care about 1 frame
    if target_frame is not None:
        code_to_counter = {f: code_to_counter[f] for f in [target_frame]}
    # cut infrequent words
    cut_words = get_words_to_cut(total_article_count, word_to_article_count, params.MIN_CUT, params.MAX_CUT)
    corpus_counter = Counter({c: corpus_counter[c] for c in corpus_counter if not c in cut_words})
    # calculate PMI
    corpus_count = sum([corpus_counter[k] for k in corpus_counter])
    code_to_lex = {}
    all_frames = set()
    for c in code_to_counter:
        if "primary" in code_to_str[c] or "headline" in code_to_str[c] or "primany" in code_to_str[c]:
            continue
        all_frames.add(c)
        # For the baseline, we just take 100 most frequent words
        if args.baseline:
            # remove stopwords
            code_to_counter[c] = Counter({w: code_to_counter[c][w] for w in code_to_counter[c] if w in corpus_counter and not w in stop_words and not w in string.punctuation})
            code_to_lex[c] = [q[0] for q in code_to_counter[c].most_common(100)]
        else:
            code_to_lex[c] = words_to_pmi(corpus_counter, corpus_count, code_to_counter[c], params.TO_RETURN_COUNT)
            # # Use same seeds as baseline
            # code_to_counter[c] = Counter({w:code_to_counter[c][w] for w in code_to_counter[c] if w in corpus_counter and not w in stop_words and not w in string.punctuation})
            # code_to_lex[c] = [q[0] for q in code_to_counter[c].most_common(100)]
    if do_print:
        print("*******************************************************************************")
        for c in code_to_lex:
            print (code_to_str[c], code_to_lex[c])
        print("*******************************************************************************")
        print("*******************************************************************************")
    top_words, num_articles, article_counter = get_top_words(test_background)
    vocab = sorted(top_words, key=top_words.get, reverse=True)[:params.VOCAB_SIZE]
    if args.baseline:
        code_to_new_lex = code_to_lex
    else:
        code_to_new_lex = {}
        for c in code_to_lex:
            code_to_new_lex[c] = seeds_to_real_lex(code_to_lex[c], wv_name, vocab, code_to_str[c], topn=params.VEC_SEARCH, threshold=params.SIM_THRESH)
            # code_to_new_lex[c] = code_to_lex[c] # Test no query expansions. This guy is pretty good
    # filter again, this time off of target corpus cause that's what we have to do in Russian
    words_to_cut = get_words_to_cut(num_articles, article_counter, params.MIN_CUT, params.MAX_CUT)
    for c in code_to_new_lex:
        code_to_new_lex[c] = [w for w in code_to_new_lex[c] if not w in words_to_cut]
    # make data iters
    doc_level_iter = FrameAnnotationsIter(test_data)
    short_codes = set([code_to_short_form(code) for code in code_to_str])
    hard_iter = FrameHardSoftIter(test_data, short_codes)
    # sentence level tests
    frame_to_contains, frame_to_doesnt = get_sentence_level_test(test_data, all_frames)
    # just return everything
    if not do_print:
        return code_to_new_lex, doc_level_iter
    for x in code_to_new_lex:
        print (code_to_str[x])
        print (code_to_new_lex[x])
    # print("*******************************************************************************")
    # print("Frame Counts;")
    # count_frames(code_to_str, doc_level_iter)
    print("*******************************************************************************")
    print("DOC")
    test_annotations(code_to_new_lex, code_to_str, doc_level_iter, lex_count=params.LEX_COUNT)
    # print("*******************************************************************************")
    # Skipping this for now
    # print("DOC HARD")
    # test_hard_annotations(code_to_new_lex, code_to_str, hard_iter)
    # print("*******************************************************************************")
    # print("SENTENCE")
    # test_sentence_annotations(code_to_new_lex, code_to_str, frame_to_contains, frame_to_doesnt)
    print("*******************************************************************************")
    print("PRIMARY")
    test_primary_frame(code_to_new_lex, code_to_str, doc_level_iter)
    print("*******************************************************************************")
    # # Real slow and doesn't work well
    # print("PRIMARY WV")
    # test_primary_frame_wv(code_to_new_lex, code_to_str, doc_level_iter, KeyedVectors.load(wv_name))
    to_save = {}
    for x in code_to_new_lex:
        to_save[code_to_str[x]] = code_to_new_lex[x]
    pickle.dump(to_save, open("cache/" + args.split_type + "_lex.pickle", "wb"))
    to_save = {}
    for x in code_to_lex:
        to_save[code_to_str[x]] = code_to_new_lex[x]
    pickle.dump(to_save, open("cache/" + args.split_type + "_base_lex.pickle", "wb"))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--baseline", action='store_true')
    # specify what to use as training data and what to use as test set. If random, hold out 20% of data for test
    # if kfold, we do a different data split for each frame, so that test and train data have same proportion
    # of the frame at the document level
    parser.add_argument("--split_type", choices=['tobacco', 'immigration', 'samesex', 'random', 'kfold', 'dcard', 'dcard_tune'])
    args = parser.parse_args()
    p = Params()
    code_file = os.path.join(p.MFC_PATH, "codes.json")
    code_to_str = load_codes(code_file)
    if args.split_type == 'kfold':
        codes = set([code_to_short_form(code) for code in code_to_str])
        codes.remove(0.0)  # Skip "None"
        codes.remove(16.0)  # Skip "Irrelevant"
        codes.remove(17.0)  # Skip tones
        codes.remove(18.0)
        codes.remove(19.0)
        for code in codes:
            print(code)
            train_data, test_data, test_background = get_data_split(args.split_type, p, code)
            do_all(args, train_data, test_data, test_background, code_to_str, p, code)
    else:
        train_data, test_data, test_background = get_data_split(args.split_type, p)
        do_all(args, train_data, test_data, test_background, code_to_str, p)

if __name__ == "__main__":
    ...
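In this script, test_background is a glob pattern, not a pytest-bdd object: get_data_split() returns it alongside the train and test annotations, pointing at the raw background corpus for the chosen split, and do_all() hands it to get_wv_nyt_name() and get_top_words(), both of which stream the matching JSON articles through BackgroundIter. The following is a hypothetical driver sketch (it assumes the eval_frames module, the params module, and the MFC data paths configured in Params are available locally) showing that flow:

# sketch_background_usage.py (illustrative only)
from eval_frames import get_data_split, get_wv_nyt_name, get_top_words
from params import Params

params = Params()
# For the 'tobacco' split, test_background is the glob MFC_RAW_PATH/smoking/json/*.json
train_data, test_data, test_background = get_data_split("tobacco", params)

# Fine-tune the background word2vec model on those background articles...
wv_name = get_wv_nyt_name(test_background, "tobacco", params)
# ...and count word/article frequencies over the same background corpus.
top_words, num_articles, article_counter = get_top_words(test_background)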

test_syntheticdataset2_shapes.py

Source: test_syntheticdataset2_shapes.py (GitHub)

from SyntheticDataset2.ElementsCreator import *
import unittest
from PIL import Image

class UpdatedSyntheticDatasetShapesTestCase(unittest.TestCase):
    def test_rectangle(self):
        width = 100
        height = 50
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_rectangle = Rectangle(width, height, color, rotation)
        self.assertEqual(test_rectangle.draw().getpixel((50, 25)), (255, 0, 0, 255))
        self.assertEqual(test_rectangle.draw().getpixel((80, 40)), (255, 0, 0, 255))
        test_rectangle.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((265, 210)), (255, 0, 0, 255))

    def test_triangle(self):
        base = 100
        height = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_triangle = Triangle(base, height, color, rotation)
        self.assertEqual(test_triangle.draw().getpixel((100, 100)), (255, 0, 0, 255))
        self.assertEqual(test_triangle.draw().getpixel((75, 130)), (255, 0, 0, 255))
        test_triangle.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((250, 250)), (255, 0, 0, 255))

    def test_circle(self):
        radius = 100
        color = (255, 0, 0)
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_circle = Circle(radius, color)
        self.assertEqual(test_circle.draw().getpixel((50, 25)), (255, 0, 0, 255))
        self.assertEqual(test_circle.draw().getpixel((60, 50)), (255, 0, 0, 255))
        test_circle.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((260, 160)), (255, 0, 0, 255))

    def test_half_circle(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_half_circle = HalfCircle(radius, color, rotation)
        self.assertEqual(test_half_circle.draw().getpixel((100, 100)), (255, 0, 0, 255))
        self.assertEqual(test_half_circle.draw().getpixel((75, 200)), (255, 0, 0, 255))
        test_half_circle.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((215, 340)), (255, 0, 0, 255))

    def test_square(self):
        base = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_square = Square(base, color, rotation)
        self.assertEqual(test_square.draw().getpixel((50, 25)), (255, 0, 0, 255))
        self.assertEqual(test_square.draw().getpixel((20, 70)), (255, 0, 0, 255))
        test_square.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((250, 195)), (255, 0, 0, 255))

    def test_trapezoid(self):
        base1 = 75
        base2 = 100
        height = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_trapezoid = Trapezoid(base1, base2, height, color, rotation)
        self.assertEqual(test_trapezoid.draw().getpixel((50, 25)), (255, 0, 0, 255))
        self.assertEqual(test_trapezoid.draw().getpixel((70, 30)), (255, 0, 0, 255))
        test_trapezoid.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((300, 250)), (255, 0, 0, 255))

    def test_quarter_circle(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_quarter_circle = QuarterCircle(radius, color, rotation)
        self.assertEqual(test_quarter_circle.draw().getpixel((100, 100)), (255, 0, 0, 255))
        self.assertEqual(test_quarter_circle.draw().getpixel((70, 20)), (255, 0, 0, 255))
        test_quarter_circle.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((250, 300)), (255, 0, 0, 255))

    def test_cross(self):
        height = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_cross = Cross(height, color, rotation)
        self.assertEqual(test_cross.draw().getpixel((100, 100)), (255, 0, 0, 255))
        self.assertEqual(test_cross.draw().getpixel((45, 40)), (255, 0, 0, 255))
        test_cross.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((225, 285)), (255, 0, 0, 255))

    def test_pentagon(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_pentagon = Pentagon(radius, color, rotation)
        self.assertEqual(test_pentagon.draw().getpixel((150, 150)), (255, 0, 0, 255))
        self.assertEqual(test_pentagon.draw().getpixel((125, 60)), (255, 0, 0, 255))
        test_pentagon.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((210, 330)), (255, 0, 0, 255))

    def test_star(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_star = Star(radius, color, rotation)
        self.assertEqual(test_star.draw().getpixel((150, 150)), (255, 0, 0, 255))
        self.assertEqual(test_star.draw().getpixel((170, 100)), (255, 0, 0, 255))
        test_star.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((230, 200)), (255, 0, 0, 255))

    def test_hexagon(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_hexagon = Hexagon(radius, color, rotation)
        self.assertEqual(test_hexagon.draw().getpixel((150, 150)), (255, 0, 0, 255))
        self.assertEqual(test_hexagon.draw().getpixel((90, 100)), (255, 0, 0, 255))
        test_hexagon.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((260, 200)), (255, 0, 0, 255))

    def test_heptagon(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_heptagon = Heptagon(radius, color, rotation)
        self.assertEqual(test_heptagon.draw().getpixel((150, 150)), (255, 0, 0, 255))
        self.assertEqual(test_heptagon.draw().getpixel((70, 150)), (255, 0, 0, 255))
        test_heptagon.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((235, 185)), (255, 0, 0, 255))

    def test_octagon(self):
        radius = 100
        color = (255, 0, 0)
        rotation = 45
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))
        test_octagon = Octagon(radius, color, rotation)
        self.assertEqual(test_octagon.draw().getpixel((150, 150)), (255, 0, 0, 255))
        self.assertEqual(test_octagon.draw().getpixel((100, 170)), (255, 0, 0, 255))
        test_octagon.overlay(midpoint, test_background)
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        ...
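Here test_background is simply the PIL canvas each shape is overlaid on: every test builds a solid blue 500x500 RGBA image, draws a red shape, overlays it at a midpoint, and asserts pixel colours on both the shape and the background. The same pattern can be exercised without the SyntheticDataset2 dependency; the following is a self-contained sketch using plain Pillow, where a centred paste stands in for the library's overlay() method:

# sketch of the test_background pattern with plain Pillow (no SyntheticDataset2 required)
import unittest
from PIL import Image

class BackgroundOverlaySketch(unittest.TestCase):
    def test_overlay_on_background(self):
        midpoint = (250, 250)
        test_background = Image.new('RGBA', (500, 500), color=(0, 0, 255))

        # stand-in for Rectangle(...).draw(): a 100x50 solid red patch
        shape = Image.new('RGBA', (100, 50), color=(255, 0, 0, 255))

        # stand-in for shape.overlay(midpoint, test_background): paste centred on midpoint
        test_background.paste(shape, (midpoint[0] - 50, midpoint[1] - 25), shape)

        # the midpoint is now red; a corner far from the shape is still blue
        self.assertEqual(test_background.getpixel(midpoint), (255, 0, 0, 255))
        self.assertEqual(test_background.getpixel((10, 10)), (0, 0, 255, 255))

if __name__ == "__main__":
    unittest.main()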

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, which covers everything from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytest-bdd automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
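A common way to do this is to expose a Selenium Remote WebDriver as a pytest fixture and drive it from your pytest-bdd step definitions. The sketch below is illustrative only: it assumes the hub.lambdatest.com/wd/hub endpoint and LT_USERNAME / LT_ACCESS_KEY environment variables, and the capability values are placeholders; consult the LambdaTest capability generator for values that match your plan.

# conftest.py (sketch, not an official LambdaTest or pytest-bdd recipe)
import os
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

@pytest.fixture
def browser():
    options = Options()
    # LambdaTest-specific settings travel under the "LT:Options" capability.
    options.set_capability("LT:Options", {
        "platformName": "Windows 10",          # placeholder platform
        "build": "pytest-bdd background demo",  # placeholder build name
    })
    url = "https://{0}:{1}@hub.lambdatest.com/wd/hub".format(
        os.environ["LT_USERNAME"], os.environ["LT_ACCESS_KEY"]
    )
    driver = webdriver.Remote(command_executor=url, options=options)
    yield driver
    driver.quit()

Step definitions registered with @given, @when, and @then can then request this browser fixture like any other pytest fixture.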

