How to use the transform_head method in pandera

Best Python code snippet using pandera_python

Image_Processing_Project.py

Source: Image_Processing_Project.py (GitHub)


import cv2
import numpy as np
import dlib

cap = cv2.VideoCapture('video.mp4')
# cap = cv2.VideoCapture('walking_man.mp4')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# Draw blush on the cheeks. Note: these functions read the global `faces`,
# which is set by the detector in the main loop below.
def cheek(imgorg):
    img_cheek = imgorg.copy()
    for face in faces:
        landmarks = predictor(imgorg, face)
        # left cheek
        x1, x2 = landmarks.part(36).x, landmarks.part(48).x
        y1, y2 = landmarks.part(36).y, landmarks.part(48).y
        x3 = landmarks.part(31).x
        cen_x, cen_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
        size = int((x3 - cen_x) / 2)
        cv2.circle(img_cheek, (cen_x, cen_y), size, (153, 70, 252), cv2.FILLED)
        # right cheek
        x1, x2 = landmarks.part(45).x, landmarks.part(54).x
        y1, y2 = landmarks.part(45).y, landmarks.part(54).y
        right_x, right_y = int((x1 + x2) / 2), int((y1 + y2) / 2)
        cv2.circle(img_cheek, (right_x, right_y), size, (153, 70, 252), cv2.FILLED)
    return img_cheek

def sunglasses(imgorg, sunglass_img):
    img_sun = imgorg.copy()
    point0 = (int(sunglass_img.shape[1] * 0.2), int(sunglass_img.shape[0] * 0.5))
    point2 = (int(sunglass_img.shape[1] * 0.8), int(sunglass_img.shape[0] * 0.5))
    Src = [(point0[0], point0[1]), (point2[0], point2[1])]  # anchor points in the sunglasses image
    sunglass_gray = cv2.cvtColor(sunglass_img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(sunglass_gray, 170, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.merge((mask, mask, mask))
    mask_inv = cv2.bitwise_not(mask)
    sunglass_mask = cv2.bitwise_and(sunglass_img, mask)  # sunglasses only
    eye_mask = cv2.bitwise_and(sunglass_img, mask_inv)  # glasses mask and eyes
    eye_mask = cv2.bitwise_not(eye_mask)
    for face in faces:
        landmarks = predictor(imgorg, face)
        left_eye_x, left_eye_y = landmarks.part(36).x, landmarks.part(36).y
        right_eye_x, right_eye_y = landmarks.part(45).x, landmarks.part(45).y
        # corresponding points for the sunglasses image
        Dst = [(left_eye_x, left_eye_y), (right_eye_x, right_eye_y)]
        # build the 2D transform matrix
        ret = cv2.estimateAffinePartial2D(np.array([Src]), np.array([Dst]))
        transform_matrix = ret[0]
        # move the sunglasses into position
        transform_sunglass = cv2.warpAffine(sunglass_mask, transform_matrix, (img_sun.shape[1], img_sun.shape[0]))
        # reposition and scale the eye-mask image
        transform_eye = cv2.warpAffine(eye_mask, transform_matrix, (img_sun.shape[1], img_sun.shape[0]))
        sun_face = cv2.bitwise_and(img_sun, transform_eye)
        sun_face = cv2.addWeighted(sun_face, 0.5, transform_sunglass, 0.4, 0)
        face_without_eye = cv2.bitwise_and(img_sun, cv2.bitwise_not(transform_eye))
        img_sun = cv2.add(sun_face, face_without_eye)
    return img_sun

def head(imgorg, hat_img):
    img_head = imgorg.copy()
    point0 = (int(hat_img.shape[1] * 0.1), int(hat_img.shape[0] * 0.5))  # anchor points in the headband image
    point2 = (int(hat_img.shape[1] * 0.9), int(hat_img.shape[0] * 0.5))
    Src = [(point0[0] - 40, point0[1]), (point2[0], point2[1])]
    head_gray = cv2.cvtColor(hat_img, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(head_gray, 180, 255, cv2.THRESH_BINARY_INV)
    mask = cv2.merge((mask, mask, mask))
    mask_inv = cv2.bitwise_not(mask)
    hat_mask = cv2.bitwise_and(hat_img, mask)  # headband only
    head_mask = cv2.bitwise_and(hat_img, mask_inv)  # headband mask
    head_mask = cv2.bitwise_not(head_mask)
    for face in faces:
        # face-region coordinates
        landmarks = predictor(imgorg, face)
        x1, y1 = face.left(), face.top()
        x2, y2 = face.right(), face.bottom()
        y3 = int(y1 + ((y1 - y2) * 0.3))
        # corresponding points for the headband image
        Dst = [(x1 - 40, y3), (x2 + 10, y3)]
        # build the 2D transform matrix
        ret = cv2.estimateAffinePartial2D(np.array([Src]), np.array([Dst]))
        transform_matrix = ret[0]
        # move the headband into position
        transform_hat = cv2.warpAffine(hat_mask, transform_matrix, (img_head.shape[1], img_head.shape[0]))
        # reposition and scale the head-mask image
        transform_head = cv2.warpAffine(head_mask, transform_matrix, (img_head.shape[1], img_head.shape[0]))
        head_face = cv2.bitwise_and(img_head, transform_head)
        head_face = cv2.addWeighted(head_face, 0.0, transform_hat, 1.0, 0)
        face_without_eye = cv2.bitwise_and(img_head, cv2.bitwise_not(transform_head))
        img_head = cv2.add(head_face, face_without_eye)  # was img_hat; renamed so the function also works when no face is found
    return img_head

def faceblur(imgorg):
    # Outline each detected face region and blur only that region
    for face in faces:
        x1, y1 = face.left(), face.top()
        x2, y2 = face.right(), face.bottom()
        cv2.rectangle(imgorg, (x1, y1), (x2, y2), (0, 0, 0), 2)
        b_face = imgorg[y1:y2, x1:x2]
        b_face = cv2.blur(b_face, (10, 10))
        imgorg[y1:y2, x1:x2] = b_face
    return imgorg

while True:
    ret, img = cap.read()
    if not ret:
        break
    hat_img = cv2.imread("./images/headpin.png")  # headband image
    sunglass_img = cv2.imread("./images/sunglass.png")  # sunglasses image
    scaler = 0.3  # video resize factor
    img = cv2.resize(img, (int(img.shape[1] * scaler), int(img.shape[0] * scaler)))
    imgorg = img.copy()
    faces = detector(imgorg)
    # call the filter functions
    cheek_face = cheek(imgorg)
    sunglass_face = sunglasses(imgorg, sunglass_img)
    hat_face = head(imgorg, hat_img)
    blur_face = faceblur(imgorg)
    # cv2.imshow("img", img)
    cv2.imshow("cheek_face", cheek_face)
    cv2.imshow("sunglass_face", sunglass_face)
    cv2.imshow("hat_face", hat_face)
    cv2.imshow("blur_img", blur_face)
...
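Every overlay above hinges on the same two-step alignment: cv2.estimateAffinePartial2D fits a 2x3 similarity transform (rotation, uniform scale, translation) from two anchor points in the accessory image to two detected landmarks, and cv2.warpAffine applies it. A minimal standalone sketch of just that step, with made-up coordinates rather than the project's landmarks:

import cv2
import numpy as np

# Two anchor points in the accessory image and their matching target points
# in the video frame (coordinates here are illustrative only).
src = np.array([[20.0, 50.0], [80.0, 50.0]], dtype=np.float32)
dst = np.array([[120.0, 140.0], [200.0, 150.0]], dtype=np.float32)

# Fit the 2x3 similarity matrix; two point pairs determine
# the 4-DOF transform exactly.
matrix, inliers = cv2.estimateAffinePartial2D(src, dst)

# Warp a blank stand-in accessory into frame coordinates (width, height).
accessory = np.zeros((100, 100, 3), dtype=np.uint8)
warped = cv2.warpAffine(accessory, matrix, (320, 240))
print(matrix.shape)  # (2, 3)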


learn_pyltp.py

Source: learn_pyltp.py (GitHub)


...
    arcs = get_arcs(words)  # dependency parsing
    # print("\t".join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
    parse = [arc.relation for arc in arcs]
    head = [arc.head for arc in arcs]
    head = transform_head(head)
    return parse, head


def get_role(words):
    postags = get_postag(words)  # part-of-speech tagging
    arcs = get_arcs(words)  # dependency parsing
    roles = labeller.label(words, postags, arcs)  # semantic role labeling
    for role in roles:
        print(role.index, "".join(
            "%s:(%d,%d)" % (arg.name, arg.range.start, arg.range.end)
            for arg in role.arguments))
    labeller.release()  # release the model


def get_recognize(words):
    postags = get_postag(words)
    netags = recognizer.recognize(words, postags)  # named entity recognition
    recognizer.release()  # release the model (originally after the return, where it was unreachable)
    return netags


def transform_head(head):
    '''
    LTP's dependency arcs are numbered from the root node, so the heads
    0A, 1B become 0root, 1A, 2B. For convenience later on, convert them
    so each head index corresponds one-to-one with the word list.
    '''
    head_new = []
    for i in head:
        head_new.append(i if i == 0 else i - 1)
    return head_new


if __name__ == '__main__':
...
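The transform_head helper here is plain index arithmetic: pyltp reports each arc's head as a 1-based word position, with 0 meaning the ROOT node, and the function shifts every non-zero head down by one so it indexes directly into the word list (ROOT stays at 0, which then coincides with the first word's index). A quick standalone check, with the function copied out as-is:

def transform_head(head):
    # 0 marks ROOT and is left alone; other heads shift from 1-based to 0-based
    return [i if i == 0 else i - 1 for i in head]

# Example arcs for a three-word sentence: word 1 depends on word 2,
# word 2 is the ROOT, word 3 depends on word 2.
heads = [2, 0, 2]
print(transform_head(heads))  # [1, 0, 1] -- now valid indices into the word list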


marker_roi_heads.py

Source: marker_roi_heads.py (GitHub)


...
        self.decoder_head = decoder_head

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg)
        transform_head = build_transform_head(cfg, input_shape)
        ret["transform_head"] = transform_head
        corner_input_shape, decoder_input_shape = transform_head.output_shape
        ret["corner_head"] = build_corner_head(cfg, corner_input_shape)
        ret["decoder_head"] = build_decoder_head(cfg, decoder_input_shape)
        return ret

    @property
    def device(self):
        return self.corner_head.device

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Dict], Dict]:
        # del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
            # del targets

        if self.training:
            corner_features, decoding_features, sample_locations_batch, losses = self.transform_head(images, features, proposals, targets)
            losses.update(self.corner_head(corner_features, proposals))
            losses.update(self.decoder_head(decoding_features, proposals))
            del images, targets
            return [], losses

        corner_features, decoding_features, sample_locations_batch, _ = self.transform_head(images, features, proposals, targets)
        corners_batch = self.corner_head(corner_features, proposals)
        obj_scores_batch, decoded_messages_batch = self.decoder_head(decoding_features, proposals)
        results = []
        for i in range(len(proposals)):
            output = {
                "corners": corners_batch[i], "obj_scores": obj_scores_batch[i],
                "decoded_messages": decoded_messages_batch[i],
                "image_shape": proposals[i].image_size}
            if sample_locations_batch:
                output["sample_locations"] = sample_locations_batch[i]
            results.append(output)
...
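from_config here follows detectron2's standard pattern: it returns a kwargs dict that the parent ROI-heads constructor consumes, and build_transform_head / build_corner_head / build_decoder_head are project-specific factories not shown in the snippet. A hedged sketch of what such a factory usually looks like in detectron2 style; the registry name and config key below are assumptions for illustration, not this repository's actual definitions:

from detectron2.utils.registry import Registry

# Hypothetical registry; the real project defines its own name and members.
TRANSFORM_HEAD_REGISTRY = Registry("TRANSFORM_HEAD")

def build_transform_head(cfg, input_shape):
    # Look up the head class named in the config and instantiate it.
    name = cfg.MODEL.TRANSFORM_HEAD.NAME  # assumed config key
    return TRANSFORM_HEAD_REGISTRY.get(name)(cfg, input_shape)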


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pandera automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
