How to use drawBboxes method in fMBT

Best Python code snippet using fMBT_python

yolov3-app.py

Source:yolov3-app.py Github

copy

Full Screen

...111 return inference_duration, detected_objects112 except Exception as e:113 print('EXCEPTION:', str(e))114 return 'Error processing image', 500115def drawBboxes(image, detected_objects):116 objects_identified = len(detected_objects)117 118 iw, ih = image.size119 draw = ImageDraw.Draw(image) 120 textfont = ImageFont.load_default()121 122 for pos in range(objects_identified): 123 entity = detected_objects[pos]['entity'] 124 box = entity["box"]125 x1 = box["l"]126 y1 = box["t"]127 x2 = box["w"]128 y2 = box["h"]129 130 x1 = x1 * iw131 y1 = y1 * ih132 x2 = (x2 * iw) + x1133 y2 = (y2 * ih) + y1134 tag = entity['tag']135 objClass = tag['value'] 136 draw.rectangle((x1, y1, x2, y2), outline = 'blue', width = 1)137 print('rectangle drawn')138 draw.text((x1, y1), str(objClass), fill = "white", font = textfont)139 140 return image141app = Flask(__name__)142# / routes to the default function which returns 'Hello World'143@app.route('/', methods=['GET'])144def defaultPage():145 return Response(response='Hello from Yolov3 inferencing based on ONNX', status=200)146@app.route('/stream/<id>')147def stream(id):148 respBody = ("<html>"149 "<h1>Stream with inferencing overlays</h1>"150 "<img src=\"/mjpeg/" + id + "\"/>"151 "</html>")152 return Response(respBody, status= 200)153 #return render_template('mjpeg.html')154# /score routes to scoring function 155# This function returns a JSON object with inference duration and detected objects156@app.route("/score", methods=['POST'])157def score():158 try:159 objectType = None160 confidenceThreshold = 0.0161 if (request.args):162 try:163 objectType = request.args.get('object')164 stream = request.args.get('stream')165 confidence = request.args.get('confidence')166 if confidence is not None:167 confidenceThreshold = float(confidence) 168 except Exception as ex:169 print('EXCEPTION:', str(ex)) 170 imageData = io.BytesIO(request.get_data())171 # load the image172 img = Image.open(imageData)173 inference_duration, detected_objects = 
processImage(img, objectType, confidenceThreshold) 174 try: 175 if stream is not None:176 output_img = drawBboxes(img, detected_objects)177 imgBuf = io.BytesIO()178 output_img.save(imgBuf, format='JPEG')179 # post the image with bounding boxes so that it can be viewed as an MJPEG stream180 postData = b'--boundary\r\n' + b'Content-Type: image/jpeg\r\n\r\n' + imgBuf.getvalue() + b'\r\n'181 requests.post('http://127.0.0.1:80/mjpeg_pub/' + stream, data = postData)182 except Exception as ex:183 print('EXCEPTION:', str(ex))184 if len(detected_objects) > 0:185 respBody = { 186 "inferences" : detected_objects187 }188 respBody = json.dumps(respBody)189 return Response(respBody, status= 200, mimetype ='application/json')190 else:191 return Response(status= 204) 192 except Exception as e:193 print('EXCEPTION:', str(e))194 return Response(response='Error processing image ' + str(e), status= 500)195# /score-debug routes to score_debug196# This function scores the image and stores an annotated image for debugging purposes197@app.route('/score-debug', methods=['POST'])198def score_debug():199 try:200 imageData = io.BytesIO(request.get_data())201 # load the image202 img = Image.open(imageData)203 inference_duration, detected_objects = processImage(img)204 print('Inference duration was ', str(inference_duration))205 output_img = drawBboxes(img, detected_objects)206 # datetime object containing current date and time207 now = datetime.now()208 209 output_img_file = now.strftime("%d_%m_%Y_%H_%M_%S.jpeg")210 output_img.save(output_dir + "/" + output_img_file)211 respBody = { 212 "inferences" : detected_objects213 } 214 215 return respBody216 except Exception as e:217 print('EXCEPTION:', str(e))218 return Response(response='Error processing image', status= 500)219# /annotate routes to annotation function 220# This function returns an image with bounding boxes drawn around detected objects221@app.route('/annotate', methods=['POST'])222def annotate():223 try:224 imageData = 
io.BytesIO(request.get_data())225 # load the image226 img = Image.open(imageData)227 inference_duration, detected_objects = processImage(img)228 print('Inference duration was ', str(inference_duration))229 img = drawBboxes(img, detected_objects)230 231 imgByteArr = io.BytesIO() 232 img.save(imgByteArr, format = 'JPEG') 233 imgByteArr = imgByteArr.getvalue() 234 return Response(response = imgByteArr, status = 200, mimetype = "image/jpeg")235 except Exception as e:236 print('EXCEPTION:', str(e))237 return Response(response='Error processing image', status= 500)238# Load and initialize the model239init()240if __name__ == '__main__':241 # Run the server...

Full Screen

Full Screen

detect.py

Source:detect.py Github

copy

Full Screen

import os
import cv2
import time
import mxnet as mx
from . import predict
from .config_farm import configuration_10_320_20L_5scales_v2 as cfg
from threading import Thread

model_file_path = "LFFD/models/train_10_320_20L_5scales_v2_iter_1000000.params"
symbol_file_path = "LFFD/symbols/symbol_10_320_20L_5scales_v2_deploy.json"
ctx = mx.gpu(0)

### Threshold for Face Matching
cosine_threshold = 0.8
proba_threshold = 0.6
comparing_num = 5

# Module-level LFFD face detector; built once at import time.
face_predictor = predict.Predict(mxnet=mx,
                                 symbol_file_path=symbol_file_path,
                                 model_file_path=model_file_path,
                                 ctx=ctx,
                                 receptive_field_list=cfg.param_receptive_field_list,
                                 receptive_field_stride=cfg.param_receptive_field_stride,
                                 bbox_small_list=cfg.param_bbox_small_list,
                                 bbox_large_list=cfg.param_bbox_large_list,
                                 receptive_field_center_start=cfg.param_receptive_field_center_start,
                                 num_output_scales=cfg.param_num_output_scales)


class DetectFaces():
    """Runs face detection on self.frame in a background thread and draws
    the detected boxes back onto the frame."""

    def __init__(self, frame=None):
        ### Loading Face Detection Models
        # Latest frame to process; callers update self.frame externally.
        self.frame = frame
        # Flag polled by the worker loop; set by stop() to end processing.
        self.stopped = False

    def start(self):
        print("START VIDEO PROCESSSING")
        # BUGFIX: the original did 'Thread.daemon = True' AFTER start(),
        # which set the attribute on the Thread *class*, not this thread —
        # the worker was never actually a daemon. Set it on the instance
        # before starting.
        worker = Thread(target=self.drawBBoxes, args=())
        worker.daemon = True
        worker.start()
        return self

    def drawBBoxes(self):
        # Worker loop: detect faces on the current frame, clamp the boxes to
        # the frame bounds, draw them, and publish the annotated frame.
        while not self.stopped:
            frame = self.frame
            bboxes, infer_time = face_predictor.predict(
                frame, resize_scale=1, score_threshold=0.6, top_k=10000,
                NMS_threshold=0.4, NMS_flag=True, skip_scale_branch_list=[])

            h, w, c = frame.shape
            for bbox in bboxes:
                print(bbox)
                # bbox[:-1] drops the trailing confidence score; clamp each
                # corner inside the frame before drawing.
                bbox_int = [int(b) for b in bbox[:-1]]
                bbox_int[0] = max(0, min(w - 1, bbox_int[0]))
                bbox_int[1] = max(0, min(h - 1, bbox_int[1]))
                bbox_int[2] = max(0, min(w - 1, bbox_int[2]))
                bbox_int[3] = max(0, min(h - 1, bbox_int[3]))
                cv2.rectangle(frame, tuple(bbox_int[0:2]), tuple(bbox_int[2:4]), (0, 255, 0), 2)
            self.frame = frame

    def stop(self):
        # NOTE(review): body truncated in the source excerpt; presumably sets
        # self.stopped = True — confirm against the original file.
        ...

Full Screen

Full Screen

objdetect.py

Source:objdetect.py Github

copy

Full Screen

import cv2 as cv
import torch

# Load model
model = (
    torch.hub.load("ultralytics/yolov5", "yolov5s", pretrained=True).fuse().autoshape()
)  # for PIL/cv2/np inputs and NMS

# Get camera 0 is my usb cam and 2 is my internal one
cap = cv.VideoCapture(2)


def drawbboxes(img, bboxes, labels):
    """Draw bounding boxed onto the image

    :img: Image to draw the bboxes on
    :bboxes: Bounding boxes to draw which should be given as (top-left, bottom-right)
    :returns: An image with bounding boxes drawn
    """
    thickness = 5
    color = (0, 255, 0)
    for bbox in bboxes:
        # top-left is x1, y1; bottom-right is x2,y2
        # Each bbox is (x1, y1, x2, y2, confidence, class-index).
        x1, y1, x2, y2, prob, category = (
            int(bbox[0]),
            int(bbox[1]),
            int(bbox[2]),
            int(bbox[3]),
            round(bbox[4], 2),
            labels[int(bbox[5])],
        )
        img = cv.rectangle(img, (x1, y1), (x2, y2), color, thickness)
        # Label just above the box's top-left corner.
        img = cv.putText(
            img,
            f"Label: {category} ({prob})",
            (x1, y1 - 10),
            0,
            0.5,
            color,
            thickness // 3,
        )
    return img


while True:
    success, img = cap.read()
    # Predict
    img2 = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    results = model(img2, size=640)  # includes NMS
    results.print()  # print results to screen
    # Visualize
    img = drawbboxes(img, results.pred[0].tolist(), results.names)
    cv.imshow("Camera", img)
    # BUGFIX: the original tested 'cv.waitKey(1) and 0xFF == ord("q")',
    # which is always False (255 != ord('q')), so 'q' never quit the loop.
    # The key code must be masked with bitwise &.
    if cv.waitKey(1) & 0xFF == ord("q"):
        break  # NOTE(review): loop tail truncated in source; 'break' is the conventional body

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful