How to use the offset_left method in hypothesis

Best Python code snippets using hypothesis
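In the snippets below, offset_left is an ordinary Python variable: it records how far content has been shifted from the left edge of a padded image, an SVG timeline, or an e-ink display. Before the full sources, here is a minimal numpy sketch of the sign convention used by the Padding transform in the first snippet (the toy array and pad sizes are assumptions):

import numpy as np

# A 2x3 single-channel "image" and the padding we want on the left and top.
img = np.arange(6, dtype=np.uint8).reshape(2, 3)
left_pad, up_pad = 2, 1

# Convention from the Padding class below: negative offsets mean the
# content moves right/down inside a larger canvas.
offset_left = -left_pad
offset_up = -up_pad

# Build the expanded canvas and paste the image at the padded position.
canvas = np.zeros((img.shape[0] + up_pad, img.shape[1] + left_pad), dtype=img.dtype)
canvas[abs(offset_up):abs(offset_up) + img.shape[0],
       abs(offset_left):abs(offset_left) + img.shape[1]] = img

# Any coordinate attached to the image shifts by the same offsets.
point_x, point_y = 1, 1
point_x -= offset_left  # now 3
point_y -= offset_up    # now 2

Every annotation attached to the image (keypoints, boxes, polygons) must be shifted by the same offsets, which is exactly what the transforms below do.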

cv2_aug_transforms.py

Source: cv2_aug_transforms.py (GitHub)


#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You (youansheng@gmail.com)

import collections
import random
import math

import cv2
import numpy as np

from utils.tools.logger import Logger as Log


class RandomPad(object):
    """ Padding the Image to proper size.
    Args:
        stride: the stride of the network.
        pad_value: the value that pad to the image border.
        img: Image object as input.
    Returns::
        img: Image object.
    """
    def __init__(self, up_scale_range=None, ratio=0.5, mean=(104, 117, 123)):
        # do something
        assert isinstance(up_scale_range, (list, tuple))
        self.up_scale_range = up_scale_range
        self.ratio = ratio
        self.mean = mean

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, channels = img.shape
        ws = random.uniform(self.up_scale_range[0], self.up_scale_range[1])
        hs = ws
        for _ in range(50):
            scale = random.uniform(self.up_scale_range[0], self.up_scale_range[1])
            min_ratio = max(0.5, 1. / scale / scale)
            max_ratio = min(2, scale * scale)
            ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
            ws = scale * ratio
            hs = scale / ratio
            if ws >= 1 and hs >= 1:
                break

        w = int(ws * width)
        h = int(hs * height)
        pad_width = random.randint(0, w - width)
        pad_height = random.randint(0, h - height)
        left_pad = random.randint(0, pad_width)  # pad_left
        up_pad = random.randint(0, pad_height)  # pad_up

        if not isinstance(img, list):
            img = cv2.copyMakeBorder(img, up_pad, pad_height - up_pad, left_pad, pad_width - left_pad,
                                     cv2.BORDER_CONSTANT, value=self.mean)
        else:
            img = [cv2.copyMakeBorder(item, up_pad, pad_height - up_pad, left_pad, pad_width - left_pad,
                                      cv2.BORDER_CONSTANT, value=self.mean) for item in img]

        if labelmap is not None:
            labelmap = cv2.copyMakeBorder(labelmap, up_pad, pad_height - up_pad, left_pad, pad_width - left_pad,
                                          cv2.BORDER_CONSTANT, value=255)

        if maskmap is not None:
            maskmap = cv2.copyMakeBorder(maskmap, up_pad, pad_height - up_pad, left_pad, pad_width - left_pad,
                                         cv2.BORDER_CONSTANT, value=1)

        if polygons is not None:
            for object_id in range(len(polygons)):
                for polygon_id in range(len(polygons[object_id])):
                    polygons[object_id][polygon_id][0::2] += left_pad
                    polygons[object_id][polygon_id][1::2] += up_pad

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] += left_pad
            kpts[:, :, 1] += up_pad

        if bboxes is not None and bboxes.size > 0:
            bboxes[:, 0::2] += left_pad
            bboxes[:, 1::2] += up_pad

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class Padding(object):
    """ Padding the Image to proper size.
    Args:
        stride: the stride of the network.
        pad_value: the value that pad to the image border.
        img: Image object as input.
    Returns::
        img: Image object.
    """
    def __init__(self, pad=None, ratio=0.5, mean=(104, 117, 123), allow_outside_center=True):
        self.pad = pad
        self.ratio = ratio
        self.mean = mean
        self.allow_outside_center = allow_outside_center

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, channels = img.shape
        left_pad, up_pad, right_pad, down_pad = self.pad
        target_size = [width + left_pad + right_pad, height + up_pad + down_pad]
        offset_left = -left_pad
        offset_up = -up_pad

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] -= offset_left
            kpts[:, :, 1] -= offset_up
            mask = np.logical_or.reduce((kpts[:, :, 0] >= target_size[0], kpts[:, :, 0] < 0,
                                         kpts[:, :, 1] >= target_size[1], kpts[:, :, 1] < 0))
            kpts[mask == 1, 2] = -1

        if bboxes is not None and bboxes.size > 0:
            if self.allow_outside_center:
                mask = np.ones(bboxes.shape[0], dtype=bool)
            else:
                crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])
                center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
                mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)

            bboxes[:, 0::2] -= offset_left
            bboxes[:, 1::2] -= offset_up
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)
            mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
            bboxes = bboxes[mask]
            if labels is not None:
                labels = labels[mask]

            if polygons is not None:
                new_polygons = list()
                for object_id in range(len(polygons)):
                    if mask[object_id] == 1:
                        for polygon_id in range(len(polygons[object_id])):
                            polygons[object_id][polygon_id][0::2] -= offset_left
                            polygons[object_id][polygon_id][1::2] -= offset_up
                            polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
                                                                            0, target_size[0] - 1)
                            polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
                                                                            0, target_size[1] - 1)
                        new_polygons.append(polygons[object_id])

                polygons = new_polygons

        if not isinstance(img, list):
            expand_image = np.zeros((max(height, target_size[1]) + abs(offset_up),
                                     max(width, target_size[0]) + abs(offset_left), channels), dtype=img.dtype)
            expand_image[:, :, :] = self.mean
            expand_image[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                         abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = img
            img = expand_image[max(offset_up, 0):max(offset_up, 0) + target_size[1],
                               max(offset_left, 0):max(offset_left, 0) + target_size[0]]
        else:
            for i in range(len(img)):
                expand_image = np.zeros((max(height, target_size[1]) + abs(offset_up),
                                         max(width, target_size[0]) + abs(offset_left), channels), dtype=img[i].dtype)
                expand_image[:, :, :] = self.mean
                expand_image[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                             abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = img[i]
                img[i] = expand_image[max(offset_up, 0):max(offset_up, 0) + target_size[1],
                                      max(offset_left, 0):max(offset_left, 0) + target_size[0]]

        if maskmap is not None:
            expand_maskmap = np.zeros((max(height, target_size[1]) + abs(offset_up),
                                       max(width, target_size[0]) + abs(offset_left)), dtype=maskmap.dtype)
            expand_maskmap[:, :] = 1
            expand_maskmap[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                           abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = maskmap
            maskmap = expand_maskmap[max(offset_up, 0):max(offset_up, 0) + target_size[1],
                                     max(offset_left, 0):max(offset_left, 0) + target_size[0]]

        if labelmap is not None:
            expand_labelmap = np.zeros((max(height, target_size[1]) + abs(offset_up),
                                        max(width, target_size[0]) + abs(offset_left)), dtype=labelmap.dtype)
            expand_labelmap[:, :] = 255
            expand_labelmap[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                            abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = labelmap
            labelmap = expand_labelmap[max(offset_up, 0):max(offset_up, 0) + target_size[1],
                                       max(offset_left, 0):max(offset_left, 0) + target_size[0]]

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomHFlip(object):
    def __init__(self, swap_pair=None, ratio=0.5):
        self.swap_pair = swap_pair
        self.ratio = ratio

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, _ = img.shape
        if not isinstance(img, list):
            img = cv2.flip(img, 1)
        else:
            img = [cv2.flip(item, 1) for item in img]

        if labelmap is not None:
            labelmap = cv2.flip(labelmap, 1)

        if maskmap is not None:
            maskmap = cv2.flip(maskmap, 1)

        if polygons is not None:
            for object_id in range(len(polygons)):
                for polygon_id in range(len(polygons[object_id])):
                    polygons[object_id][polygon_id][0::2] = width - 1 - polygons[object_id][polygon_id][0::2]

        if bboxes is not None and bboxes.size > 0:
            xmin = width - 1 - bboxes[:, 2]
            xmax = width - 1 - bboxes[:, 0]
            bboxes[:, 0] = xmin
            bboxes[:, 2] = xmax

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] = width - 1 - kpts[:, :, 0]
            for pair in self.swap_pair:
                temp_point = np.copy(kpts[:, pair[0] - 1])
                kpts[:, pair[0] - 1] = kpts[:, pair[1] - 1]
                kpts[:, pair[1] - 1] = temp_point

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomSaturation(object):
    def __init__(self, lower=0.5, upper=1.5, ratio=0.5):
        self.lower = lower
        self.upper = upper
        self.ratio = ratio
        assert self.upper >= self.lower, "saturation upper must be >= lower."
        assert self.lower >= 0, "saturation lower must be non-negative."

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        img = img.astype(np.float32)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img[:, :, 1] *= random.uniform(self.lower, self.upper)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomHue(object):
    def __init__(self, delta=18, ratio=0.5):
        assert 0 <= delta <= 360
        self.delta = delta
        self.ratio = ratio

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        img = img.astype(np.float32)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img[:, :, 0] += random.uniform(-self.delta, self.delta)
        img[:, :, 0][img[:, :, 0] > 360] -= 360
        img[:, :, 0][img[:, :, 0] < 0] += 360
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomPerm(object):
    def __init__(self, ratio=0.5):
        self.ratio = ratio
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        swap = self.perms[random.randint(0, len(self.perms) - 1)]
        img = img[:, :, swap].astype(np.uint8)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomContrast(object):
    def __init__(self, lower=0.5, upper=1.5, ratio=0.5):
        self.lower = lower
        self.upper = upper
        self.ratio = ratio
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        img = img.astype(np.float32)
        img *= random.uniform(self.lower, self.upper)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomBrightness(object):
    def __init__(self, shift_value=30, ratio=0.5):
        self.shift_value = shift_value
        self.ratio = ratio

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        img = img.astype(np.float32)
        shift = random.randint(-self.shift_value, self.shift_value)
        img[:, :, :] += shift
        img = np.around(img)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomResizedCrop(object):
    """Crop the given PIL Image to random size and aspect ratio.
    A crop of random size (default: of 0.08 to 1.0) of the original size and a random
    aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.
    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, scale_range=(0.08, 1.0), aspect_range=(3. / 4., 4. / 3.)):
        self.size = tuple(size)
        self.scale = scale_range
        self.ratio = aspect_range

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        height, width, _ = img.shape
        for attempt in range(10):
            area = width * height
            target_area = random.uniform(*scale) * area
            aspect_ratio = random.uniform(*ratio)
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if random.random() < 0.5:
                w, h = h, w
            if w <= width and h <= height:
                i = random.randint(0, height - h)
                j = random.randint(0, width - w)
                return i, j, h, w

        # Fallback
        w = min(height, width)
        i = (height - w) // 2
        j = (width - w) // 2
        return i, j, w, w

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        """
        Args:
            img (Numpy Image): Image to be cropped and resized.
        Returns:
            Numpy Image: Randomly cropped and resized image.
        """
        assert labelmap is None and maskmap is None and kpts is None and bboxes is None and labels is None
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        img = img[i:i + h, j:j + w]
        img = cv2.resize(img, self.size, interpolation=cv2.INTER_LINEAR)
        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomResize(object):
    """Resize the given numpy.ndarray to random size and aspect ratio.
    Args:
        scale_min: the min scale to resize.
        scale_max: the max scale to resize.
    """
    def __init__(self, scale_range=(0.75, 1.25), aspect_range=(0.9, 1.1), target_size=None,
                 resize_bound=None, method='random', ratio=0.5):
        self.scale_range = scale_range
        self.aspect_range = aspect_range
        self.resize_bound = resize_bound
        self.method = method
        self.ratio = ratio
        if target_size is not None:
            if isinstance(target_size, int):
                self.input_size = (target_size, target_size)
            elif isinstance(target_size, (list, tuple)) and len(target_size) == 2:
                self.input_size = target_size
            else:
                raise TypeError('Got inappropriate size arg: {}'.format(target_size))
        else:
            self.input_size = None

    def get_scale(self, img_size, bboxes):
        if self.method == 'random':
            scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])
            return scale_ratio
        elif self.method == 'focus':
            if self.input_size is not None and bboxes is not None and len(bboxes) > 0:
                bboxes = np.array(bboxes)
                border = bboxes[:, 2:] - bboxes[:, 0:2]
                scale = 0.6 / max(max(border[:, 0]) / self.input_size[0], max(border[:, 1]) / self.input_size[1])
                scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1]) * scale
                return scale_ratio
            else:
                scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])
                return scale_ratio
        elif self.method == 'bound':
            scale1 = self.resize_bound[0] / min(img_size)
            scale2 = self.resize_bound[1] / max(img_size)
            scale = min(scale1, scale2)
            return scale
        else:
            Log.error('Resize method {} is invalid.'.format(self.method))
            exit(1)

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        """
        Args:
            img     (Image):  Image to be resized.
            maskmap (Image):  Mask to be resized.
            kpt     (list):   keypoints to be resized.
            center  (list):   center points to be resized.
        Returns:
            Image: Randomly resize image.
            Image: Randomly resize maskmap.
            list:  Randomly resize keypoints.
            list:  Randomly resize center points.
        """
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        height, width, _ = img.shape
        if random.random() < self.ratio:
            scale_ratio = self.get_scale([width, height], bboxes)
            aspect_ratio = random.uniform(*self.aspect_range)
            w_scale_ratio = math.sqrt(aspect_ratio) * scale_ratio
            h_scale_ratio = math.sqrt(1.0 / aspect_ratio) * scale_ratio
        else:
            w_scale_ratio, h_scale_ratio = 1.0, 1.0

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] *= w_scale_ratio
            kpts[:, :, 1] *= h_scale_ratio

        if bboxes is not None and bboxes.size > 0:
            bboxes[:, 0::2] *= w_scale_ratio
            bboxes[:, 1::2] *= h_scale_ratio

        if polygons is not None:
            for object_id in range(len(polygons)):
                for polygon_id in range(len(polygons[object_id])):
                    polygons[object_id][polygon_id][0::2] *= w_scale_ratio
                    polygons[object_id][polygon_id][1::2] *= h_scale_ratio

        converted_size = (int(width * w_scale_ratio), int(height * h_scale_ratio))

        if not isinstance(img, list):
            img = cv2.resize(img, converted_size, interpolation=cv2.INTER_LINEAR).astype(np.uint8)
        else:
            img = [cv2.resize(item, converted_size, interpolation=cv2.INTER_LINEAR).astype(np.uint8) for item in img]
        if labelmap is not None:
            labelmap = cv2.resize(labelmap, converted_size, interpolation=cv2.INTER_NEAREST)
        if maskmap is not None:
            maskmap = cv2.resize(maskmap, converted_size, interpolation=cv2.INTER_NEAREST)

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomRotate(object):
    """Rotate the input numpy.ndarray and points to the given degree.
    Args:
        degree (number): Desired rotate degree.
    """
    def __init__(self, max_degree, ratio=0.5, mean=(104, 117, 123)):
        assert isinstance(max_degree, int)
        self.max_degree = max_degree
        self.ratio = ratio
        self.mean = mean

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        """
        Args:
            img     (Image):  Image to be rotated.
            maskmap (Image):  Mask to be rotated.
            kpt     (list):   Keypoints to be rotated.
            center  (list):   Center points to be rotated.
        Returns:
            Image: Rotated image.
            list:  Rotated key points.
        """
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() < self.ratio:
            rotate_degree = random.uniform(-self.max_degree, self.max_degree)
        else:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, _ = img.shape
        img_center = (width / 2.0, height / 2.0)

        rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)
        cos_val = np.abs(rotate_mat[0, 0])
        sin_val = np.abs(rotate_mat[0, 1])
        new_width = int(height * sin_val + width * cos_val)
        new_height = int(height * cos_val + width * sin_val)
        rotate_mat[0, 2] += (new_width / 2.) - img_center[0]
        rotate_mat[1, 2] += (new_height / 2.) - img_center[1]
        if not isinstance(img, list):
            img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=self.mean).astype(np.uint8)
        else:
            img = [cv2.warpAffine(item, rotate_mat, (new_width, new_height),
                                  borderValue=self.mean).astype(np.uint8) for item in img]
        if labelmap is not None:
            labelmap = cv2.warpAffine(labelmap, rotate_mat, (new_width, new_height),
                                      borderValue=(255, 255, 255), flags=cv2.INTER_NEAREST)
            labelmap = labelmap.astype(np.uint8)
        if maskmap is not None:
            maskmap = cv2.warpAffine(maskmap, rotate_mat, (new_width, new_height),
                                     borderValue=(1, 1, 1), flags=cv2.INTER_NEAREST)
            maskmap = maskmap.astype(np.uint8)

        if polygons is not None:
            for object_id in range(len(polygons)):
                for polygon_id in range(len(polygons[object_id])):
                    for i in range(len(polygons[object_id][polygon_id]) // 2):
                        x = polygons[object_id][polygon_id][i * 2]
                        y = polygons[object_id][polygon_id][i * 2 + 1]
                        p = np.array([x, y, 1])
                        p = rotate_mat.dot(p)
                        polygons[object_id][polygon_id][i * 2] = p[0]
                        polygons[object_id][polygon_id][i * 2 + 1] = p[1]

        if kpts is not None and kpts.size > 0:
            num_objects = len(kpts)
            num_keypoints = len(kpts[0])
            for i in range(num_objects):
                for j in range(num_keypoints):
                    x = kpts[i][j][0]
                    y = kpts[i][j][1]
                    p = np.array([x, y, 1])
                    p = rotate_mat.dot(p)
                    kpts[i][j][0] = p[0]
                    kpts[i][j][1] = p[1]

        # It is not right for object detection tasks.
        if bboxes is not None and bboxes.size > 0:
            for i in range(len(bboxes)):
                bbox_temp = [bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][1],
                             bboxes[i][0], bboxes[i][3], bboxes[i][2], bboxes[i][3]]
                for node in range(4):
                    x = bbox_temp[node * 2]
                    y = bbox_temp[node * 2 + 1]
                    p = np.array([x, y, 1])
                    p = rotate_mat.dot(p)
                    bbox_temp[node * 2] = p[0]
                    bbox_temp[node * 2 + 1] = p[1]

                bboxes[i] = [min(bbox_temp[0], bbox_temp[2], bbox_temp[4], bbox_temp[6]),
                             min(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7]),
                             max(bbox_temp[0], bbox_temp[2], bbox_temp[4], bbox_temp[6]),
                             max(bbox_temp[1], bbox_temp[3], bbox_temp[5], bbox_temp[7])]

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomCrop(object):
    """Crop the given numpy.ndarray and at a random location.
    Args:
        size (int or tuple): Desired output size of the crop.(w, h)
    """
    def __init__(self, crop_size, ratio=0.5, method='random', grid=None, allow_outside_center=True):
        self.ratio = ratio
        self.method = method
        self.grid = grid
        self.allow_outside_center = allow_outside_center

        if isinstance(crop_size, float):
            self.size = (crop_size, crop_size)
        elif isinstance(crop_size, collections.Iterable) and len(crop_size) == 2:
            self.size = crop_size
        else:
            raise TypeError('Got inappropriate size arg: {}'.format(crop_size))

    def get_lefttop(self, crop_size, img_size):
        if self.method == 'center':
            return [(img_size[0] - crop_size[0]) // 2, (img_size[1] - crop_size[1]) // 2]

        elif self.method == 'random':
            x = random.randint(0, img_size[0] - crop_size[0])
            y = random.randint(0, img_size[1] - crop_size[1])
            return [x, y]

        elif self.method == 'grid':
            grid_x = random.randint(0, self.grid[0] - 1)
            grid_y = random.randint(0, self.grid[1] - 1)
            x = grid_x * ((img_size[0] - crop_size[0]) // (self.grid[0] - 1))
            y = grid_y * ((img_size[1] - crop_size[1]) // (self.grid[1] - 1))
            return [x, y]

        else:
            Log.error('Crop method {} is invalid.'.format(self.method))
            exit(1)

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        """
        Args:
            img (Image):     Image to be cropped.
            maskmap (Image): Mask to be cropped.
        Returns:
            Image: Cropped image.
            Image: Cropped maskmap.
            list:  Cropped keypoints.
            list:  Cropped center points.
        """
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, _ = img.shape
        target_size = [min(self.size[0], width), min(self.size[1], height)]

        offset_left, offset_up = self.get_lefttop(target_size, [width, height])
        # img = ImageHelper.draw_box(img, bboxes[index])
        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] -= offset_left
            kpts[:, :, 1] -= offset_up

        if bboxes is not None and bboxes.size > 0:
            if self.allow_outside_center:
                mask = np.ones(bboxes.shape[0], dtype=bool)
            else:
                crop_bb = np.array([offset_left, offset_up, offset_left + target_size[0], offset_up + target_size[1]])
                center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
                mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)

            bboxes[:, 0::2] -= offset_left
            bboxes[:, 1::2] -= offset_up
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)
            mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
            bboxes = bboxes[mask]
            if labels is not None:
                labels = labels[mask]

            if polygons is not None:
                new_polygons = list()
                for object_id in range(len(polygons)):
                    if mask[object_id] == 1:
                        for polygon_id in range(len(polygons[object_id])):
                            polygons[object_id][polygon_id][0::2] -= offset_left
                            polygons[object_id][polygon_id][1::2] -= offset_up
                            polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
                                                                            0, target_size[0] - 1)
                            polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
                                                                            0, target_size[1] - 1)
                        new_polygons.append(polygons[object_id])

                polygons = new_polygons

        if not isinstance(img, list):
            img = img[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]]
        else:
            img = [item[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]] for item in img]

        if maskmap is not None:
            maskmap = maskmap[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]]

        if labelmap is not None:
            labelmap = labelmap[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]]

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomFocusCrop(object):
    """Crop the given numpy.ndarray and at a random location.
    Args:
        size (int or tuple): Desired output size of the crop.(w, h)
    """
    def __init__(self, crop_size, ratio=0.5, center_jitter=None, mean=(104, 117, 123), allow_outside_center=True):
        self.ratio = ratio
        self.center_jitter = center_jitter
        self.mean = mean
        self.allow_outside_center = allow_outside_center

        if isinstance(crop_size, float):
            self.size = (crop_size, crop_size)
        elif isinstance(crop_size, collections.Iterable) and len(crop_size) == 2:
            self.size = crop_size
        else:
            raise TypeError('Got inappropriate size arg: {}'.format(crop_size))

    def get_center(self, img_size, bboxes):
        if bboxes is None or bboxes.size == 0:
            if img_size[0] > self.size[0]:
                x = random.randint(self.size[0] // 2, img_size[0] - self.size[0] // 2)
            else:
                x = img_size[0] // 2

            if img_size[1] > self.size[1]:
                y = random.randint(self.size[1] // 2, img_size[1] - self.size[1] // 2)
            else:
                y = img_size[1] // 2

            return [x, y], -1

        else:
            border = bboxes[:, 2:] - bboxes[:, 0:2]
            area = border[:, 0] * border[:, 1]
            max_index = np.argmax(area)
            max_center = [(bboxes[max_index][0] + bboxes[max_index][2]) / 2,
                          (bboxes[max_index][1] + bboxes[max_index][3]) / 2]

            if self.center_jitter is not None:
                jitter = random.randint(-self.center_jitter, self.center_jitter)
                max_center[0] += jitter
                jitter = random.randint(-self.center_jitter, self.center_jitter)
                max_center[1] += jitter

            return max_center, max_index

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        """
        Args:
            img (Image):     Image to be cropped.
            maskmap (Image): Mask to be cropped.
        Returns:
            Image: Cropped image.
            Image: Cropped maskmap.
            list:  Cropped keypoints.
            list:  Cropped center points.
        """
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, channels = img.shape
        center, index = self.get_center([width, height], bboxes)

        # img = ImageHelper.draw_box(img, bboxes[index])
        offset_left = int(center[0] - self.size[0] // 2)
        offset_up = int(center[1] - self.size[1] // 2)

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] -= offset_left
            kpts[:, :, 1] -= offset_up
            mask = np.logical_or.reduce((kpts[:, :, 0] >= self.size[0], kpts[:, :, 0] < 0,
                                         kpts[:, :, 1] >= self.size[1], kpts[:, :, 1] < 0))
            kpts[mask == 1, 2] = -1

        if bboxes is not None and bboxes.size > 0:
            if self.allow_outside_center:
                mask = np.ones(bboxes.shape[0], dtype=bool)
            else:
                crop_bb = np.array([offset_left, offset_up, offset_left + self.size[0], offset_up + self.size[1]])
                center = (bboxes[:, :2] + bboxes[:, 2:]) / 2
                mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]).all(axis=1)

            bboxes[:, 0::2] -= offset_left
            bboxes[:, 1::2] -= offset_up
            bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, self.size[0] - 1)
            bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, self.size[1] - 1)
            mask = np.logical_and(mask, (bboxes[:, :2] < bboxes[:, 2:]).all(axis=1))
            bboxes = bboxes[mask]
            if labels is not None:
                labels = labels[mask]

            if polygons is not None:
                new_polygons = list()
                for object_id in range(len(polygons)):
                    if mask[object_id] == 1:
                        for polygon_id in range(len(polygons[object_id])):
                            polygons[object_id][polygon_id][0::2] -= offset_left
                            polygons[object_id][polygon_id][1::2] -= offset_up
                            polygons[object_id][polygon_id][0::2] = np.clip(polygons[object_id][polygon_id][0::2],
                                                                            0, self.size[0] - 1)
                            polygons[object_id][polygon_id][1::2] = np.clip(polygons[object_id][polygon_id][1::2],
                                                                            0, self.size[1] - 1)
                        new_polygons.append(polygons[object_id])

                polygons = new_polygons

        expand_image = np.zeros((max(height, self.size[1]) + abs(offset_up),
                                 max(width, self.size[0]) + abs(offset_left), channels), dtype=img.dtype)
        expand_image[:, :, :] = self.mean
        expand_image[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                     abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = img
        img = expand_image[max(offset_up, 0):max(offset_up, 0) + self.size[1],
                           max(offset_left, 0):max(offset_left, 0) + self.size[0]]

        if maskmap is not None:
            expand_maskmap = np.zeros((max(height, self.size[1]) + abs(offset_up),
                                       max(width, self.size[0]) + abs(offset_left)), dtype=maskmap.dtype)
            expand_maskmap[:, :] = 1
            expand_maskmap[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                           abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = maskmap
            maskmap = expand_maskmap[max(offset_up, 0):max(offset_up, 0) + self.size[1],
                                     max(offset_left, 0):max(offset_left, 0) + self.size[0]]

        if labelmap is not None:
            expand_labelmap = np.zeros((max(height, self.size[1]) + abs(offset_up),
                                        max(width, self.size[0]) + abs(offset_left)), dtype=labelmap.dtype)
            expand_labelmap[:, :] = 255
            expand_labelmap[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,
                            abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = labelmap
            labelmap = expand_labelmap[max(offset_up, 0):max(offset_up, 0) + self.size[1],
                                       max(offset_left, 0):max(offset_left, 0) + self.size[0]]

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


class RandomDetCrop(object):
    """Crop
    Arguments:
        img (Image): the image being input during training
        boxes (Tensor): the original bounding boxes in pt form
        labels (Tensor): the class labels for each bbox
        mode (float tuple): the min and max jaccard overlaps
    Return:
        (img, boxes, classes)
            img (Image): the cropped image
            boxes (Tensor): the adjusted bounding boxes in pt form
            labels (Tensor): the class labels for each bbox
    """
    def __init__(self, ratio=0.5, allow_outside_center=True):
        self.ratio = ratio
        self.allow_outside_center = allow_outside_center
        self.sample_options = (
            # using entire original input image
            None,
            # sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9
            (0.1, None),
            (0.3, None),
            (0.5, None),
            (0.7, None),
            (0.9, None),
            # randomly sample a patch
            (None, None),
        )

    @staticmethod
    def intersect(box_a, box_b):
        max_xy = np.minimum(box_a[:, 2:], box_b[2:])
        min_xy = np.maximum(box_a[:, :2], box_b[:2])
        inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
        return inter[:, 0] * inter[:, 1]

    @staticmethod
    def jaccard_numpy(box_a, box_b):
        """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
        is simply the intersection over union of two boxes.
        E.g.:
            A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
        Args:
            box_a: Multiple bounding boxes, Shape: [num_boxes,4]
            box_b: Single bounding box, Shape: [4]
        Return:
            jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]]
        """
        inter = RandomDetCrop.intersect(box_a, box_b)
        area_a = ((box_a[:, 2] - box_a[:, 0]) *
                  (box_a[:, 3] - box_a[:, 1]))  # [A,B]
        area_b = ((box_b[2] - box_b[0]) *
                  (box_b[3] - box_b[1]))  # [A,B]
        union = area_a + area_b - inter
        return inter / union  # [A,B]

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert labelmap is None and maskmap is None and kpts is None and polygons is None
        assert bboxes is not None and labels is not None

        if random.random() > self.ratio:
            return img, labelmap, maskmap, kpts, bboxes, labels, polygons

        height, width, _ = img.shape

        while True:
            # randomly choose a mode
            mode = random.choice(self.sample_options)
            if mode is None or bboxes.size == 0:
                return img, labelmap, maskmap, kpts, bboxes, labels, polygons

            min_iou, max_iou = mode
            if min_iou is None:
                min_iou = float('-inf')
            if max_iou is None:
                max_iou = float('inf')

            # max trails (50)
            for _ in range(50):
                scale = random.uniform(0.3, 1.)
                min_ratio = max(0.5, scale * scale)
                max_ratio = min(2.0, 1. / scale / scale)
                ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
                w = int(scale * ratio * width)
                h = int((scale / ratio) * height)

                left = random.randint(0, width - w)
                top = random.randint(0, height - h)

                # convert to integer rect x1,y1,x2,y2
                rect = np.array([int(left), int(top), int(left + w), int(top + h)])

                # calculate IoU (jaccard overlap) b/t the cropped and gt boxes
                overlap = self.jaccard_numpy(bboxes, rect)

                # is min and max overlap constraint satisfied? if not try again
                if overlap.min() < min_iou or max_iou < overlap.max():
                    continue

                # keep overlap with gt box IF center in sampled patch
                centers = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0

                # mask in all gt boxes that above and to the left of centers
                m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])

                # mask in all gt boxes that under and to the right of centers
                m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])

                # mask in that both m1 and m2 are true
                mask = m1 * m2

                # have any valid boxes? try again if not
                if not mask.any():
                    continue

                # take only matching gt boxes
                current_boxes = bboxes[mask, :].copy()

                # cut the crop from the image
                current_img = img[rect[1]:rect[3], rect[0]:rect[2], :]

                # take only matching gt labels
                current_labels = labels[mask]

                # should we use the box left and top corner or the crop's
                current_boxes[:, :2] = np.maximum(current_boxes[:, :2], rect[:2])

                # adjust to crop (by substracting crop's left,top)
                current_boxes[:, :2] -= rect[:2]
                current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:], rect[2:])

                # adjust to crop (by substracting crop's left,top)
                current_boxes[:, 2:] -= rect[:2]
                return current_img, labelmap, maskmap, kpts, current_boxes, current_labels, polygons


class Resize(object):
    """Resize the given numpy.ndarray to random size and aspect ratio.
    Args:
        scale_min: the min scale to resize.
        scale_max: the max scale to resize.
    """
    def __init__(self, target_size=None, min_side_length=None, max_side_length=None):
        self.target_size = target_size
        self.min_side_length = min_side_length
        self.max_side_length = max_side_length

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        assert isinstance(img, (np.ndarray, list))
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        height, width, _ = img.shape
        if self.target_size is not None:
            target_size = self.target_size
            w_scale_ratio = self.target_size[0] / width
            h_scale_ratio = self.target_size[1] / height
        elif self.min_side_length is not None:
            scale_ratio = self.min_side_length / min(width, height)
            w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
            target_size = [int(round(width * w_scale_ratio)), int(round(height * h_scale_ratio))]
        else:
            scale_ratio = self.max_side_length / max(width, height)
            w_scale_ratio, h_scale_ratio = scale_ratio, scale_ratio
            target_size = [int(round(width * w_scale_ratio)), int(round(height * h_scale_ratio))]

        if kpts is not None and kpts.size > 0:
            kpts[:, :, 0] *= w_scale_ratio
            kpts[:, :, 1] *= h_scale_ratio

        if bboxes is not None and bboxes.size > 0:
            bboxes[:, 0::2] *= w_scale_ratio
            bboxes[:, 1::2] *= h_scale_ratio

        if polygons is not None:
            for object_id in range(len(polygons)):
                for polygon_id in range(len(polygons[object_id])):
                    polygons[object_id][polygon_id][0::2] *= w_scale_ratio
                    polygons[object_id][polygon_id][1::2] *= h_scale_ratio

        target_size = tuple(target_size)
        if not isinstance(img, list):
            img = cv2.resize(img, target_size, interpolation=cv2.INTER_LINEAR)
        else:
            img = [cv2.resize(item, target_size, interpolation=cv2.INTER_LINEAR) for item in img]

        if labelmap is not None:
            labelmap = cv2.resize(labelmap, target_size, interpolation=cv2.INTER_NEAREST)

        if maskmap is not None:
            maskmap = cv2.resize(maskmap, target_size, interpolation=cv2.INTER_NEAREST)

        return img, labelmap, maskmap, kpts, bboxes, labels, polygons


CV2_AUGMENTATIONS_DICT = {
    'random_saturation': RandomSaturation,
    'random_hue': RandomHue,
    'random_perm': RandomPerm,
    'random_contrast': RandomContrast,
    'random_brightness': RandomBrightness,
    'random_pad': RandomPad,
    'padding': Padding,
    'random_hflip': RandomHFlip,
    'random_resize': RandomResize,
    'random_crop': RandomCrop,
    'random_focus_crop': RandomFocusCrop,
    'random_det_crop': RandomDetCrop,
    'random_resized_crop': RandomResizedCrop,
    'random_rotate': RandomRotate,
    'resize': Resize
}


class CV2AugCompose(object):
    """Composes several transforms together.
    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.
    Example:
        >>> CV2AugCompose([
        >>>     RandomCrop(),
        >>> ])
    """
    def __init__(self, configer, split='train'):
        self.configer = configer
        self.transforms = dict()
        self.split = split
        aug_trans = self.configer.get(split, 'aug_trans')
        shuffle_train_trans = []
        if 'shuffle_trans_seq' in aug_trans:
            if isinstance(aug_trans['shuffle_trans_seq'][0], list):
                train_trans_seq_list = aug_trans['shuffle_trans_seq']
                for train_trans_seq in train_trans_seq_list:
                    shuffle_train_trans += train_trans_seq
            else:
                shuffle_train_trans = aug_trans['shuffle_trans_seq']

        for trans in aug_trans['trans_seq'] + shuffle_train_trans:
            self.transforms[trans] = CV2_AUGMENTATIONS_DICT[trans](**aug_trans[trans])

    def __call__(self, img, labelmap=None, maskmap=None, kpts=None, bboxes=None, labels=None, polygons=None):
        if self.configer.get('data', 'input_mode') == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        aug_trans = self.configer.get(self.split, 'aug_trans')
        shuffle_trans_seq = []
        if 'shuffle_trans_seq' in aug_trans:
            if isinstance(aug_trans['shuffle_trans_seq'][0], list):
                shuffle_trans_seq_list = aug_trans['shuffle_trans_seq']
                shuffle_trans_seq = shuffle_trans_seq_list[random.randint(0, len(shuffle_trans_seq_list))]
            else:
                shuffle_trans_seq = aug_trans['shuffle_trans_seq']
                random.shuffle(shuffle_trans_seq)

        for trans_key in (shuffle_trans_seq + aug_trans['trans_seq']):
            (img, labelmap, maskmap, kpts,
             bboxes, labels, polygons) = self.transforms[trans_key](img, labelmap, maskmap,
                                                                    kpts, bboxes, labels, polygons)

        if self.configer.get('data', 'input_mode') == 'RGB':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        out_list = [img]
        for elem in [labelmap, maskmap, kpts, bboxes, labels, polygons]:
            if elem is not None:
                out_list.append(elem)
...
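To make the crop arithmetic concrete, the following traces by hand what RandomCrop with method='center' computes for offset_left and offset_up on a toy image; the sizes and the box are illustrative assumptions:

import numpy as np

# Image is H x W x C; crop size is (w, h) = (100, 80); box is x1, y1, x2, y2.
img = np.zeros((200, 300, 3), dtype=np.uint8)
bboxes = np.array([[120., 70., 180., 130.]])
target_size = [100, 80]

# get_lefttop(..., method='center'):
offset_left = (300 - target_size[0]) // 2   # 100
offset_up = (200 - target_size[1]) // 2     # 60

# Boxes shift into crop coordinates, then clip to the crop bounds.
bboxes[:, 0::2] -= offset_left
bboxes[:, 1::2] -= offset_up
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, target_size[0] - 1)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, target_size[1] - 1)

# The image itself is sliced with the same offsets.
img = img[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]]
print(img.shape, bboxes)   # (80, 100, 3) [[20. 10. 80. 70.]]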


dtmViewer.py

Source: dtmViewer.py (GitHub)


from pysvg.filter import *
from pysvg.gradient import *
from pysvg.linking import *
from pysvg.script import *
from pysvg.shape import *
from pysvg.structure import *
from pysvg.style import *
from pysvg.text import *
from pysvg.builders import *
from lxml import etree
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import os.path

PIX_BY_SEC = 220
HEIGHT_BY_WORKER = 110
RESOLUTION_TASK_DISCOVER = 0.005
OFFSET_LEFT = 40

colorsList = ['#ee0000', '#00bb00', '#0000ff', '#aaaa00', '#00aaaa', '#000077', '#cc00cc']
funcAssignDict = {}
workersTimeDelta = []
taskDict = {}

class badDispatchFinder(object):
    def __init__(self, maxTime, tasksDict):
        self.timeBlocks = [0 for i in xrange(int(maxTime / RESOLUTION_TASK_DISCOVER))]
        for task in tasksDict.values():
            self.addTask(task)
        return

    def addTask(self, taskInfo):
        for i in xrange(int(taskInfo[0] / RESOLUTION_TASK_DISCOVER), int(taskInfo[1][0] / RESOLUTION_TASK_DISCOVER)):
            self.timeBlocks[i] += 1

    def drawParts(self, svgObj, nbrWorkers):
        i = 0
        oh = ShapeBuilder()
        myStyle = StyleBuilder()
        myStyle.setFontFamily(fontfamily="Verdana")
        myStyle.setFontWeight('bold')
        myStyle.setFontSize('14pt')
        txt = text("Parallelisable?", 5, 20)
        txt.set_style(myStyle.getStyle())
        svgObj.addElement(txt)
        while i < len(self.timeBlocks):
            if self.timeBlocks[i] > nbrWorkers:
                beginRect = i
                while i < len(self.timeBlocks) and self.timeBlocks[i] > nbrWorkers:
                    i += 1
                endRect = i
                svgObj.addElement(oh.createRect(OFFSET_LEFT + beginRect * RESOLUTION_TASK_DISCOVER * PIX_BY_SEC + 2, 5, (endRect - beginRect) * RESOLUTION_TASK_DISCOVER * PIX_BY_SEC + 2, 18, strokewidth=2, fill='orange', stroke='white'))
            else:
                i += 1

def synchroWorkersTime(workerFileBeginPath):
    global workersTimeDelta
    indexF = 0
    msgTreeList = []
    while os.path.exists(workerFileBeginPath + str(indexF) + ".xml"):
        xmlTree = etree.parse(workerFileBeginPath + str(indexF) + ".xml")
        msgTreeList.append(xmlTree.getroot()[3])
        assert msgTreeList[-1].tag == 'commLog'
        indexF += 1

    workersTimeDelta.append(0.)  # By definition, the first worker is the reference
    # Look for the first messages it sent to each worker
    for i in xrange(1, len(msgTreeList)):
        j = 0
        while int(msgTreeList[0][j].get('otherWorker')) != i or msgTreeList[0][j].get('direc') != "out":
            #print("[0]",msgTreeList[0][j].get('otherWorker'), msgTreeList[0][j].get('msgtag'), msgTreeList[0][j].get('type'))
            j += 1
        msgTagRootWorker, timeRootWorker = int(msgTreeList[0][j].get('msgtag')), float(msgTreeList[0][j].get('time'))
        k = 0
        while int(msgTreeList[i][k].get('otherWorker')) != 0 or msgTreeList[i][k].get('direc') != "in":
            #print("[" +str(i)+"]", msgTreeList[i][k].get('otherWorker'), msgTreeList[i][k].get('msgtag'), msgTreeList[i][k].get('type'))
            k += 1
        msgTagOtherWorker, timeOtherWorker = int(msgTreeList[i][k].get('msgtag')), float(msgTreeList[i][k].get('time'))
        assert msgTagRootWorker == msgTagOtherWorker
        workersTimeDelta.append(timeOtherWorker - timeRootWorker)
    return

def traceWorker(svgO, workerFile, offset, decalageTemps=0.):
    global funcAssignDict
    global taskDict
    oh = ShapeBuilder()
    xmlTree = etree.parse(workerFile)
    print("ADD WORKER ", workerFile)
    assert xmlTree.getroot().tag == 'dtm'
    wbt = float(xmlTree.getroot().get('timeBegin'))
    myStyle = StyleBuilder()
    myStyle.setFontFamily(fontfamily="Verdana")
    myStyle.setFontWeight('bold')
    myStyle.setFontSize('14pt')
    myStyle.setFilling('blue')
    txt = text("Worker " + str(xmlTree.getroot().get('workerId')), 5, offset + HEIGHT_BY_WORKER / 2)
    txt.set_style(myStyle.getStyle())
    svgO.addElement(txt)
    maxEndTime = 0.
    dataTask = ""
    taskTarget = ""
    for taskTag in xmlTree.getroot()[0]:
        beginTimes = []
        endTimes = []
        for taskInfo in taskTag:
            if taskInfo.tag == 'event' and taskInfo.get('type') == "begin" or taskInfo.get('type') == 'wakeUp':
                #print(taskInfo.get('time'))
                beginTimes.append(float(taskInfo.get('time')) - wbt + decalageTemps)
            elif taskInfo.tag == 'event' and taskInfo.get('type') == "end" or taskInfo.get('type') == 'sleep':
                assert len(endTimes) < len(beginTimes)
                endTimes.append(float(taskInfo.get('time')) - wbt + decalageTemps)
                if maxEndTime < endTimes[-1]:
                    maxEndTime = endTimes[-1]
            elif taskInfo.tag == 'target':
                if not taskInfo.get('name') in funcAssignDict:
                    print("Choose color " + colorsList[len(funcAssignDict)])
                    funcAssignDict[taskInfo.get('name')] = colorsList[len(funcAssignDict)]
                dataTask = taskInfo.get('name') + " : "
                taskTarget = taskInfo.get('name')
                indexArg = 0
                while not (taskInfo.get('arg' + str(indexArg)) is None):
                    dataTask += taskInfo.get('arg' + str(indexArg)) + ", "
                    indexArg += 1
        taskDict[taskTag.get('id')] = (float(taskTag.get('creationTime')) - wbt + decalageTemps, beginTimes, endTimes)
        #print(beginTimes, endTimes)
        for i in xrange(len(beginTimes)):
            if endTimes[i] - beginTimes[i] < 5. / PIX_BY_SEC:
                #endTimes[i] += 4. / PIX_BY_SEC
                svgO.addElement(oh.createLine(OFFSET_LEFT + decalageTemps * PIX_BY_SEC + beginTimes[i] * PIX_BY_SEC + 1, offset, OFFSET_LEFT + decalageTemps * PIX_BY_SEC + beginTimes[i] * PIX_BY_SEC + 1, offset + HEIGHT_BY_WORKER, strokewidth=3, stroke=funcAssignDict[taskTarget]))
            else:
                svgO.addElement(oh.createRect(OFFSET_LEFT + decalageTemps * PIX_BY_SEC + beginTimes[i] * PIX_BY_SEC + 2, offset, endTimes[i] * PIX_BY_SEC - 2 - (beginTimes[i] * PIX_BY_SEC + 2), HEIGHT_BY_WORKER, strokewidth=3, stroke=funcAssignDict[taskTarget]))
            #svgO.addElement(text(dataTask, beginTime * PIX_BY_SEC + 25, offset + HEIGHT_BY_WORKER / 2))
    loadsList = []
    timesList = []
    for loadTag in xmlTree.getroot()[1]:
        tmpLoad = []
        for workerState in loadTag:
            if workerState.tag != "workerKnownState":
                continue
            loadWorker = [float(i) for i in workerState.get('load').split(",")]
            tmpLoad.append(loadWorker[0] + loadWorker[1] + loadWorker[2])  # Running, waiting to start and waiting to restart
        loadsList.append(tmpLoad)
        timesList.append(float(loadTag.get('time')))
    timesList = [t - wbt + decalageTemps for t in timesList]
    plt.figure()
    if len(sys.argv) > 3 and sys.argv[3] == "log":
        plt.yscale('log', nonposy='clip')
    count = 0
    for line in zip(*loadsList):
        plt.plot(timesList, line, label='Worker ' + str(count))
        count += 1
    plt.legend()
    plt.xlabel('Normalized time')
    plt.ylabel('Estimated load')
    plt.savefig(sys.argv[2] + 'loads_' + str(xmlTree.getroot().get('workerId')) + '.png')
    return maxEndTime

def traceTimeline(svgO, offset, upTo):
    oh = ShapeBuilder()
    svgO.addElement(oh.createRect(OFFSET_LEFT - 10, offset + 5, OFFSET_LEFT + (upTo + 1) * PIX_BY_SEC + 10, HEIGHT_BY_WORKER / 2))
    for i in range(int(upTo) + 2):
        svgO.addElement(text(str(i), OFFSET_LEFT + i * PIX_BY_SEC - 5 * len(str(i)), offset + 40))
    for i in range((int(upTo) + 2) * 10):
        if i % 10 == 0:
            svgO.addElement(oh.createLine(OFFSET_LEFT + i * PIX_BY_SEC / 10, offset + 5, OFFSET_LEFT + i * PIX_BY_SEC / 10, offset + 20, stroke='red', strokewidth=2))
        else:
            svgO.addElement(oh.createLine(OFFSET_LEFT + i * PIX_BY_SEC / 10, offset + 5, OFFSET_LEFT + i * PIX_BY_SEC / 10, offset + 20, stroke='black', strokewidth=2))
    myStyle = StyleBuilder()
    myStyle.setFontFamily(fontfamily="Verdana")
    myStyle.setFontStyle('italic')
    myStyle.setFontSize('12pt')  # no need for the keywords all the time
    txt = text("Time (seconds)", OFFSET_LEFT + 10, offset + 55)
    txt.set_style(myStyle.getStyle())
    svgO.addElement(txt)
    return

def printFuncNames(svgO, offset):
    global funcAssignDict
    myStyle = StyleBuilder()
    myStyle.setFontFamily(fontfamily="Verdana")
    myStyle.setFontWeight('bold')
    myStyle.setFontSize('20pt')  # no need for the keywords all the time
    tmpOff = offset
    txt = text("Functions color map :", 10, tmpOff)
    txt.set_style(myStyle.getStyle())
    svgO.addElement(txt)
    tmpOff += 35
    for key in funcAssignDict:
        myStyle.setFilling(funcAssignDict[key])
        txt = text(str(key), 30, tmpOff)
        txt.set_style(myStyle.getStyle())
        svgO.addElement(txt)
        tmpOff += 30

def main():
    global taskDict
    global workersTimeDelta
    objSvg = svg()
    indexF = 0
    maxEndTime = 0.
    tmpEndTime = 0.
    synchroWorkersTime(sys.argv[1] + "/log")
    print(workersTimeDelta)
    while os.path.exists(sys.argv[1] + "/log" + str(indexF) + ".xml"):
        tmpEndTime = traceWorker(objSvg, sys.argv[1] + "/log" + str(indexF) + ".xml", indexF * (HEIGHT_BY_WORKER + 10) + 30, workersTimeDelta[indexF])
        if tmpEndTime > maxEndTime:
            maxEndTime = tmpEndTime
        indexF += 1
    traceTimeline(objSvg, indexF * (HEIGHT_BY_WORKER + 10) + 30, maxEndTime)
    printFuncNames(objSvg, (indexF + 1) * (HEIGHT_BY_WORKER + 10) + 30)
    cb = badDispatchFinder(maxEndTime, taskDict)
    cb.drawParts(objSvg, indexF + 1)
    print("FINISHED...")
    objSvg.save(sys.argv[2])

if __name__ == '__main__':
    print("HOP!")
...
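In dtmViewer.py, OFFSET_LEFT is a fixed 40-pixel left margin reserved for the "Worker N" labels; every x coordinate on the timeline is OFFSET_LEFT plus a timestamp scaled by PIX_BY_SEC. A tiny sketch of that mapping (the helper name time_to_x is hypothetical):

OFFSET_LEFT = 40   # left margin reserved for the row labels
PIX_BY_SEC = 220   # horizontal scale: pixels per second of wall-clock time

def time_to_x(seconds):
    """Map a timestamp in seconds to an x coordinate on the SVG canvas."""
    return OFFSET_LEFT + seconds * PIX_BY_SEC

assert time_to_x(0) == 40      # an event at t=0 starts right after the margin
assert time_to_x(1.5) == 370   # 40 + 1.5 * 220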


test.py

Source: test.py (GitHub)


#!/usr/bin/env python

from PIL import ImageFont
import inkyphat

font_file = inkyphat.fonts.FredokaOne

inkyphat.arc((0, 0, 212, 104), 0, 180, 2)

top = 0
left = 0
offset_left = 0

for font_size in (10, 12, 14, 16, 18, 20):
    text = "Test {}".format(font_size)
    font = inkyphat.ImageFont.truetype(font_file, font_size)
    width, height = font.getsize(text)
    inkyphat.text((0, top), text, 1, font=font)
    top += height + 1
    left = max(left, offset_left + width)

offset_left = left + 5
top = 0

for font_size in (22, 24, 26, 28):
    text = "Test {}".format(font_size)
    font = inkyphat.ImageFont.truetype(font_file, font_size)
    width, height = font.getsize(text)
    inkyphat.text((offset_left, top), text, 1, font=font)
    top += height + 1
    left = max(left, offset_left + width)

offset_left = left + 5
top = 0

for font_size in (30, 32, 34):
    text = "Test {}".format(font_size)
    font = inkyphat.ImageFont.truetype(font_file, font_size)
    width, height = font.getsize(text)
    inkyphat.text((offset_left, top), text, 1, font=font)
    top += height + 1
    left = max(left, offset_left + width)
...
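test.py uses offset_left as a running column origin: after drawing each block of text it advances offset_left past the widest line so far, plus a 5-pixel gutter. The same layout logic in isolation, with made-up text widths standing in for font.getsize():

# Column layout logic from test.py, isolated from the inkyphat display.
# The (label, width) pairs below are stand-ins for real font metrics.
columns = [
    [("Test 10", 40), ("Test 12", 48)],   # first column
    [("Test 22", 70), ("Test 24", 78)],   # second column
]

offset_left = 0
left = 0
for column in columns:
    for label, width in column:
        # Each line is drawn at x == offset_left; track the widest line.
        left = max(left, offset_left + width)
    # The next column starts 5 px to the right of everything drawn so far.
    offset_left = left + 5

print(offset_left)  # x origin the next column would use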


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.


