How to use the ignored method in elementium

Best Python code snippet using elementium_python

kitti_eval_numba.py

Source: kitti_eval_numba.py (GitHub)


import io as sysio
import time
import numba
import numpy as np
from scipy.interpolate import interp1d
# from second.core.non_max_suppression.nms_gpu import rotate_iou_gpu_eval


def get_mAP(prec):
    sums = 0
    for i in range(0, len(prec), 4):
        sums += prec[i]
    return sums / 11 * 100


@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    for i, score in enumerate(scores):
        l_recall = (i + 1) / num_gt
        if i < (len(scores) - 1):
            r_recall = (i + 2) / num_gt
        else:
            r_recall = l_recall
        if (((r_recall - current_recall) < (current_recall - l_recall))
                and (i < (len(scores) - 1))):
            continue
        # recall = l_recall
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    # print(len(thresholds), len(scores), num_gt)
    return thresholds


def clean_data(gt_anno, dt_anno, current_class, difficulty):
    CLASS_NAMES = [
        'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram'
    ]
    MIN_HEIGHT = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.3, 0.5]
    dc_bboxes, ignored_gt, ignored_dt = [], [], []
    current_cls_name = CLASS_NAMES[current_class].lower()
    num_gt = len(gt_anno["name"])
    num_dt = len(dt_anno["name"])
    num_valid_gt = 0
    for i in range(num_gt):
        # for each gt in one image
        bbox = gt_anno["bbox"][i]
        gt_name = gt_anno["name"][i].lower()
        height = bbox[3] - bbox[1]
        valid_class = -1
        if (gt_name == current_cls_name):
            valid_class = 1
        elif (current_cls_name == "Pedestrian".lower()
              and "Person_sitting".lower() == gt_name):
            valid_class = 0
        elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
            valid_class = 0
        else:
            valid_class = -1
        ignore = False
        if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty])
                or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty])
                or (height <= MIN_HEIGHT[difficulty])):
            # if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
            ignore = True
        if valid_class == 1 and not ignore:
            ignored_gt.append(0)
            num_valid_gt += 1
        elif (valid_class == 0 or (ignore and (valid_class == 1))):
            ignored_gt.append(1)
        else:
            ignored_gt.append(-1)
        # for i in range(num_gt):
        if gt_anno["name"][i] == "DontCare":
            dc_bboxes.append(gt_anno["bbox"][i])
    for i in range(num_dt):
        # for each pred in one image
        if (dt_anno["name"][i].lower() == current_cls_name):
            valid_class = 1
        else:
            valid_class = -1
        height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
        if height < MIN_HEIGHT[difficulty]:
            ignored_dt.append(1)
        elif valid_class == 1:
            ignored_dt.append(0)
        else:
            ignored_dt.append(-1)
    return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes


@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
                     (query_boxes[k, 3] - query_boxes[k, 1]))
        for n in range(N):
            iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
                boxes[n, 0], query_boxes[k, 0]))
            if iw > 0:
                ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
                    boxes[n, 1], query_boxes[k, 1]))
                if ih > 0:
                    if criterion == -1:
                        ua = (
                            (boxes[n, 2] - boxes[n, 0]) *
                            (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
                    elif criterion == 0:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]))
                    elif criterion == 1:
                        ua = qbox_area
                    else:
                        ua = 1.0
                    overlaps[n, k] = iw * ih / ua
    return overlaps


# def bev_box_overlap(boxes, qboxes, criterion=-1):
#     riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
#     return riou
#@numba.jit(nopython=True, parallel=True)
# def d3_box_overlap_kernel(boxes,
#                           qboxes,
#                           rinc,
#                           criterion=-1,
#                           z_axis=1,
#                           z_center=1.0):
#     """
#     z_axis: the z (height) axis.
#     z_center: unified z (height) center of box.
#     """
#     N, K = boxes.shape[0], qboxes.shape[0]
#     for i in range(N):
#         for j in range(K):
#             if rinc[i, j] > 0:
#                 min_z = min(
#                     boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
#                     qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
#                 max_z = max(
#                     boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
#                     qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
#                 iw = min_z - max_z
#                 if iw > 0:
#                     area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
#                     area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
#                     inc = iw * rinc[i, j]
#                     if criterion == -1:
#                         ua = (area1 + area2 - inc)
#                     elif criterion == 0:
#                         ua = area1
#                     elif criterion == 1:
#                         ua = area2
#                     else:
#                         ua = 1.0
#                     rinc[i, j] = inc / ua
#                 else:
#                     rinc[i, j] = 0.0
# def d3_box_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
#     """kitti camera format z_axis=1.
#     """
#     bev_axes = list(range(7))
#     bev_axes.pop(z_axis + 3)
#     bev_axes.pop(z_axis)
#     rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
#     d3_box_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
#     return rinc


@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
                           gt_datas,
                           dt_datas,
                           ignored_gt,
                           ignored_det,
                           dc_bboxes,
                           metric,
                           min_overlap,
                           thresh=0,
                           compute_fp=False,
                           compute_aos=False):
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    #dt_alphas = dt_datas[:, 4]
    gt_alphas = gt_datas[:, 4]
    dt_bboxes = dt_datas[:, :4]
    # gt_bboxes = gt_datas[:, :4]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    if compute_fp:
        for i in range(det_size):
            if (dt_scores[i] < thresh):
                ignored_threshold[i] = True
    NO_DETECTION = -10000000
    tp, fp, fn, similarity = 0, 0, 0, 0
    # thresholds = [0.0]
    # delta = [0.0]
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    delta = np.zeros((gt_size, ))
    delta_idx = 0
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        max_overlap = 0
        assigned_ignored_det = False
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            if (ignored_threshold[j]):
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            if (not compute_fp and (overlap > min_overlap)
                    and dt_score > valid_detection):
                det_idx = j
                valid_detection = dt_score
            elif (compute_fp and (overlap > min_overlap)
                  and (overlap > max_overlap or assigned_ignored_det)
                  and ignored_det[j] == 0):
                max_overlap = overlap
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            elif (compute_fp and (overlap > min_overlap)
                  and (valid_detection == NO_DETECTION)
                  and ignored_det[j] == 1):
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True
        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        elif ((valid_detection != NO_DETECTION)
              and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        elif valid_detection != NO_DETECTION:
            # only a tp add a threshold.
            tp += 1
            # thresholds.append(dt_scores[det_idx])
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            # if compute_aos:
            #     delta.append(gt_alphas[i] - dt_alphas[det_idx])
            #     delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
            #     delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1
                     or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        nstuff = 0
        if metric == 0:
            overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
            for i in range(dc_bboxes.shape[0]):
                for j in range(det_size):
                    if (assigned_detection[j]):
                        continue
                    if (ignored_det[j] == -1 or ignored_det[j] == 1):
                        continue
                    if (ignored_threshold[j]):
                        continue
                    if overlaps_dt_dc[j, i] > min_overlap:
                        assigned_detection[j] = True
                        nstuff += 1
        fp -= nstuff
        if compute_aos:
            tmp = np.zeros((fp + delta_idx, ))
            # tmp = [0] * fp
            for i in range(delta_idx):
                tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
                # tmp.append((1.0 + np.cos(delta[i])) / 2.0)
            # assert len(tmp) == fp + tp
            # assert len(delta) == tp
            if tp > 0 or fp > 0:
                similarity = np.sum(tmp)
            else:
                similarity = -1
    return tp, fp, fn, similarity, thresholds[:thresh_idx]


def get_split_parts(num, num_part):
    same_part = num // num_part
    remain_num = num % num_part
    if remain_num == 0:
        return [same_part] * num_part
    else:
        return [same_part] * num_part + [remain_num]


@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
                             pr,
                             gt_nums,
                             dt_nums,
                             dc_nums,
                             gt_datas,
                             dt_datas,
                             dontcares,
                             ignored_gts,
                             ignored_dets,
                             metric,
                             min_overlap,
                             thresholds,
                             compute_aos=False):
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            overlap = overlaps[dt_num:dt_num + dt_nums[i],
                               gt_num:gt_num + gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, similarity, _ = compute_statistics_jit(
                overlap,
                gt_data,
                dt_data,
                ignored_gt,
                ignored_det,
                dontcare,
                metric,
                min_overlap=min_overlap,
                thresh=thresh,
                compute_fp=True,
                compute_aos=compute_aos)
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            if similarity != -1:
                pr[t, 3] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]


def calculate_iou_partly(gt_annos,
                         dt_annos,
                         metric,
                         num_parts=50,
                         z_axis=1,
                         z_center=1.0):
    """fast iou algorithm. this function can be used independently to
    do result analysis.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        num_parts: int. a parameter for fast calculate algorithm
        z_axis: height axis. kitti camera use 1, lidar use 2.
    """
    assert len(gt_annos) == len(dt_annos)
    total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
    total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    parted_overlaps = []
    example_idx = 0
    #bev_axes = list(range(3))
    #bev_axes.pop(z_axis)
    for num_part in split_parts:
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        if metric == 0:
            gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
            dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
            overlap_part = image_box_overlap(gt_boxes, dt_boxes)
        elif metric == 1:
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in gt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate(
                [a["location"][:, bev_axes] for a in dt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, bev_axes] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = bev_box_overlap(gt_boxes,
                                           dt_boxes).astype(np.float64)
        elif metric == 2:
            loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                      axis=1)
            overlap_part = d3_box_overlap(
                gt_boxes, dt_boxes, z_axis=z_axis,
                z_center=z_center).astype(np.float64)
        else:
            raise ValueError("unknown metric")
        parted_overlaps.append(overlap_part)
        example_idx += num_part
    overlaps = []
    example_idx = 0
    for j, num_part in enumerate(split_parts):
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        gt_num_idx, dt_num_idx = 0, 0
        for i in range(num_part):
            gt_box_num = total_gt_num[example_idx + i]
            dt_box_num = total_dt_num[example_idx + i]
            overlaps.append(
                parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
                                   dt_num_idx:dt_num_idx + dt_box_num])
            gt_num_idx += gt_box_num
            dt_num_idx += dt_box_num
        example_idx += num_part
    return overlaps, parted_overlaps, total_gt_num, total_dt_num


def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
    gt_datas_list = []
    dt_datas_list = []
    total_dc_num = []
    ignored_gts, ignored_dets, dontcares = [], [], []
    total_num_valid_gt = 0
    for i in range(len(gt_annos)):
        # for each image
        rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)
        num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
        ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
        ignored_dets.append(np.array(ignored_det, dtype=np.int64))
        if len(dc_bboxes) == 0:
            dc_bboxes = np.zeros((0, 4)).astype(np.float64)
        else:
            dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
        total_dc_num.append(dc_bboxes.shape[0])
        dontcares.append(dc_bboxes)
        total_num_valid_gt += num_valid_gt
        gt_datas = np.concatenate(
            [gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
        dt_datas = np.concatenate([
            dt_annos[i]["bbox"],
            dt_annos[i]["score"][..., np.newaxis]
        ], 1)
        gt_datas_list.append(gt_datas)
        dt_datas_list.append(dt_datas)
    total_dc_num = np.stack(total_dc_num, axis=0)
    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
            total_dc_num, total_num_valid_gt)


def eval_class(gt_annos,
               dt_annos,
               current_classes,
               difficultys,
               metric,
               min_overlaps,
               compute_aos=False,
               z_axis=1,
               z_center=1.0,
               num_parts=50):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_class: int, 0: car, 1: pedestrian, 2: cyclist
        difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlap: float, min overlap. official:
            [[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
            format: [metric, class]. choose one from matrix above.
        num_parts: int. a parameter for fast calculate algorithm
    Returns:
        dict of recall, precision and aos
    """
    assert len(gt_annos) == len(dt_annos)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    rets = calculate_iou_partly(
        dt_annos,
        gt_annos,
        metric,
        num_parts,
        z_axis=z_axis,
        z_center=z_center)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    N_SAMPLE_PTS = 41
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    recall = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    all_thresholds = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
             dontcares, total_dc_num, total_num_valid_gt) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(
                        overlaps[i],
                        gt_datas_list[i],
                        dt_datas_list[i],
                        ignored_gts[i],
                        ignored_dets[i],
                        dontcares[i],
                        metric,
                        min_overlap=min_overlap,
                        thresh=0.0,
                        compute_fp=False)
                    tp, fp, fn, similarity, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                all_thresholds[m, l, k, :len(thresholds)] = thresholds
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(
                        gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(
                        dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(
                        dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(
                        ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(
                        ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos)
                    idx += num_part
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(
                        precision[m, l, k, i:], axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
    ret_dict = {
        # "recall": recall, # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
        "precision": precision,
        "orientation": aos,
        "thresholds": all_thresholds,
        "min_overlaps": min_overlaps,
    }
    return ret_dict


def get_mAP_v2(prec):
    sums = 0
    for i in range(0, prec.shape[-1], 4):
        sums = sums + prec[..., i]
    return sums / 11 * 100


def do_eval_v3(gt_annos,
               dt_annos,
               current_classes,
               min_overlaps,
               compute_aos=False,
               difficultys=(0, 1, 2),
               z_axis=1,
               z_center=1.0):
    # min_overlaps: [num_minoverlap, metric, num_class]
    types = ["bbox"]
    metrics = {}
    for i in range(len(types)):
        ret = eval_class(
            gt_annos,
            dt_annos,
            current_classes,
            difficultys,
            i,
            min_overlaps,
            compute_aos,
            z_axis=z_axis,
            z_center=z_center)
        metrics[types[i]] = ret
    return metrics


def print_str(value, *arg, sstream=None):
    if sstream is None:
        sstream = sysio.StringIO()
    sstream.truncate(0)
    sstream.seek(0)
    print(value, *arg, file=sstream)
    return sstream.getvalue()


def get_official_eval_result(gt_annos,
                             dt_annos,
                             current_classes,
                             difficultys=[0, 1, 2],
                             z_axis=1,
                             z_center=1.0):
    """
    gt_annos and dt_annos must contains following keys:
    [bbox, location, dimensions, rotation_y, score]
    """
    overlap_mod = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]])
    overlap_easy = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.5, 0.5, 0.5],
                             [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5],
                             [0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5]])
    min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0)  # [2, 3, 5]
    class_to_name = {
        0: 'Car',
        1: 'Van',
        2: 'Truck',
        3: 'Pedestrian',
        4: 'Person_sitting',
        5: 'Cyclist',
        6: 'Tram',
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    min_overlaps = min_overlaps[:, :, current_classes]
    result = ''
    # check whether alpha is valid
    compute_aos = False
    # for anno in dt_annos:
    #     if anno['alpha'].shape[0] != 0:
    #         if anno['alpha'][0] != -10:
    #             compute_aos = True
    #         break
    metrics = do_eval_v3(
        gt_annos,
        dt_annos,
        current_classes,
        min_overlaps,
        compute_aos,
        difficultys,
        z_axis=z_axis,
        z_center=z_center)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        for i in range(min_overlaps.shape[0]):
            mAPbbox = get_mAP_v2(metrics["bbox"]["precision"][j, :, i])
            mAPbbox = ", ".join("{:.2f}".format(v) for v in mAPbbox)
            #mAPbev = get_mAP_v2(metrics["bev"]["precision"][j, :, i])
            #mAPbev = ", ".join("{:.2f}".format(v) for v in mAPbev)
            #mAP3d = get_mAP_v2(metrics["3d"]["precision"][j, :, i])
            #mAP3d = ", ".join("{:.2f}".format(v) for v in mAP3d)
            result += print_str(
                (class_to_name[curcls] +
                 " AP(Average Precision)@{:.2f}, {:.2f}, {:.2f}:".format(
                     *min_overlaps[i, :, j])))
            result += print_str("bbox AP:{}".format(mAPbbox))
            #result += print_str("bev AP:{}".format(mAPbev))
            #result += print_str("3d AP:{}".format(mAP3d))
            # if compute_aos:
            #     mAPaos = get_mAP_v2(metrics["bbox"]["orientation"][j, :, i])
            #     mAPaos = ", ".join("{:.2f}".format(v) for v in mAPaos)
            #     result += print_str("aos AP:{}".format(mAPaos))
...
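In this snippet, "ignored" shows up as the per-box flags built by clean_data and consumed by compute_statistics_jit: 0 marks a box that must be matched, 1 marks a box that is ignored for scoring (a related class, or one too occluded, truncated, or small for the chosen difficulty), and -1 marks a box that is discarded entirely. A minimal sketch of calling clean_data directly follows; the annotation dictionaries are hypothetical but use the KITTI-style keys the function reads (name, bbox, occluded, truncated).

import numpy as np

# Hypothetical single-image annotations in the format clean_data() expects.
gt_anno = {
    "name": np.array(["Car", "Van", "DontCare"]),
    "bbox": np.array([[100., 120., 200., 200.],   # 80 px tall -> valid at difficulty 0
                      [300., 150., 360., 185.],   # Van is "ignored" when evaluating Car
                      [10., 10., 30., 20.]]),     # DontCare region
    "occluded": np.array([0, 0, -1]),
    "truncated": np.array([0.0, 0.1, -1.0]),
}
dt_anno = {
    "name": np.array(["Car", "Pedestrian"]),
    "bbox": np.array([[102., 118., 198., 202.],
                      [400., 150., 430., 220.]]),
}

# current_class=0 -> 'Car', difficulty=0 -> easy
num_valid_gt, ignored_gt, ignored_dt, dc_bboxes = clean_data(gt_anno, dt_anno, 0, 0)
print(num_valid_gt)   # 1: only the Car ground truth counts
print(ignored_gt)     # [0, 1, -1]: Car kept, Van ignored, DontCare discarded
print(ignored_dt)     # [0, -1]: Car detection kept, Pedestrian detection discarded

Here only the Car ground truth contributes to num_valid_gt, the Van is ignored rather than penalized, and the DontCare box is collected into dc_bboxes so that unassigned detections overlapping it are not counted as false positives.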


wait.py

Source: wait.py (GitHub)


#!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time

from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException

POLL_FREQUENCY = 0.5  # How long to sleep inbetween calls to the method
IGNORED_EXCEPTIONS = [NoSuchElementException]  # list of exceptions ignored during calls to the method


class WebDriverWait(object):
    def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
        """Constructor, takes a WebDriver instance and timeout in seconds.

        :Args:
         - driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
         - timeout - Number of seconds before timing out
         - poll_frequency - sleep interval between calls
           By default, it is 0.5 second.
         - ignored_exceptions - iterable structure of exception classes ignored during calls.
           By default, it contains NoSuchElementException only.

        Example:
         from selenium.webdriver.support.ui import WebDriverWait \n
         element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
         is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
                    until_not(lambda x: x.find_element_by_id("someId").is_displayed())
        """
        self._driver = driver
        self._timeout = timeout
        self._poll = poll_frequency
        # avoid the divide by zero
        if self._poll == 0:
            self._poll = POLL_FREQUENCY
        exceptions = IGNORED_EXCEPTIONS
        if ignored_exceptions is not None:
            try:
                exceptions.extend(iter(ignored_exceptions))
            except TypeError:  # ignored_exceptions is not iterable
                exceptions.append(ignored_exceptions)
        self._ignored_exceptions = tuple(exceptions)

    def until(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value is not False."""
        end_time = time.time() + self._timeout
        while(True):
            try:
                value = method(self._driver)
                if value:
                    return value
            except self._ignored_exceptions:
                pass
            time.sleep(self._poll)
            if(time.time() > end_time):
                break
        raise TimeoutException(message)

    def until_not(self, method, message=''):
        """Calls the method provided with the driver as an argument until the \
        return value is False."""
        end_time = time.time() + self._timeout
        while(True):
            try:
                value = method(self._driver)
                if not value:
                    return value
            except self._ignored_exceptions:
                return True
            time.sleep(self._poll)
            if(time.time() > end_time):
                break
...
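Here the "ignored" behaviour comes from the ignored_exceptions constructor argument: any exception type passed in is appended to the default IGNORED_EXCEPTIONS list and silently swallowed inside the until / until_not polling loops, so the condition is simply retried on the next poll instead of aborting the wait. A short usage sketch against a recent Selenium release; the URL and locator are placeholders.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import StaleElementReferenceException

driver = webdriver.Chrome()
driver.get("https://example.com")  # placeholder URL

# Poll every 0.5 s for up to 10 s. StaleElementReferenceException raised inside
# the condition is ignored and just triggers another poll, on top of the
# NoSuchElementException that is ignored by default.
wait = WebDriverWait(
    driver, 10, poll_frequency=0.5,
    ignored_exceptions=(StaleElementReferenceException,))
element = wait.until(lambda d: d.find_element(By.ID, "someId"))
driver.quit()

Passing a tuple keeps the default NoSuchElementException ignored while also tolerating stale-element errors raised while the page re-renders; any exception type not in the tuple still propagates immediately.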


style_controlword_list.py

Source: style_controlword_list.py (GitHub)


1# {"controlword": ["ignored/coded", "description", "section", "type"]2# filters: [see table spec pp.173-2073style_controlword_dict = {4 "spv": [],5 "li": [],6 "ri": [],7 "ql": [],8 "rin": [],9 "lin": [],10 "itap": [],11 "nowidctlpar": [],12 "wrapdefault": [],13}14"snext": ["snext", "Coded", "Style Sheet", "Value", "\\snext", "\\snext"],15 "additive": ["additive", "Coded", "Style Sheet", "Flag", "\\additive",16 "\\additive"]17test = {18 "\\'": ["Ignored", "Special Characters", "Symbol"],19 "\\-": ["Ignored", "Special Characters", "Symbol"],20 "\\*": ["Ignored", "Special Characters", "Symbol"],21 "\\:": ["Ignored", "Special Characters", "Symbol"],22 "\\": ["Ignored", "Special Characters", "Symbol"],23 "\\_": ["Ignored", "Special Characters", "Symbol"],24 "\\{": ["Ignored", "Special Characters", "Symbol"],25 "\\|": ["Ignored", "Special Characters", "Symbol"],26 "\\}": ["Ignored", "Special Characters", "Symbol"],27 "\\~": ["Ignored", "Special Characters", "Symbol"],28 "\\ab": ["Ignored", "Associated Character Properties", "Toggle"],29 "\\absh": ["Ignored", "Positioned Objects and Frames", "Value"],30 "\\abslock": ["Ignored", "Positioned Objects and Frames", "Flag"],31 "\\absnoovrlpN": ["Ignored", "Positioned Objects and Frames", "Toggle"],32 "\\absw": ["Ignored", "Positioned Objects and Frames", "Value"],33 "\\acaps": ["Ignored", "Associated Character Properties", "Toggle"],34 "\\acccomma": ["Ignored", "Font (Character^) Formatting Properties", "Toggle"],35 "\\accdot": ["Ignored", "Font (Character^) Formatting Properties", "Toggle"],36 "\\accnone": ["Ignored", "Font (Character^) Formatting Properties", "Toggle"],37 "\\acccircle": ["Ignored", "Font (Character^) Formatting Properties", "Toggle"],38 "\\accunderdot": ["Ignored", "Font (Character^) Formatting Properties", "Toggle"],39 "\\acf": ["Ignored", "Associated Character Properties", "Value"],40 "\\additive": ["Ignored", "Style Sheet", "Flag"],41 "\\adjustright": ["Ignored", "Section Formatting Properties", "Flag"],42 "\\adn": ["Ignored", "Associated Character Properties", "Value"],43 "\\aenddoc": ["Ignored", "Document Formatting Properties", "Flag"],44 "\\aendnotes": ["Ignored", "Document Formatting Properties", "Flag"],45 "\\aexpnd": ["Ignored", "Associated Character Properties", "Value"],46 "\\af": ["Ignored", "Associated Character Properties", "Value"],47 "\\affixed": ["Ignored", "Paragraph Formatting Properties", "Flag"],48 "\\afs": ["Ignored", "Associated Character Properties", "Value"],49 "\\aftnbj": ["Ignored", "Document Formatting Properties", "Flag"],50 "\\aftncn": ["Ignored", "Document Formatting Properties", "Destination"],51 "\\aftnnalc": ["Ignored", "Document Formatting Properties", "Flag"],52 "\\aftnnar": ["Ignored", "Document Formatting Properties", "Flag"],53 "\\aftnnauc": ["Ignored", "Document Formatting Properties", "Flag"],54 "\\aftnnchi": ["Ignored", "Document Formatting Properties", "Flag"],55 "\\aftnnchosung": ["Ignored", "Document Formatting Properties", "Flag"],56 "\\aftnncnum": ["Ignored", "Document Formatting Properties", "Flag"],57 "\\aftnndbar": ["Ignored", "Document Formatting Properties", "Flag"],58 "\\aftnndbnum": ["Ignored", "Document Formatting Properties", "Flag"],59 "\\aftnndbnumd": ["Ignored", "Document Formatting Properties", "Flag"]...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, covering everything from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
