How to use test_cfg method in pytest-django

Best Python code snippets using pytest-django

associative_embedding.py

Source: associative_embedding.py (GitHub)

# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import mmcv
import torch
from mmcv.image import imwrite
from mmcv.utils.misc import deprecated_api_warning
from mmcv.visualization.image import imshow

from mmpose.core.evaluation import (aggregate_scale, aggregate_stage_flip,
                                    flip_feature_maps, get_group_preds,
                                    split_ae_outputs)
from mmpose.core.post_processing.group import HeatmapParser
from mmpose.core.visualization import imshow_keypoints
from .. import builder
from ..builder import POSENETS
from .base import BasePose

try:
    from mmcv.runner import auto_fp16
except ImportError:
    warnings.warn('auto_fp16 from mmpose will be deprecated from v0.15.0'
                  'Please install mmcv>=1.1.4')
    from mmpose.core import auto_fp16


@POSENETS.register_module()
class AssociativeEmbedding(BasePose):
    """Associative embedding pose detectors.

    Args:
        backbone (dict): Backbone modules to extract feature.
        keypoint_head (dict): Keypoint head to process feature.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path to the pretrained models.
        loss_pose (None): Deprecated arguments. Please use
            ``loss_keypoint`` for heads instead.
    """

    def __init__(self,
                 backbone,
                 keypoint_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 loss_pose=None):
        super().__init__()
        self.fp16_enabled = False
        self.backbone = builder.build_backbone(backbone)
        if keypoint_head is not None:
            if 'loss_keypoint' not in keypoint_head and loss_pose is not None:
                warnings.warn(
                    '`loss_pose` for BottomUp is deprecated, '
                    'use `loss_keypoint` for heads instead. See '
                    'https://github.com/open-mmlab/mmpose/pull/382'
                    ' for more information.', DeprecationWarning)
                keypoint_head['loss_keypoint'] = loss_pose
            self.keypoint_head = builder.build_head(keypoint_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.use_udp = test_cfg.get('use_udp', False)
        self.parser = HeatmapParser(self.test_cfg)
        self.init_weights(pretrained=pretrained)

    @property
    def with_keypoint(self):
        """Check if has keypoint_head."""
        return hasattr(self, 'keypoint_head')

    def init_weights(self, pretrained=None):
        """Weight initialization for model."""
        self.backbone.init_weights(pretrained)
        if self.with_keypoint:
            self.keypoint_head.init_weights()

    @auto_fp16(apply_to=('img', ))
    def forward(self,
                img=None,
                targets=None,
                masks=None,
                joints=None,
                img_metas=None,
                return_loss=True,
                return_heatmap=False,
                **kwargs):
        """Calls either forward_train or forward_test depending on whether
        return_loss is True.

        Note:
            - batch_size: N
            - num_keypoints: K
            - num_img_channel: C
            - img_width: imgW
            - img_height: imgH
            - heatmaps weight: W
            - heatmaps height: H
            - max_num_people: M

        Args:
            img (torch.Tensor[N,C,imgH,imgW]): Input image.
            targets (list(torch.Tensor[N,K,H,W])): Multi-scale target heatmaps.
            masks (list(torch.Tensor[N,H,W])): Masks of multi-scale target
                heatmaps.
            joints (list(torch.Tensor[N,M,K,2])): Joints of multi-scale target
                heatmaps for ae loss.
            img_metas (dict): Information about val & test.
                By default it includes:
                - "image_file": image path
                - "aug_data": input
                - "test_scale_factor": test scale factor
                - "base_size": base size of input
                - "center": center of image
                - "scale": scale of image
                - "flip_index": flip index of keypoints
            return_loss (bool): ``return_loss=True`` for training,
                ``return_loss=False`` for validation & test.
            return_heatmap (bool): Option to return heatmap.

        Returns:
            dict|tuple: if 'return_loss' is true, then return losses. \
                Otherwise, return predicted poses, scores, image \
                paths and heatmaps.
        """
        if return_loss:
            return self.forward_train(img, targets, masks, joints, img_metas,
                                      **kwargs)
        return self.forward_test(
            img, img_metas, return_heatmap=return_heatmap, **kwargs)

    def forward_train(self, img, targets, masks, joints, img_metas, **kwargs):
        """Forward the bottom-up model and calculate the loss.

        Note:
            batch_size: N
            num_keypoints: K
            num_img_channel: C
            img_width: imgW
            img_height: imgH
            heatmaps weight: W
            heatmaps height: H
            max_num_people: M

        Args:
            img (torch.Tensor[N,C,imgH,imgW]): Input image.
            targets (List(torch.Tensor[N,K,H,W])): Multi-scale target heatmaps.
            masks (List(torch.Tensor[N,H,W])): Masks of multi-scale target
                heatmaps.
            joints (List(torch.Tensor[N,M,K,2])): Joints of multi-scale target
                heatmaps for ae loss.
            img_metas (dict): Information about val & test.
                By default this includes:
                - "image_file": image path
                - "aug_data": input
                - "test_scale_factor": test scale factor
                - "base_size": base size of input
                - "center": center of image
                - "scale": scale of image
                - "flip_index": flip index of keypoints

        Returns:
            dict: The total loss for bottom-up.
        """
        output = self.backbone(img)
        if self.with_keypoint:
            output = self.keypoint_head(output)
        # if return loss
        losses = dict()
        if self.with_keypoint:
            keypoint_losses = self.keypoint_head.get_loss(
                output, targets, masks, joints)
            losses.update(keypoint_losses)
        return losses

    def forward_dummy(self, img):
        """Used for computing network FLOPs.

        See ``tools/get_flops.py``.

        Args:
            img (torch.Tensor): Input image.

        Returns:
            Tensor: Outputs.
        """
        output = self.backbone(img)
        if self.with_keypoint:
            output = self.keypoint_head(output)
        return output

    def forward_test(self, img, img_metas, return_heatmap=False, **kwargs):
        """Inference the bottom-up model.

        Note:
            - Batchsize: N (currently support batchsize = 1)
            - num_img_channel: C
            - img_width: imgW
            - img_height: imgH

        Args:
            flip_index (List(int)): flip index of keypoints
            aug_data (List(Tensor[NxCximgHximgW])): Multi-scale image
            test_scale_factor (List(float)): Multi-scale factor
            base_size (Tuple(int)): Base size of image when scale is 1
            center (np.ndarray): center of image
            scale (np.ndarray): the scale of image
        """
        assert img.size(0) == 1
        assert len(img_metas) == 1
        img_metas = img_metas[0]
        aug_data = img_metas['aug_data']
        test_scale_factor = img_metas['test_scale_factor']
        base_size = img_metas['base_size']
        center = img_metas['center']
        scale = img_metas['scale']

        result = {}
        scale_heatmaps_list = []
        scale_tags_list = []

        for idx, s in enumerate(sorted(test_scale_factor, reverse=True)):
            image_resized = aug_data[idx].to(img.device)
            features = self.backbone(image_resized)
            if self.with_keypoint:
                outputs = self.keypoint_head(features)
            heatmaps, tags = split_ae_outputs(
                outputs, self.test_cfg['num_joints'],
                self.test_cfg['with_heatmaps'], self.test_cfg['with_ae'],
                self.test_cfg.get('select_output_index', range(len(outputs))))

            if self.test_cfg.get('flip_test', True):
                # use flip test
                features_flipped = self.backbone(
                    torch.flip(image_resized, [3]))
                if self.with_keypoint:
                    outputs_flipped = self.keypoint_head(features_flipped)
                heatmaps_flipped, tags_flipped = split_ae_outputs(
                    outputs_flipped, self.test_cfg['num_joints'],
                    self.test_cfg['with_heatmaps'], self.test_cfg['with_ae'],
                    self.test_cfg.get('select_output_index',
                                      range(len(outputs))))
                heatmaps_flipped = flip_feature_maps(
                    heatmaps_flipped, flip_index=img_metas['flip_index'])
                if self.test_cfg['tag_per_joint']:
                    tags_flipped = flip_feature_maps(
                        tags_flipped, flip_index=img_metas['flip_index'])
                else:
                    tags_flipped = flip_feature_maps(
                        tags_flipped, flip_index=None, flip_output=True)
            else:
                heatmaps_flipped = None
                tags_flipped = None

            aggregated_heatmaps = aggregate_stage_flip(
                heatmaps,
                heatmaps_flipped,
                index=-1,
                project2image=self.test_cfg['project2image'],
                size_projected=base_size,
                align_corners=self.test_cfg.get('align_corners', True),
                aggregate_stage='average',
                aggregate_flip='average')
            aggregated_tags = aggregate_stage_flip(
                tags,
                tags_flipped,
                index=-1,
                project2image=self.test_cfg['project2image'],
                size_projected=base_size,
                align_corners=self.test_cfg.get('align_corners', True),
                aggregate_stage='concat',
                aggregate_flip='concat')

            if s == 1 or len(test_scale_factor) == 1:
                if isinstance(aggregated_tags, list):
                    scale_tags_list.extend(aggregated_tags)
                else:
                    scale_tags_list.append(aggregated_tags)

            if isinstance(aggregated_heatmaps, list):
                scale_heatmaps_list.extend(aggregated_heatmaps)
            else:
                scale_heatmaps_list.append(aggregated_heatmaps)

        aggregated_heatmaps = aggregate_scale(
            scale_heatmaps_list,
            align_corners=self.test_cfg.get('align_corners', True),
            aggregate_scale='average')
        aggregated_tags = aggregate_scale(
            scale_tags_list,
            align_corners=self.test_cfg.get('align_corners', True),
            aggregate_scale='unsqueeze_concat')

        heatmap_size = aggregated_heatmaps.shape[2:4]
        tag_size = aggregated_tags.shape[2:4]
        if heatmap_size != tag_size:
            tmp = []
            for idx in range(aggregated_tags.shape[-1]):
                tmp.append(
                    torch.nn.functional.interpolate(
                        aggregated_tags[..., idx],
                        size=heatmap_size,
                        mode='bilinear',
                        align_corners=self.test_cfg.get('align_corners',
                                                        True)).unsqueeze(-1))
            aggregated_tags = torch.cat(tmp, dim=-1)

        # perform grouping
        grouped, scores = self.parser.parse(aggregated_heatmaps,
                                            aggregated_tags,
                                            self.test_cfg['adjust'],
                                            self.test_cfg['refine'])

        preds = get_group_preds(
            grouped,
            center,
            scale, [aggregated_heatmaps.size(3),
                    aggregated_heatmaps.size(2)],
            use_udp=self.use_udp)

        image_paths = []
        image_paths.append(img_metas['image_file'])

        if return_heatmap:
            output_heatmap = aggregated_heatmaps.detach().cpu().numpy()
        else:
            output_heatmap = None

        result['preds'] = preds
        result['scores'] = scores
        result['image_paths'] = image_paths
        result['output_heatmap'] = output_heatmap
        return result

    @deprecated_api_warning({'pose_limb_color': 'pose_link_color'},
                            cls_name='AssociativeEmbedding')
    def show_result(self,
                    img,
                    result,
                    skeleton=None,
                    kpt_score_thr=0.3,
                    bbox_color=None,
                    pose_kpt_color=None,
                    pose_link_color=None,
                    radius=4,
                    thickness=1,
                    font_scale=0.5,
                    win_name='',
                    show=False,
                    show_keypoint_weight=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (list[dict]): The results to draw over `img`
                (bbox_result, pose_result).
            skeleton (list[list]): The connection of keypoints.
                skeleton is 0-based indexing.
            kpt_score_thr (float, optional): Minimum score of keypoints
                to be shown. Default: 0.3.
            pose_kpt_color (np.array[Nx3]): Color of N keypoints.
                If None, do not draw keypoints.
            pose_link_color (np.array[Mx3]): Color of M links.
                If None, do not draw links.
            radius (int): Radius of circles.
            thickness (int): Thickness of lines.
            font_scale (float): Font scales of texts.
            win_name (str): The window name.
            show (bool): Whether to show the image. Default: False.
            show_keypoint_weight (bool): Whether to change the transparency
                using the predicted confidence scores of keypoints.
            wait_time (int): Value of waitKey param.
                Default: 0.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            Tensor: Visualized image only if not `show` or `out_file`.
        """
        img = mmcv.imread(img)
        img = img.copy()
        img_h, img_w, _ = img.shape

        pose_result = []
        for res in result:
            pose_result.append(res['keypoints'])

        imshow_keypoints(img, pose_result, skeleton, kpt_score_thr,
                         pose_kpt_color, pose_link_color, radius, thickness)

        if show:
            imshow(img, win_name, wait_time)

        if out_file is not None:
            imwrite(img, out_file)
        ...
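Before moving on, here is a minimal sketch of the kind of dictionary that would be supplied as test_cfg when this detector is built. It is not taken from the repository above: the values are illustrative placeholders, and the keys simply mirror the lookups that __init__, forward_test and HeatmapParser perform in the snippet. A real mmpose model config would pair it with full backbone and keypoint_head dicts.

# Illustrative sketch only: placeholder values, keyed to match the lookups
# made by AssociativeEmbedding above (e.g. self.test_cfg['num_joints']).
test_cfg = dict(
    num_joints=17,            # passed to split_ae_outputs
    with_heatmaps=[True],     # which head outputs carry heatmaps
    with_ae=[True],           # which head outputs carry associative-embedding tags
    project2image=True,       # project feature maps back to the input size
    flip_test=True,           # also run a horizontally flipped pass and aggregate
    tag_per_joint=True,       # controls how flipped tag maps are mirrored
    align_corners=False,      # interpolation behaviour for resizing/aggregation
    adjust=True,              # post-processing flags handed to parser.parse()
    refine=True,
    use_udp=False,            # read in __init__ via test_cfg.get('use_udp', False)
    # The grouping parser also expects its own thresholds; hypothetical values:
    max_num_people=30,
    detection_threshold=0.1,
    tag_threshold=1.0,
    nms_kernel=5,
    nms_padding=2,
)

Because each of these decisions is a plain dictionary lookup (self.test_cfg['...'] or self.test_cfg.get(...)), test-time behaviour such as flip testing or multi-scale aggregation can be switched from the config without touching the model code.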

test_forward.py

Source: test_forward.py (GitHub)

1"""2pytest tests/test_forward.py3"""4import copy5from os.path import dirname, exists, join6import numpy as np7import torch8def _get_config_directory():9 """ Find the predefined detector config directory """10 try:11 # Assume we are running in the source mmdetection repo12 repo_dpath = dirname(dirname(__file__))13 except NameError:14 # For IPython development when this __file__ is not defined15 import mmdet16 repo_dpath = dirname(dirname(mmdet.__file__))17 config_dpath = join(repo_dpath, 'configs')18 if not exists(config_dpath):19 raise Exception('Cannot find config path')20 return config_dpath21def _get_config_module(fname):22 """23 Load a configuration as a python module24 """25 from xdoctest.utils import import_module_from_path26 config_dpath = _get_config_directory()27 config_fpath = join(config_dpath, fname)28 config_mod = import_module_from_path(config_fpath)29 return config_mod30def _get_detector_cfg(fname):31 """32 Grab configs necessary to create a detector. These are deep copied to allow33 for safe modification of parameters without influencing other tests.34 """35 import mmcv36 config = _get_config_module(fname)37 model = copy.deepcopy(config.model)38 train_cfg = mmcv.Config(copy.deepcopy(config.train_cfg))39 test_cfg = mmcv.Config(copy.deepcopy(config.test_cfg))40 return model, train_cfg, test_cfg41def test_ssd300_forward():42 model, train_cfg, test_cfg = _get_detector_cfg('ssd300_coco.py')43 model['pretrained'] = None44 from mmdet.models import build_detector45 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)46 input_shape = (1, 3, 300, 300)47 mm_inputs = _demo_mm_inputs(input_shape)48 imgs = mm_inputs.pop('imgs')49 img_metas = mm_inputs.pop('img_metas')50 # Test forward train51 gt_bboxes = mm_inputs['gt_bboxes']52 gt_labels = mm_inputs['gt_labels']53 losses = detector.forward(54 imgs,55 img_metas,56 gt_bboxes=gt_bboxes,57 gt_labels=gt_labels,58 return_loss=True)59 assert isinstance(losses, dict)60 # Test forward test61 with torch.no_grad():62 img_list = [g[None, :] for g in imgs]63 batch_results = []64 for one_img, one_meta in zip(img_list, img_metas):65 result = detector.forward([one_img], [[one_meta]],66 return_loss=False)67 batch_results.append(result)68def test_rpn_forward():69 model, train_cfg, test_cfg = _get_detector_cfg('rpn_r50_fpn_1x.py')70 model['pretrained'] = None71 from mmdet.models import build_detector72 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)73 input_shape = (1, 3, 224, 224)74 mm_inputs = _demo_mm_inputs(input_shape)75 imgs = mm_inputs.pop('imgs')76 img_metas = mm_inputs.pop('img_metas')77 # Test forward train78 gt_bboxes = mm_inputs['gt_bboxes']79 losses = detector.forward(80 imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)81 assert isinstance(losses, dict)82 # Test forward test83 with torch.no_grad():84 img_list = [g[None, :] for g in imgs]85 batch_results = []86 for one_img, one_meta in zip(img_list, img_metas):87 result = detector.forward([one_img], [[one_meta]],88 return_loss=False)89 batch_results.append(result)90def test_retina_ghm_forward():91 model, train_cfg, test_cfg = _get_detector_cfg(92 'ghm/retinanet_ghm_r50_fpn_1x.py')93 model['pretrained'] = None94 from mmdet.models import build_detector95 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)96 input_shape = (3, 3, 224, 224)97 mm_inputs = _demo_mm_inputs(input_shape)98 imgs = mm_inputs.pop('imgs')99 img_metas = mm_inputs.pop('img_metas')100 # Test forward train101 gt_bboxes = mm_inputs['gt_bboxes']102 gt_labels 
= mm_inputs['gt_labels']103 losses = detector.forward(104 imgs,105 img_metas,106 gt_bboxes=gt_bboxes,107 gt_labels=gt_labels,108 return_loss=True)109 assert isinstance(losses, dict)110 # Test forward test111 with torch.no_grad():112 img_list = [g[None, :] for g in imgs]113 batch_results = []114 for one_img, one_meta in zip(img_list, img_metas):115 result = detector.forward([one_img], [[one_meta]],116 return_loss=False)117 batch_results.append(result)118 if torch.cuda.is_available():119 detector = detector.cuda()120 imgs = imgs.cuda()121 # Test forward train122 gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]123 gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]124 losses = detector.forward(125 imgs,126 img_metas,127 gt_bboxes=gt_bboxes,128 gt_labels=gt_labels,129 return_loss=True)130 assert isinstance(losses, dict)131 # Test forward test132 with torch.no_grad():133 img_list = [g[None, :] for g in imgs]134 batch_results = []135 for one_img, one_meta in zip(img_list, img_metas):136 result = detector.forward([one_img], [[one_meta]],137 return_loss=False)138 batch_results.append(result)139def test_cascade_forward():140 try:141 from torchvision import _C as C # NOQA142 except ImportError:143 import pytest144 raise pytest.skip('requires torchvision on cpu')145 model, train_cfg, test_cfg = _get_detector_cfg(146 'cascade_rcnn_r50_fpn_1x.py')147 model['pretrained'] = None148 # torchvision roi align supports CPU149 model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True150 from mmdet.models import build_detector151 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)152 input_shape = (1, 3, 256, 256)153 # Test forward train with a non-empty truth batch154 mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])155 imgs = mm_inputs.pop('imgs')156 img_metas = mm_inputs.pop('img_metas')157 gt_bboxes = mm_inputs['gt_bboxes']158 gt_labels = mm_inputs['gt_labels']159 losses = detector.forward(160 imgs,161 img_metas,162 gt_bboxes=gt_bboxes,163 gt_labels=gt_labels,164 return_loss=True)165 assert isinstance(losses, dict)166 from mmdet.apis.train import parse_losses167 total_loss = float(parse_losses(losses)[0].item())168 assert total_loss > 0169 # Test forward train with an empty truth batch170 mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])171 imgs = mm_inputs.pop('imgs')172 img_metas = mm_inputs.pop('img_metas')173 gt_bboxes = mm_inputs['gt_bboxes']174 gt_labels = mm_inputs['gt_labels']175 losses = detector.forward(176 imgs,177 img_metas,178 gt_bboxes=gt_bboxes,179 gt_labels=gt_labels,180 return_loss=True)181 assert isinstance(losses, dict)182 from mmdet.apis.train import parse_losses183 total_loss = float(parse_losses(losses)[0].item())184 assert total_loss > 0185def test_faster_rcnn_forward():186 try:187 from torchvision import _C as C # NOQA188 except ImportError:189 import pytest190 raise pytest.skip('requires torchvision on cpu')191 model, train_cfg, test_cfg = _get_detector_cfg('faster_rcnn_r50_fpn_1x.py')192 model['pretrained'] = None193 # torchvision roi align supports CPU194 model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True195 from mmdet.models import build_detector196 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)197 input_shape = (1, 3, 256, 256)198 # Test forward train with a non-empty truth batch199 mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])200 imgs = mm_inputs.pop('imgs')201 img_metas = mm_inputs.pop('img_metas')202 gt_bboxes = mm_inputs['gt_bboxes']203 gt_labels = 
mm_inputs['gt_labels']204 losses = detector.forward(205 imgs,206 img_metas,207 gt_bboxes=gt_bboxes,208 gt_labels=gt_labels,209 return_loss=True)210 assert isinstance(losses, dict)211 from mmdet.apis.train import parse_losses212 total_loss = float(parse_losses(losses)[0].item())213 assert total_loss > 0214 # Test forward train with an empty truth batch215 mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])216 imgs = mm_inputs.pop('imgs')217 img_metas = mm_inputs.pop('img_metas')218 gt_bboxes = mm_inputs['gt_bboxes']219 gt_labels = mm_inputs['gt_labels']220 losses = detector.forward(221 imgs,222 img_metas,223 gt_bboxes=gt_bboxes,224 gt_labels=gt_labels,225 return_loss=True)226 assert isinstance(losses, dict)227 from mmdet.apis.train import parse_losses228 total_loss = float(parse_losses(losses)[0].item())229 assert total_loss > 0230def test_faster_rcnn_ohem_forward():231 try:232 from torchvision import _C as C # NOQA233 except ImportError:234 import pytest235 raise pytest.skip('requires torchvision on cpu')236 model, train_cfg, test_cfg = _get_detector_cfg(237 'faster_rcnn_ohem_r50_fpn_1x.py')238 model['pretrained'] = None239 # torchvision roi align supports CPU240 model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True241 from mmdet.models import build_detector242 detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)243 input_shape = (1, 3, 256, 256)244 # Test forward train with a non-empty truth batch245 mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])246 imgs = mm_inputs.pop('imgs')247 img_metas = mm_inputs.pop('img_metas')248 gt_bboxes = mm_inputs['gt_bboxes']249 gt_labels = mm_inputs['gt_labels']250 losses = detector.forward(251 imgs,252 img_metas,253 gt_bboxes=gt_bboxes,254 gt_labels=gt_labels,255 return_loss=True)256 assert isinstance(losses, dict)257 from mmdet.apis.train import parse_losses258 total_loss = float(parse_losses(losses)[0].item())259 assert total_loss > 0260 # Test forward train with an empty truth batch261 mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])262 imgs = mm_inputs.pop('imgs')263 img_metas = mm_inputs.pop('img_metas')264 gt_bboxes = mm_inputs['gt_bboxes']265 gt_labels = mm_inputs['gt_labels']266 losses = detector.forward(267 imgs,268 img_metas,269 gt_bboxes=gt_bboxes,270 gt_labels=gt_labels,271 return_loss=True)272 assert isinstance(losses, dict)273 from mmdet.apis.train import parse_losses274 total_loss = float(parse_losses(losses)[0].item())275 assert total_loss > 0276def _demo_mm_inputs(input_shape=(1, 3, 300, 300),277 num_items=None, num_classes=10): # yapf: disable278 """279 Create a superset of inputs needed to run test or train batches.280 Args:281 input_shape (tuple):282 input batch dimensions283 num_items (None | List[int]):284 specifies the number of boxes in each batch item285 num_classes (int):286 number of different labels a box might have287 """288 (N, C, H, W) = input_shape289 rng = np.random.RandomState(0)290 imgs = rng.rand(*input_shape)291 img_metas = [{292 'img_shape': (H, W, C),293 'ori_shape': (H, W, C),294 'pad_shape': (H, W, C),295 'filename': '<demo>.png',296 'scale_factor': 1.0,297 'flip': False,298 } for _ in range(N)]299 gt_bboxes = []300 gt_labels = []301 for batch_idx in range(N):302 if num_items is None:303 num_boxes = rng.randint(1, 10)304 else:305 num_boxes = num_items[batch_idx]306 cx, cy, bw, bh = rng.rand(num_boxes, 4).T307 tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)308 tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)309 br_x = ((cx * W) + (W * bw / 2)).clip(0, W)310 br_y = ((cy * 
H) + (H * bh / 2)).clip(0, H)311 boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T312 class_idxs = rng.randint(1, num_classes, size=num_boxes)313 gt_bboxes.append(torch.FloatTensor(boxes))314 gt_labels.append(torch.LongTensor(class_idxs))315 mm_inputs = {316 'imgs': torch.FloatTensor(imgs),317 'img_metas': img_metas,318 'gt_bboxes': gt_bboxes,319 'gt_labels': gt_labels,320 'gt_bboxes_ignore': None,321 }...
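The tests above all follow the same skeleton: load a config, blank out model['pretrained'], build the detector with its train_cfg and test_cfg, then run a training and an inference forward pass on synthetic inputs. The helper below is a hypothetical refactor of that skeleton; it is not part of test_forward.py, it assumes the old-style mmdet API shown above (where build_detector accepts train_cfg/test_cfg keyword arguments), and it assumes the truncated _demo_mm_inputs returns the mm_inputs dict it builds.

# Hypothetical helper, not in the original test file: the shared skeleton
# of the forward tests above, factored into one reusable function.
def _forward_smoke_test(fname, input_shape=(1, 3, 256, 256)):
    from mmdet.models import build_detector
    model, train_cfg, test_cfg = _get_detector_cfg(fname)
    model['pretrained'] = None          # never download weights inside a unit test
    detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
    mm_inputs = _demo_mm_inputs(input_shape)
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # One training pass: losses come back as a dict keyed by loss name.
    losses = detector.forward(
        imgs, img_metas, gt_bboxes=mm_inputs['gt_bboxes'],
        gt_labels=mm_inputs['gt_labels'], return_loss=True)
    assert isinstance(losses, dict)
    # One inference pass per image, driven entirely by test_cfg.
    with torch.no_grad():
        return [
            detector.forward([img[None, :]], [[meta]], return_loss=False)
            for img, meta in zip(imgs, img_metas)
        ]

Individual tests can still be selected the usual way, for example pytest tests/test_forward.py::test_ssd300_forward -q (the module docstring shows the file-level invocation).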

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run pytest-django automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

