How to use _generate_task method in lisa

Best Python code snippet using lisa_python

test_formats.py

Source:test_formats.py Github

copy

Full Screen

...201 for i in range(count)202 }203 images["image_quality"] = 75204 return images205 def _generate_task(self, images, **overrides):206 task = {207 "name": "my task #1",208 "overlap": 0,209 "segment_size": 100,210 "labels": [211 {212 "name": "car",213 "attributes": [214 {215 "name": "model",216 "mutable": False,217 "input_type": "select",218 "default_value": "mazda",219 "values": ["bmw", "mazda", "renault"]220 },221 {222 "name": "parked",223 "mutable": True,224 "input_type": "checkbox",225 "default_value": False226 },227 ]228 },229 {"name": "person"},230 ]231 }232 task.update(overrides)233 return self._create_task(task, images)234 @staticmethod235 def _test_export(check, task, format_name, **export_args):236 with tempfile.TemporaryDirectory() as temp_dir:237 file_path = osp.join(temp_dir, format_name)238 dm.task.export_task(task["id"], file_path,239 format_name, **export_args)240 check(file_path)241 def test_export_formats_query(self):242 formats = dm.views.get_export_formats()243 self.assertEqual({f.DISPLAY_NAME for f in formats},244 {245 'COCO 1.0',246 'CVAT for images 1.1',247 'CVAT for video 1.1',248 'Datumaro 1.0',249 'Datumaro 3D 1.0',250 'LabelMe 3.0',251 'MOT 1.1',252 'MOTS PNG 1.0',253 'PASCAL VOC 1.1',254 'Segmentation mask 1.1',255 'TFRecord 1.0',256 'YOLO 1.1',257 'ImageNet 1.0',258 'CamVid 1.0',259 'WiderFace 1.0',260 'VGGFace2 1.0',261 'Market-1501 1.0',262 'ICDAR Recognition 1.0',263 'ICDAR Localization 1.0',264 'ICDAR Segmentation 1.0',265 'Kitti Raw Format 1.0',266 'Sly Point Cloud Format 1.0',267 'KITTI 1.0',268 'LFW 1.0',269 'Cityscapes 1.0',270 'Open Images V6 1.0'271 })272 def test_import_formats_query(self):273 formats = dm.views.get_import_formats()274 self.assertEqual({f.DISPLAY_NAME for f in formats},275 {276 'COCO 1.0',277 'CVAT 1.1',278 'LabelMe 3.0',279 'MOT 1.1',280 'MOTS PNG 1.0',281 'PASCAL VOC 1.1',282 'Segmentation mask 1.1',283 'TFRecord 1.0',284 'YOLO 1.1',285 'ImageNet 1.0',286 'CamVid 1.0',287 'WiderFace 1.0',288 'VGGFace2 
1.0',289 'Market-1501 1.0',290 'ICDAR Recognition 1.0',291 'ICDAR Localization 1.0',292 'ICDAR Segmentation 1.0',293 'Kitti Raw Format 1.0',294 'Sly Point Cloud Format 1.0',295 'KITTI 1.0',296 'LFW 1.0',297 'Cityscapes 1.0',298 'Open Images V6 1.0',299 'Datumaro 1.0',300 'Datumaro 3D 1.0',301 })302 def test_exports(self):303 def check(file_path):304 with open(file_path, 'rb') as f:305 self.assertTrue(len(f.read()) != 0)306 for f in dm.views.get_export_formats():307 if not f.ENABLED:308 self.skipTest("Format is disabled")309 format_name = f.DISPLAY_NAME310 if format_name == "VGGFace2 1.0":311 self.skipTest("Format is disabled")312 for save_images in { True, False }:313 images = self._generate_task_images(3)314 task = self._generate_task(images)315 self._generate_annotations(task)316 with self.subTest(format=format_name, save_images=save_images):317 self._test_export(check, task,318 format_name, save_images=save_images)319 def test_empty_images_are_exported(self):320 dm_env = dm.formats.registry.dm_env321 for format_name, importer_name in [322 ('COCO 1.0', 'coco'),323 ('CVAT for images 1.1', 'cvat'),324 # ('CVAT for video 1.1', 'cvat'), # does not support325 ('Datumaro 1.0', 'datumaro'),326 ('LabelMe 3.0', 'label_me'),327 # ('MOT 1.1', 'mot_seq'), # does not support328 # ('MOTS PNG 1.0', 'mots_png'), # does not support329 ('PASCAL VOC 1.1', 'voc'),330 ('Segmentation mask 1.1', 'voc'),331 ('TFRecord 1.0', 'tf_detection_api'),332 ('YOLO 1.1', 'yolo'),333 ('ImageNet 1.0', 'imagenet_txt'),334 ('CamVid 1.0', 'camvid'),335 ('WiderFace 1.0', 'wider_face'),336 ('VGGFace2 1.0', 'vgg_face2'),337 ('Market-1501 1.0', 'market1501'),338 ('ICDAR Recognition 1.0', 'icdar_word_recognition'),339 ('ICDAR Localization 1.0', 'icdar_text_localization'),340 ('ICDAR Segmentation 1.0', 'icdar_text_segmentation'),341 # ('KITTI 1.0', 'kitti') format does not support empty annotations342 ('LFW 1.0', 'lfw'),343 # ('Cityscapes 1.0', 'cityscapes'), does not support, empty annotations344 ]:345 with 
self.subTest(format=format_name):346 if not dm.formats.registry.EXPORT_FORMATS[format_name].ENABLED:347 self.skipTest("Format is disabled")348 images = self._generate_task_images(3)349 task = self._generate_task(images)350 def check(file_path):351 def load_dataset(src):352 return datumaro.components.dataset. \353 Dataset.import_from(src, importer_name, env=dm_env)354 if zipfile.is_zipfile(file_path):355 with tempfile.TemporaryDirectory() as tmp_dir:356 zipfile.ZipFile(file_path).extractall(tmp_dir)357 dataset = load_dataset(tmp_dir)358 self.assertEqual(len(dataset), task["size"])359 else:360 dataset = load_dataset(file_path)361 self.assertEqual(len(dataset), task["size"])362 self._test_export(check, task, format_name, save_images=False)363 def test_can_skip_outside(self):364 images = self._generate_task_images(3)365 task = self._generate_task(images)366 self._generate_annotations(task)367 task_ann = TaskAnnotation(task["id"])368 task_ann.init_from_db()369 task_data = TaskData(task_ann.ir_data, Task.objects.get(pk=task["id"]))370 extractor = CvatTaskDataExtractor(task_data)371 dm_dataset = datumaro.components.project.Dataset.from_extractors(extractor)372 self.assertEqual(4, len(dm_dataset.get("image_1").annotations))373 def test_no_outside_shapes_in_per_frame_export(self):374 images = self._generate_task_images(3)375 task = self._generate_task(images)376 self._generate_annotations(task)377 task_ann = TaskAnnotation(task["id"])378 task_ann.init_from_db()379 task_data = TaskData(task_ann.ir_data, Task.objects.get(pk=task["id"]))380 outside_count = 0381 for f in task_data.group_by_frame(include_empty=True):382 for ann in f.labeled_shapes:383 if getattr(ann, 'outside', None):384 outside_count += 1385 self.assertEqual(0, outside_count)386 def test_cant_make_rel_frame_id_from_unknown(self):387 images = self._generate_task_images(3)388 images['frame_filter'] = 'step=2'389 task = self._generate_task(images)390 task_data = TaskData(AnnotationIR(), 
Task.objects.get(pk=task['id']))391 with self.assertRaisesRegex(ValueError, r'Unknown'):392 task_data.rel_frame_id(1) # the task has only 0 and 2 frames393 def test_can_make_rel_frame_id_from_known(self):394 images = self._generate_task_images(6)395 images['frame_filter'] = 'step=2'396 images['start_frame'] = 1397 task = self._generate_task(images)398 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))399 self.assertEqual(2, task_data.rel_frame_id(5))400 def test_cant_make_abs_frame_id_from_unknown(self):401 images = self._generate_task_images(3)402 images['frame_filter'] = 'step=2'403 task = self._generate_task(images)404 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))405 with self.assertRaisesRegex(ValueError, r'Unknown'):406 task_data.abs_frame_id(2) # the task has only 0 and 1 indices407 def test_can_make_abs_frame_id_from_known(self):408 images = self._generate_task_images(6)409 images['frame_filter'] = 'step=2'410 images['start_frame'] = 1411 task = self._generate_task(images)412 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))413 self.assertEqual(5, task_data.abs_frame_id(2))414 def test_frames_outside_are_not_generated(self):415 # https://github.com/openvinotoolkit/cvat/issues/2827416 images = self._generate_task_images(10)417 images['start_frame'] = 0418 task = self._generate_task(images, overlap=3, segment_size=6)419 annotations = {420 "version": 0,421 "tags": [],422 "shapes": [],423 "tracks": [424 {425 "frame": 6,426 "label_id": task["labels"][0]["id"],427 "group": None,428 "source": "manual",429 "attributes": [],430 "shapes": [431 {432 "frame": 6,433 "points": [1.0, 2.1, 100, 300.222],434 "type": "rectangle",435 "occluded": False,436 "outside": False,437 "attributes": [],438 },439 ]440 },441 ]442 }443 self._put_api_v1_job_id_annotations(444 task["segments"][2]["jobs"][0]["id"], annotations)445 task_ann = TaskAnnotation(task["id"])446 task_ann.init_from_db()447 task_data = 
TaskData(task_ann.ir_data, Task.objects.get(pk=task['id']))448 i = -1449 for i, frame in enumerate(task_data.group_by_frame()):450 self.assertTrue(frame.frame in range(6, 10))451 self.assertEqual(i + 1, 4)452class FrameMatchingTest(_DbTestBase):453 def _generate_task_images(self, paths): # pylint: disable=no-self-use454 f = BytesIO()455 with zipfile.ZipFile(f, 'w') as archive:456 for path in paths:457 archive.writestr(path, generate_image_file(path).getvalue())458 f.name = 'images.zip'459 f.seek(0)460 return {461 'client_files[0]': f,462 'image_quality': 75,463 }464 def _generate_task(self, images):465 task = {466 "name": "my task #1",467 "overlap": 0,468 "segment_size": 100,469 "labels": [470 {471 "name": "car",472 "attributes": [473 {474 "name": "model",475 "mutable": False,476 "input_type": "select",477 "default_value": "mazda",478 "values": ["bmw", "mazda", "renault"]479 },480 {481 "name": "parked",482 "mutable": True,483 "input_type": "checkbox",484 "default_value": False485 },486 ]487 },488 {"name": "person"},489 ]490 }491 return self._create_task(task, images)492 def test_frame_matching(self):493 task_paths = [494 'a.jpg',495 'a/a.jpg',496 'a/b.jpg',497 'b/a.jpg',498 'b/c.jpg',499 'a/b/c.jpg',500 'a/b/d.jpg',501 ]502 images = self._generate_task_images(task_paths)503 task = self._generate_task(images)504 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task["id"]))505 for input_path, expected, root in [506 ('z.jpg', None, ''), # unknown item507 ('z/a.jpg', None, ''), # unknown item508 ('d.jpg', 'a/b/d.jpg', 'a/b'), # match with root hint509 ('b/d.jpg', 'a/b/d.jpg', 'a'), # match with root hint510 ] + list(zip(task_paths, task_paths, [None] * len(task_paths))): # exact matches511 with self.subTest(input=input_path):512 actual = task_data.match_frame(input_path, root)513 if actual is not None:514 actual = task_data.frame_info[actual]['path']515 self.assertEqual(expected, actual)516 def test_dataset_root(self):517 for task_paths, dataset_paths, expected 
in [518 ([ 'a.jpg', 'b/c/a.jpg' ], [ 'a.jpg', 'b/c/a.jpg' ], ''),519 ([ 'b/a.jpg', 'b/c/a.jpg' ], [ 'a.jpg', 'c/a.jpg' ], 'b'), # 'images from share' case520 ([ 'b/c/a.jpg' ], [ 'a.jpg' ], 'b/c'), # 'images from share' case521 ([ 'a.jpg' ], [ 'z.jpg' ], None),522 ]:523 with self.subTest(expected=expected):524 images = self._generate_task_images(task_paths)525 task = self._generate_task(images)526 task_data = TaskData(AnnotationIR(),527 Task.objects.get(pk=task["id"]))528 dataset = [529 datumaro.components.extractor.DatasetItem(530 id=osp.splitext(p)[0])531 for p in dataset_paths]532 root = find_dataset_root(dataset, task_data)533 self.assertEqual(expected, root)534class TaskAnnotationsImportTest(_DbTestBase):535 def _generate_custom_annotations(self, annotations, task):536 self._put_api_v1_task_id_annotations(task["id"], annotations)537 return annotations538 def _generate_task_images(self, count, name="image", **image_params):539 images = {540 "client_files[%d]" % i: generate_image_file("%s_%d.jpg" % (name, i),541 **image_params)542 for i in range(count)543 }544 images["image_quality"] = 75545 return images546 def _generate_task(self, images, annotation_format, **overrides):547 labels = []548 if annotation_format in ["ICDAR Recognition 1.0",549 "ICDAR Localization 1.0"]:550 labels = [{551 "name": "icdar",552 "attributes": [{553 "name": "text",554 "mutable": False,555 "input_type": "text",556 "values": ["word1", "word2"]557 }]558 }]559 elif annotation_format == "ICDAR Segmentation 1.0":560 labels = [{561 "name": "icdar",562 "attributes": [563 {564 "name": "text",565 "mutable": False,566 "input_type": "text",567 "values": ["word_1", "word_2", "word_3"]568 },569 {570 "name": "index",571 "mutable": False,572 "input_type": "number",573 "values": ["0", "1", "2"]574 },575 {576 "name": "color",577 "mutable": False,578 "input_type": "text",579 "values": ["100 110 240", "10 15 20", "120 128 64"]580 },581 {582 "name": "center",583 "mutable": False,584 "input_type": "text",585 
"values": ["1 2", "2 4", "10 45"]586 },587 ]588 }]589 elif annotation_format == "Market-1501 1.0":590 labels = [{591 "name": "market-1501",592 "attributes": [593 {594 "name": "query",595 "mutable": False,596 "input_type": "select",597 "values": ["True", "False"]598 },599 {600 "name": "camera_id",601 "mutable": False,602 "input_type": "number",603 "values": ["0", "1", "2", "3"]604 },605 {606 "name": "person_id",607 "mutable": False,608 "input_type": "number",609 "values": ["1", "2", "3"]610 },611 ]612 }]613 else:614 labels = [615 {616 "name": "car",617 "attributes": [618 {619 "name": "model",620 "mutable": False,621 "input_type": "select",622 "default_value": "mazda",623 "values": ["bmw", "mazda", "renault"]624 },625 {626 "name": "parked",627 "mutable": True,628 "input_type": "checkbox",629 "default_value": False630 }631 ]632 },633 {634 "name": "background",635 "attributes": [],636 },637 {"name": "person"}638 ]639 task = {640 "name": "my task #1",641 "overlap": 0,642 "segment_size": 100,643 "labels": labels644 }645 task.update(overrides)646 return self._create_task(task, images)647 def _generate_annotations(self, task, annotation_format):648 shapes = []649 tracks = []650 tags = []651 if annotation_format in ["ICDAR Recognition 1.0",652 "ICDAR Localization 1.0"]:653 shapes = [{654 "frame": 0,655 "label_id": task["labels"][0]["id"],656 "group": 0,657 "source": "manual",658 "attributes": [659 {660 "spec_id": task["labels"][0]["attributes"][0]["id"],661 "value": task["labels"][0]["attributes"][0]["values"][0]662 },663 ],664 "points": [1.0, 2.1, 10.6, 53.22],665 "type": "rectangle",666 "occluded": False,667 }]668 elif annotation_format == "Market-1501 1.0":669 tags = [{670 "frame": 1,671 "label_id": task["labels"][0]["id"],672 "group": 0,673 "source": "manual",674 "attributes": [675 {676 "spec_id": task["labels"][0]["attributes"][0]["id"],677 "value": task["labels"][0]["attributes"][0]["values"][1]678 },679 {680 "spec_id": task["labels"][0]["attributes"][1]["id"],681 
"value": task["labels"][0]["attributes"][1]["values"][2]682 },683 {684 "spec_id": task["labels"][0]["attributes"][2]["id"],685 "value": task["labels"][0]["attributes"][2]["values"][0]686 }687 ],688 }]689 elif annotation_format == "ICDAR Segmentation 1.0":690 shapes = [{691 "frame": 0,692 "label_id": task["labels"][0]["id"],693 "group": 0,694 "source": "manual",695 "attributes": [696 {697 "spec_id": task["labels"][0]["attributes"][0]["id"],698 "value": task["labels"][0]["attributes"][0]["values"][0]699 },700 {701 "spec_id": task["labels"][0]["attributes"][1]["id"],702 "value": task["labels"][0]["attributes"][1]["values"][0]703 },704 {705 "spec_id": task["labels"][0]["attributes"][2]["id"],706 "value": task["labels"][0]["attributes"][2]["values"][1]707 },708 {709 "spec_id": task["labels"][0]["attributes"][3]["id"],710 "value": task["labels"][0]["attributes"][3]["values"][2]711 }712 ],713 "points": [1.0, 2.1, 10.6, 53.22],714 "type": "rectangle",715 "occluded": False,716 }]717 else:718 rectangle_shape_wo_attrs = {719 "frame": 1,720 "label_id": task["labels"][1]["id"],721 "group": 0,722 "source": "manual",723 "attributes": [],724 "points": [2.0, 2.1, 40, 10.7],725 "type": "rectangle",726 "occluded": False,727 }728 rectangle_shape_with_attrs = {729 "frame": 0,730 "label_id": task["labels"][0]["id"],731 "group": 0,732 "source": "manual",733 "attributes": [734 {735 "spec_id": task["labels"][0]["attributes"][0]["id"],736 "value": task["labels"][0]["attributes"][0]["values"][0]737 },738 {739 "spec_id": task["labels"][0]["attributes"][1]["id"],740 "value": task["labels"][0]["attributes"][1]["default_value"]741 }742 ],743 "points": [1.0, 2.1, 10.6, 13.22],744 "type": "rectangle",745 "occluded": False,746 }747 track_wo_attrs = {748 "frame": 0,749 "label_id": task["labels"][1]["id"],750 "group": 0,751 "source": "manual",752 "attributes": [],753 "shapes": [754 {755 "frame": 0,756 "attributes": [],757 "points": [1.0, 2.1, 10.6, 53.22, 30, 20.222],758 "type": "polygon",759 
"occluded": False,760 "outside": False761 }762 ]763 }764 tag_wo_attrs = {765 "frame": 0,766 "label_id": task["labels"][0]["id"],767 "group": None,768 "attributes": []769 }770 tag_with_attrs = {771 "frame": 1,772 "label_id": task["labels"][0]["id"],773 "group": 3,774 "source": "manual",775 "attributes": [776 {777 "spec_id": task["labels"][0]["attributes"][0]["id"],778 "value": task["labels"][0]["attributes"][0]["values"][1]779 },780 {781 "spec_id": task["labels"][0]["attributes"][1]["id"],782 "value": task["labels"][0]["attributes"][1]["default_value"]783 }784 ],785 }786 if annotation_format == "VGGFace2 1.0":787 shapes = [rectangle_shape_wo_attrs]788 elif annotation_format == "CVAT 1.1":789 shapes = [rectangle_shape_wo_attrs,790 rectangle_shape_with_attrs]791 tags = [tag_with_attrs, tag_wo_attrs]792 elif annotation_format == "MOTS PNG 1.0":793 tracks = [track_wo_attrs]794 else:795 shapes = [rectangle_shape_wo_attrs, \796 rectangle_shape_with_attrs]797 tags = [tag_wo_attrs]798 tracks = [track_wo_attrs]799 annotations = {800 "version": 0,801 "tags": tags,802 "shapes": shapes,803 "tracks": tracks804 }805 return self._generate_custom_annotations(annotations, task)806 def _test_can_import_annotations(self, task, import_format):807 with tempfile.TemporaryDirectory() as temp_dir:808 file_path = osp.join(temp_dir, import_format)809 export_format = import_format810 if import_format == "CVAT 1.1":811 export_format = "CVAT for images 1.1"812 dm.task.export_task(task["id"], file_path, export_format)813 expected_ann = TaskAnnotation(task["id"])814 expected_ann.init_from_db()815 dm.task.import_task_annotations(task["id"],816 file_path, import_format)817 actual_ann = TaskAnnotation(task["id"])818 actual_ann.init_from_db()819 self.assertEqual(len(expected_ann.data), len(actual_ann.data))820 def test_can_import_annotations_for_image_with_dots_in_filename(self):821 for f in dm.views.get_import_formats():822 format_name = f.DISPLAY_NAME823 images = self._generate_task_images(3, 
"img0.0.0")824 task = self._generate_task(images, format_name)825 self._generate_annotations(task, format_name)826 with self.subTest(format=format_name):827 if not f.ENABLED:828 self.skipTest("Format is disabled")829 self._test_can_import_annotations(task, format_name)830 def test_can_import_mots_annotations_with_splited_masks(self):831 #https://github.com/openvinotoolkit/cvat/issues/3360832 format_name = 'MOTS PNG 1.0'833 source_dataset = Dataset.from_iterable([834 DatasetItem(id='image_0',835 annotations=[836 Mask(np.array([[1, 1, 1, 0, 1, 1, 1]] * 5),837 label=0, attributes={'track_id': 0})838 ]...

Full Screen

Full Screen

_test_formats.py

Source:_test_formats.py Github

copy

Full Screen

...235 for i in range(count)236 }237 images["image_quality"] = 75238 return images239 def _generate_task(self, images):240 task = {241 "name": "my task #1",242 "owner": '',243 "assignee": '',244 "overlap": 0,245 "segment_size": 100,246 "z_order": False,247 "labels": [248 {249 "name": "car",250 "attributes": [251 {252 "name": "model",253 "mutable": False,254 "input_type": "select",255 "default_value": "mazda",256 "values": ["bmw", "mazda", "renault"]257 },258 {259 "name": "parked",260 "mutable": True,261 "input_type": "checkbox",262 "default_value": False263 },264 ]265 },266 {"name": "person"},267 ]268 }269 return self._create_task(task, images)270 @staticmethod271 def _test_export(check, task, format_name, **export_args):272 with tempfile.TemporaryDirectory() as temp_dir:273 file_path = osp.join(temp_dir, format_name)274 dm.task.export_task(task["id"], file_path,275 format_name, **export_args)276 check(file_path)277 def test_export_formats_query(self):278 formats = dm.views.get_export_formats()279 self.assertEqual({f.DISPLAY_NAME for f in formats},280 {281 'COCO 1.0',282 'CVAT for images 1.1',283 'CVAT for video 1.1',284 'Datumaro 1.0',285 'LabelMe 3.0',286 'MOT 1.1',287 'PASCAL VOC 1.1',288 'Segmentation mask 1.1',289 'TFRecord 1.0',290 'YOLO 1.1',291 })292 def test_import_formats_query(self):293 formats = dm.views.get_import_formats()294 self.assertEqual({f.DISPLAY_NAME for f in formats},295 {296 'COCO 1.0',297 'CVAT 1.1',298 'LabelMe 3.0',299 'MOT 1.1',300 'PASCAL VOC 1.1',301 'Segmentation mask 1.1',302 'TFRecord 1.0',303 'YOLO 1.1',304 })305 def test_exports(self):306 def check(file_path):307 with open(file_path, 'rb') as f:308 self.assertTrue(len(f.read()) != 0)309 for f in dm.views.get_export_formats():310 if not f.ENABLED:311 self.skipTest("Format is disabled")312 format_name = f.DISPLAY_NAME313 for save_images in { True, False }:314 images = self._generate_task_images(3)315 task = self._generate_task(images)316 self._generate_annotations(task)317 with 
self.subTest(format=format_name, save_images=save_images):318 self._test_export(check, task,319 format_name, save_images=save_images)320 def test_empty_images_are_exported(self):321 dm_env = dm.formats.registry.dm_env322 for format_name, importer_name in [323 ('COCO 1.0', 'coco'),324 ('CVAT for images 1.1', 'cvat'),325 # ('CVAT for video 1.1', 'cvat'), # does not support326 ('Datumaro 1.0', 'datumaro_project'),327 ('LabelMe 3.0', 'label_me'),328 # ('MOT 1.1', 'mot_seq'), # does not support329 ('PASCAL VOC 1.1', 'voc'),330 ('Segmentation mask 1.1', 'voc'),331 ('TFRecord 1.0', 'tf_detection_api'),332 ('YOLO 1.1', 'yolo'),333 ]:334 with self.subTest(format=format_name):335 if not dm.formats.registry.EXPORT_FORMATS[format_name].ENABLED:336 self.skipTest("Format is disabled")337 images = self._generate_task_images(3)338 task = self._generate_task(images)339 def check(file_path):340 def load_dataset(src):341 if importer_name == 'datumaro_project':342 project = datumaro.components.project. \343 Project.load(src)344 # NOTE: can't import cvat.utils.cli345 # for whatever reason, so remove the dependency346 #347 project.config.remove('sources')348 return project.make_dataset()349 return dm_env.make_importer(importer_name)(src) \350 .make_dataset()351 if zipfile.is_zipfile(file_path):352 with tempfile.TemporaryDirectory() as tmp_dir:353 zipfile.ZipFile(file_path).extractall(tmp_dir)354 dataset = load_dataset(tmp_dir)355 else:356 dataset = load_dataset(file_path)357 self.assertEqual(len(dataset), task["size"])358 self._test_export(check, task, format_name, save_images=False)359 def test_can_skip_outside(self):360 images = self._generate_task_images(3)361 task = self._generate_task(images)362 self._generate_annotations(task)363 task_ann = TaskAnnotation(task["id"])364 task_ann.init_from_db()365 task_data = TaskData(task_ann.ir_data, Task.objects.get(pk=task["id"]))366 extractor = CvatTaskDataExtractor(task_data, include_outside=False)367 dm_dataset = 
datumaro.components.project.Dataset.from_extractors(extractor)368 self.assertEqual(4, len(dm_dataset.get("image_1").annotations))369 extractor = CvatTaskDataExtractor(task_data, include_outside=True)370 dm_dataset = datumaro.components.project.Dataset.from_extractors(extractor)371 self.assertEqual(5, len(dm_dataset.get("image_1").annotations))372 def test_cant_make_rel_frame_id_from_unknown(self):373 images = self._generate_task_images(3)374 images['frame_filter'] = 'step=2'375 task = self._generate_task(images)376 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))377 with self.assertRaisesRegex(ValueError, r'Unknown'):378 task_data.rel_frame_id(1) # the task has only 0 and 2 frames379 def test_can_make_rel_frame_id_from_known(self):380 images = self._generate_task_images(6)381 images['frame_filter'] = 'step=2'382 images['start_frame'] = 1383 task = self._generate_task(images)384 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))385 self.assertEqual(2, task_data.rel_frame_id(5))386 def test_cant_make_abs_frame_id_from_unknown(self):387 images = self._generate_task_images(3)388 images['frame_filter'] = 'step=2'389 task = self._generate_task(images)390 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))391 with self.assertRaisesRegex(ValueError, r'Unknown'):392 task_data.abs_frame_id(2) # the task has only 0 and 1 indices393 def test_can_make_abs_frame_id_from_known(self):394 images = self._generate_task_images(6)395 images['frame_filter'] = 'step=2'396 images['start_frame'] = 1397 task = self._generate_task(images)398 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task['id']))399 self.assertEqual(5, task_data.abs_frame_id(2))400class FrameMatchingTest(_DbTestBase):401 def _generate_task_images(self, paths): # pylint: disable=no-self-use402 f = BytesIO()403 with zipfile.ZipFile(f, 'w') as archive:404 for path in paths:405 archive.writestr(path, generate_image_file(path).getvalue())406 f.name = 
'images.zip'407 f.seek(0)408 return {409 'client_files[0]': f,410 'image_quality': 75,411 }412 def _generate_task(self, images):413 task = {414 "name": "my task #1",415 "owner": '',416 "assignee": '',417 "overlap": 0,418 "segment_size": 100,419 "z_order": False,420 "labels": [421 {422 "name": "car",423 "attributes": [424 {425 "name": "model",426 "mutable": False,427 "input_type": "select",428 "default_value": "mazda",429 "values": ["bmw", "mazda", "renault"]430 },431 {432 "name": "parked",433 "mutable": True,434 "input_type": "checkbox",435 "default_value": False436 },437 ]438 },439 {"name": "person"},440 ]441 }442 return self._create_task(task, images)443 def test_frame_matching(self):444 task_paths = [445 'a.jpg',446 'a/a.jpg',447 'a/b.jpg',448 'b/a.jpg',449 'b/c.jpg',450 'a/b/c.jpg',451 'a/b/d.jpg',452 ]453 images = self._generate_task_images(task_paths)454 task = self._generate_task(images)455 task_data = TaskData(AnnotationIR(), Task.objects.get(pk=task["id"]))456 for input_path, expected, root in [457 ('z.jpg', None, ''), # unknown item458 ('z/a.jpg', None, ''), # unknown item459 ('d.jpg', 'a/b/d.jpg', 'a/b'), # match with root hint460 ('b/d.jpg', 'a/b/d.jpg', 'a'), # match with root hint461 ] + list(zip(task_paths, task_paths, [None] * len(task_paths))): # exact matches462 with self.subTest(input=input_path):463 actual = task_data.match_frame(input_path, root)464 if actual is not None:465 actual = task_data.frame_info[actual]['path']466 self.assertEqual(expected, actual)467 def test_dataset_root(self):468 for task_paths, dataset_paths, expected in [469 ([ 'a.jpg', 'b/c/a.jpg' ], [ 'a.jpg', 'b/c/a.jpg' ], ''),470 ([ 'b/a.jpg', 'b/c/a.jpg' ], [ 'a.jpg', 'c/a.jpg' ], 'b'), # 'images from share' case471 ([ 'b/c/a.jpg' ], [ 'a.jpg' ], 'b/c'), # 'images from share' case472 ([ 'a.jpg' ], [ 'z.jpg' ], None),473 ]:474 with self.subTest(expected=expected):475 images = self._generate_task_images(task_paths)476 task = self._generate_task(images)477 task_data = 
TaskData(AnnotationIR(),478 Task.objects.get(pk=task["id"]))479 dataset = [480 datumaro.components.extractor.DatasetItem(481 id=osp.splitext(p)[0])482 for p in dataset_paths]483 root = find_dataset_root(dataset, task_data)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful