How to use apri method in SeleniumBase

Best Python code snippet using SeleniumBase

test_registers.py

Source:test_registers.py Github

copy

Full Screen

...99 def load_disk_data(cls, filename, **kwargs): pass100 @classmethod101 def clean_disk_data(cls, filename, **kwargs):pass102def data(blk):103 return blk.get_apri(), blk.get_start_n(), len(blk)104class Test_Register(TestCase):105 def setUp(self):106 if SAVES_DIR.is_dir():107 shutil.rmtree(SAVES_DIR)108 SAVES_DIR.mkdir()109 def tearDown(self):110 if SAVES_DIR.is_dir():111 shutil.rmtree(SAVES_DIR)112 Register._instances.clear()113 def test___init__(self):114 shutil.rmtree(SAVES_DIR)115 with self.assertRaises(FileNotFoundError):116 Testy_Register(SAVES_DIR, "test")117 SAVES_DIR.mkdir()118 with self.assertRaises(TypeError):119 Testy_Register(SAVES_DIR, 0)120 with self.assertRaises(TypeError):121 Testy_Register(0, "sup")122 self.assertFalse(Testy_Register(SAVES_DIR, "sup")._created)123 self.assertEqual(Testy_Register(SAVES_DIR, "sup")._version, CURRENT_VERSION)124 def test_add_subclass(self):125 with self.assertRaisesRegex(TypeError, "must be a class"):126 Register.add_subclass(0)127 class Hello:pass128 with self.assertRaisesRegex(TypeError, "subclass of `Register`"):129 Register.add_subclass(Hello)130 Register.add_subclass(Testy_Register2)131 self.assertIn(132 "Testy_Register2",133 Register._constructors.keys()134 )135 self.assertEqual(136 Register._constructors["Testy_Register2"],137 Testy_Register2138 )139 def test__split_disk_block_key(self):140 keys = [141 _BLK_KEY_PREFIX + b"{\"hello\" = \"hey\"}" + _KEY_SEP + b"00000" + _KEY_SEP + b"10",142 _BLK_KEY_PREFIX + _KEY_SEP + b"00000" + _KEY_SEP + b"10",143 _BLK_KEY_PREFIX + b"{\"hello\" = \"hey\"}" + _KEY_SEP + _KEY_SEP + b"10",144 _BLK_KEY_PREFIX + b"{\"hello\" = \"hey\"}" + _KEY_SEP + b"00000" + _KEY_SEP ,145 ]146 splits = [147 (b"{\"hello\" = \"hey\"}", b"00000", b"10"),148 (b"", b"00000", b"10"),149 (b"{\"hello\" = \"hey\"}", b"", b"10"),150 (b"{\"hello\" = \"hey\"}", b"00000", b"" ),151 ]152 for key, split in zip(keys, splits):153 self.assertEqual(154 split,155 Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, 
key)156 )157 for key in keys:158 self.assertEqual(159 key,160 Register._join_disk_block_data( *((_BLK_KEY_PREFIX, ) + Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, key)))161 )162 def test__join_disk_block_data(self):163 splits = [164 (_BLK_KEY_PREFIX, b"hello", b"there", b"friend"),165 (_BLK_KEY_PREFIX, b"", b"there", b"friend"),166 (_BLK_KEY_PREFIX, b"hello", b"", b"friend"),167 (_BLK_KEY_PREFIX, b"hello", b"there", b"" ),168 ]169 keys = [170 _BLK_KEY_PREFIX + b"hello" + _KEY_SEP + b"there" + _KEY_SEP + b"friend",171 _BLK_KEY_PREFIX + _KEY_SEP + b"there" + _KEY_SEP + b"friend",172 _BLK_KEY_PREFIX + b"hello" + _KEY_SEP + _KEY_SEP + b"friend",173 _BLK_KEY_PREFIX + b"hello" + _KEY_SEP + b"there" + _KEY_SEP174 ]175 for split,key in zip(splits, keys):176 self.assertEqual(177 key,178 Register._join_disk_block_data(*split)179 )180 for split in splits:181 self.assertEqual(182 split[1:],183 Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, Register._join_disk_block_data(*split))184 )185 def test___str__(self):186 self.assertEqual(187 str(Testy_Register(SAVES_DIR, "hello")),188 "hello"189 )190 def test___repr__(self):191 self.assertEqual(192 repr(Testy_Register(SAVES_DIR, "hello")),193 f"Testy_Register(\"{str(SAVES_DIR)}\", \"hello\")"194 )195 def test__check_open_raise_uncreated(self):196 reg = Testy_Register(SAVES_DIR, "hey")197 with self.assertRaisesRegex(Register_Error, "test"):198 reg._check_open_raise("test")199 def test__set_local_dir(self):200 # test that error is raised when `local_dir` is not a sub-dir of `saves_directory`201 local_dir = SAVES_DIR / "bad" / "test_local_dir"202 reg = Testy_Register(SAVES_DIR, "sup")203 with self.assertRaisesRegex(ValueError, "sub-directory"):204 reg._set_local_dir(local_dir)205 # test that error is raised when `Register` has not been created206 local_dir = SAVES_DIR / "test_local_dir"207 reg = Testy_Register(SAVES_DIR, "sup")208 with self.assertRaisesRegex(FileNotFoundError, "database"):209 reg._set_local_dir(local_dir)210 
# test that newly created register has the correct filestructure and instance attributes211 # register database must be manually created for this test case212 local_dir = SAVES_DIR / "test_local_dir"213 reg = Testy_Register(SAVES_DIR, "sup")214 local_dir.mkdir()215 (local_dir / REGISTER_FILENAME).mkdir(exist_ok = False)216 (local_dir / VERSION_FILEPATH).touch(exist_ok = False)217 (local_dir / MSG_FILEPATH).touch(exist_ok = False)218 (local_dir / CLS_FILEPATH).touch(exist_ok = False)219 (local_dir / DATABASE_FILEPATH).mkdir(exist_ok = False)220 try:221 reg._db = open_lmdb(local_dir / REGISTER_FILENAME, 1, False)222 reg._set_local_dir(local_dir)223 self.assertTrue(reg._created)224 self.assertEqual(225 local_dir,226 reg._local_dir227 )228 self.assertEqual(229 str(local_dir).encode("ASCII"),230 reg._local_dir_bytes231 )232 self.assertEqual(233 _SUB_KEY_PREFIX + reg._local_dir_bytes,234 reg._subreg_bytes235 )236 self.assertEqual(237 reg._db_filepath,238 local_dir / DATABASE_FILEPATH239 )240 finally:241 reg._db.close()242 def test___hash___uncreated(self):243 with self.assertRaisesRegex(Register_Error, "__hash__"):244 hash(Testy_Register(SAVES_DIR, "hey"))245 def test___eq___uncreated(self):246 with self.assertRaises(Register_Error):247 Testy_Register(SAVES_DIR, "hey") == Testy_Register(SAVES_DIR, "sup")248 def test_add_ram_block(self):249 reg = Testy_Register(SAVES_DIR, "msg")250 blk = Block([], Apri_Info(name = "test"))251 try:252 reg.add_ram_block(blk)253 except Register_Error:254 self.fail("register doesn't need to be open")255 reg = Testy_Register(SAVES_DIR, "msg")256 blk1 = Block([], Apri_Info(name = "test"))257 reg.add_ram_block(blk1)258 self.assertEqual(259 1,260 len(reg._ram_blks)261 )262 blk2 = Block([], Apri_Info(name = "testy"))263 reg.add_ram_block(blk2)264 self.assertEqual(265 2,266 len(reg._ram_blks)267 )268 blk3 = Block([], Apri_Info(name = "testy"))269 reg.add_ram_block(blk3)270 self.assertEqual(271 3,272 len(reg._ram_blks)273 )274 blk4 = Block([1], 
Apri_Info(name = "testy"))275 reg.add_ram_block(blk4)276 self.assertEqual(277 4,278 len(reg._ram_blks)279 )280 def test_open_uncreated(self):281 reg = Testy_Register(SAVES_DIR, "hey")282 with reg.open() as reg:283 self.assertFalse(reg._db_is_closed())284 self.assertTrue(reg._created)285 keyvals = {286 _START_N_HEAD_KEY : b"0",287 _START_N_TAIL_LENGTH_KEY : str(Register._START_N_TAIL_LENGTH_DEFAULT).encode("ASCII"),288 _CURR_ID_KEY: b"0"289 }290 self.assertTrue(reg._db_is_closed())291 db = None292 try:293 db = open_lmdb(reg._db_filepath, 1, False)294 with db.begin() as txn:295 for key, val in keyvals.items():296 self.assertEqual(297 val,298 txn.get(key)299 )300 self.assertEqual(301 len(keyvals),302 lmdb_count_keys(db, b"")303 )304 finally:305 if db is not None:306 db.close()307 def test_remove_ram_block(self):308 reg = Numpy_Register(SAVES_DIR, "msg")309 blk = Block([], Apri_Info(name = "name"))310 reg.add_ram_block(blk)311 try:312 reg.remove_ram_block(blk)313 except Register_Error:314 self.fail("removing ram blocks doesn't need reg to be open")315 reg = Numpy_Register(SAVES_DIR, "msg")316 blk1 = Block([], Apri_Info(name = "name1"))317 reg.add_ram_block(blk1)318 reg.remove_ram_block(blk1)319 self.assertEqual(320 0,321 len(reg._ram_blks)322 )323 reg.add_ram_block(blk1)324 reg.remove_ram_block(blk1)325 self.assertEqual(326 0,327 len(reg._ram_blks)328 )329 reg.add_ram_block(blk1)330 blk2 = Block([], Apri_Info(name = "name2"))331 reg.add_ram_block(blk2)332 reg.remove_ram_block(blk1)333 self.assertEqual(334 1,335 len(reg._ram_blks)336 )337 reg.remove_ram_block(blk2)338 self.assertEqual(339 0,340 len(reg._ram_blks)341 )342 def test_get_ram_block_by_n_no_recursive(self):343 reg = Testy_Register(SAVES_DIR, "hello")344 with self.assertRaisesRegex(IndexError, "non-negative"):345 reg.get_ram_block_by_n(Apri_Info(name = "no"), -1)346 reg = Testy_Register(SAVES_DIR, "hello")347 apri = Apri_Info(name = "list")348 blk = Block(list(range(1000)), apri)349 reg.add_ram_block(blk)350 
try:351 reg.get_ram_block_by_n(apri, 500)352 except Register_Error:353 self.fail("register does not need to be open")354 reg = Testy_Register(SAVES_DIR, "hello")355 apri = Apri_Info(name = "list")356 blk1 = Block(list(range(1000)), apri)357 reg.add_ram_block(blk1)358 for n in [0, 10, 500, 990, 999]:359 self.assertIs(360 blk1,361 reg.get_ram_block_by_n(apri, n)362 )363 for n in [1000]:364 with self.assertRaises(Data_Not_Found_Error):365 reg.get_ram_block_by_n(apri, n)366 blk2 = Block(list(range(1000, 2000)), apri, 1000)367 reg.add_ram_block(blk2)368 for n in [1000, 1010, 1990, 1999]:369 self.assertIs(370 blk2,371 reg.get_ram_block_by_n(apri, n)372 )373 def test_get_all_ram_blocks_no_recursive(self):374 reg = Testy_Register(SAVES_DIR, "msg")375 apri = Apri_Info(name = "hey")376 blk = Block([], apri)377 reg.add_ram_block(blk)378 try:379 reg.get_all_ram_blocks(apri)380 except Register_Error:381 self.fail("register does not need to be open")382 reg = Testy_Register(SAVES_DIR, "msg")383 apri1 = Apri_Info(name="hey")384 blk1 = Block([], apri1)385 reg.add_ram_block(blk1)386 self.assertEqual(387 1,388 len(list(reg.get_all_ram_blocks(apri1)))389 )390 self.assertEqual(391 blk1,392 list(reg.get_all_ram_blocks(apri1))[0]393 )394 apri2 = Apri_Info(name = "hello")395 blk2 = Block(list(range(10)), apri2)396 reg.add_ram_block(blk2)397 self.assertEqual(398 1,399 len(list(reg.get_all_ram_blocks(apri2)))400 )401 self.assertEqual(402 blk2,403 list(reg.get_all_ram_blocks(apri2))[0]404 )405 blk3 = Block(list(range(10)), apri2, 1)406 reg.add_ram_block(blk3)407 self.assertEqual(408 2,409 len(list(reg.get_all_ram_blocks(apri2)))410 )411 self.assertIn(412 blk2,413 reg.get_all_ram_blocks(apri2)414 )415 self.assertIn(416 blk3,417 reg.get_all_ram_blocks(apri2)418 )419 def test___hash___created(self):420 # create two `Register`s421 reg1 = Testy_Register(SAVES_DIR, "msg")422 reg2 = Testy_Register(SAVES_DIR, "msg")423 with reg1.open() as reg1:pass424 with reg2.open() as reg2:pass425 
self.assertEqual(426 hash(reg1),427 hash(reg1)428 )429 self.assertEqual(430 hash(reg2),431 hash(reg2)432 )433 self.assertNotEqual(434 hash(reg1),435 hash(reg2)436 )437 # manually change the `_local_dir` to force equality438 reg2 = Testy_Register(SAVES_DIR, "msg")439 reg2._set_local_dir(reg1._local_dir)440 self.assertEqual(441 hash(reg2),442 hash(reg1)443 )444 # a different `Register` derived type should change the hash value445 reg2 = Testy_Register2(SAVES_DIR, "msg")446 reg2._set_local_dir(reg1._local_dir)447 self.assertNotEqual(448 hash(reg2),449 hash(reg1)450 )451 # relative paths should work as expected452 reg2 = Testy_Register(SAVES_DIR, "msg")453 reg2._set_local_dir(".." / SAVES_DIR / reg1._local_dir)454 self.assertEqual(455 hash(reg2),456 hash(reg1)457 )458 def test___eq___created(self):459 # open two `Register`s460 reg1 = Testy_Register(SAVES_DIR, "msg")461 reg2 = Testy_Register(SAVES_DIR, "msg")462 with reg1.open() as reg1:pass463 with reg2.open() as reg2:pass464 self.assertEqual(465 reg1,466 reg1467 )468 self.assertEqual(469 reg2,470 reg2471 )472 self.assertNotEqual(473 reg1,474 reg2475 )476 # manually change the `_local_dir` to force equality477 reg2 = Testy_Register(SAVES_DIR, "msg")478 reg2._set_local_dir(reg1._local_dir)479 self.assertEqual(480 reg2,481 reg1482 )483 # test a different `Register` derived type484 reg2 = Testy_Register2(SAVES_DIR, "msg")485 reg2._set_local_dir(reg1._local_dir)486 self.assertNotEqual(487 reg2,488 reg1489 )490 # test that relative paths work as expected491 reg2 = Testy_Register(SAVES_DIR, "msg")492 reg2._set_local_dir(".." 
/ SAVES_DIR / reg1._local_dir)493 self.assertEqual(494 reg2,495 reg1496 )497 def test__check_open_raise_created(self):498 reg = Testy_Register(SAVES_DIR, "hi")499 with self.assertRaisesRegex(Register_Error, "xyz"):500 reg._check_open_raise("xyz")501 reg = Testy_Register(SAVES_DIR, "hi")502 with reg.open() as reg:503 try:504 reg._check_open_raise("xyz")505 except Register_Error:506 self.fail("the register is open")507 reg = Testy_Register(SAVES_DIR, "hi")508 with reg.open() as reg:pass509 with self.assertRaisesRegex(Register_Error, "xyz"):510 reg._check_open_raise("xyz")511 def test__get_id_by_apri_new(self):512 reg = Testy_Register(SAVES_DIR, "hi")513 with self.assertRaises(ValueError):514 reg._get_id_by_apri(None, None, True)515 with self.assertRaises(ValueError):516 reg._get_id_by_apri(None, None, False)517 apri1 = Apri_Info(name = "hi")518 apri2 = Apri_Info(name = "hello")519 apri3 = Apri_Info(name = "sup")520 apri4 = Apri_Info(name = "hey")521 reg = Testy_Register(SAVES_DIR, "hi")522 with reg.open() as reg:523 with reg._db.begin() as txn:524 curr_id = txn.get(_CURR_ID_KEY)525 _id1 = reg._get_id_by_apri(apri1, None, True)526 self.assertEqual(527 curr_id,528 _id1529 )530 self.assertEqual(531 1,532 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)533 )534 self.assertEqual(535 1,536 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)537 )538 _id2 = reg._get_id_by_apri(apri2, None, True)539 self.assertNotEqual(540 _id1,541 _id2542 )543 self.assertEqual(544 2,545 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)546 )547 self.assertEqual(548 2,549 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)550 )551 _id3 = reg._get_id_by_apri(None, apri3.to_json().encode("ASCII"), True)552 self.assertNotIn(553 _id3,554 [_id1, _id2]555 )556 self.assertEqual(557 3,558 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)559 )560 self.assertEqual(561 3,562 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)563 )564 with self.assertRaises(Data_Not_Found_Error):565 reg._get_id_by_apri(apri4, None, False)566 def 
test__get_instance(self):567 reg1 = Testy_Register(SAVES_DIR, "msg")568 with reg1.open() as reg1: pass569 reg2 = Testy_Register(SAVES_DIR, "msg")570 reg2._set_local_dir(reg1._local_dir)571 self.assertIs(572 reg1,573 Register._get_instance(reg2._local_dir)574 )575 self.assertIs(576 reg1,577 Register._get_instance(reg1._local_dir)578 )579 def test_set_message(self):580 reg = Testy_Register(SAVES_DIR, "testy")581 try:582 reg.set_message("yes")583 except Register_Error as e:584 if "has not been opened" in str(e):585 self.fail("the register doesn't need to be open for set_message")586 else:587 raise e588 self.assertEqual(589 "yes",590 str(reg)591 )592 with reg.open() as reg:pass593 reg.set_message("no")594 self.assertEqual(595 "no",596 str(reg)597 )598 with reg._msg_filepath.open("r") as fh:599 self.assertEqual(600 "no",601 fh.read()602 )603 def test_add_disk_block(self):604 reg = Testy_Register(SAVES_DIR, "sup")605 blk = Block([], Apri_Info(name = "hi"))606 with self.assertRaisesRegex(Register_Error, "open.*add_disk_block"):607 reg.add_disk_block(blk)608 reg = Testy_Register(SAVES_DIR, "hello")609 blk = Block([], Apri_Info(name = "hi"), 10**50)610 with reg.open() as reg:611 with self.assertRaisesRegex(IndexError, "correct head"):612 reg.add_disk_block(blk)613 reg = Testy_Register(SAVES_DIR, "hello")614 too_large = reg._start_n_tail_mod615 blk = Block([], Apri_Info(name = "hi"), too_large)616 with reg.open() as reg:617 with self.assertRaisesRegex(IndexError, "correct head"):618 reg.add_disk_block(blk)619 reg = Testy_Register(SAVES_DIR, "hello")620 too_large = reg._start_n_tail_mod621 blk = Block([], Apri_Info(name = "hi"), too_large - 1)622 with reg.open() as reg:623 try:624 reg.add_disk_block(blk)625 except IndexError:626 self.fail("index is not too large")627 reg = Testy_Register(SAVES_DIR, "hi")628 blk1 = Block([], Apri_Info(name = "hello"))629 blk2 = Block([1], Apri_Info(name = "hello"))630 blk3 = Block([], Apri_Info(name = "hi"))631 blk4 = Block([], Apri_Info(name 
= "hello"))632 blk5 = Block([], Apri_Info(sir = "hey", maam = "hi"))633 blk6 = Block([], Apri_Info(maam="hi", sir = "hey"))634 with reg.open() as reg:635 reg.add_disk_block(blk1)636 self.assertEqual(637 1,638 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)639 )640 self.assertEqual(641 1,642 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)643 )644 self.assertEqual(645 1,646 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)647 )648 reg.add_disk_block(blk2)649 self.assertEqual(650 2,651 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)652 )653 self.assertEqual(654 1,655 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)656 )657 self.assertEqual(658 1,659 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)660 )661 reg.add_disk_block(blk3)662 self.assertEqual(663 3,664 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)665 )666 self.assertEqual(667 2,668 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)669 )670 self.assertEqual(671 2,672 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)673 )674 with self.assertRaisesRegex(Register_Error, "[dD]uplicate"):675 reg.add_disk_block(blk4)676 reg.add_disk_block(blk5)677 with self.assertRaisesRegex(Register_Error, "[dD]uplicate"):678 reg.add_disk_block(blk6)679 with self.assertRaisesRegex(Register_Error, "read-only"):680 with reg.open(read_only = True) as reg:681 reg.add_disk_block(blk)682 reg = Numpy_Register(SAVES_DIR, "no")683 with reg.open() as reg:684 reg.add_disk_block(Block(np.arange(30), Apri_Info(maybe = "maybe")))685 for debug in [1,2,3,4]:686 apri = Apri_Info(none = "all")687 blk = Block(np.arange(14), apri, 0)688 with self.assertRaises(KeyboardInterrupt):689 reg.add_disk_block(blk, debug = debug)690 self.assertEqual(691 1,692 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)693 )694 self.assertEqual(695 1,696 lmdb_count_keys(reg._db, _COMPRESSED_KEY_PREFIX)697 )698 self.assertEqual(699 1,700 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)701 )702 self.assertEqual(703 1,704 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)705 )706 self.assertEqual(707 1,708 sum(1 for d in 
reg._local_dir.iterdir() if d.is_file())709 )710 self.assertTrue(np.all(711 np.arange(30) ==712 reg.get_disk_block(Apri_Info(maybe = "maybe"), 0, 30).get_segment()713 ))714 with self.assertRaises(Data_Not_Found_Error):715 reg.get_disk_block(Apri_Info(none = "all"), 0, 14)716 def test__get_apri_json_by_id(self):717 reg = Testy_Register(SAVES_DIR, "hello")718 with reg.open() as reg:719 apri1 = Apri_Info(name = "hi")720 _id1 = reg._get_id_by_apri(apri1, None, True)721 self.assertIsInstance(722 _id1,723 bytes724 )725 self.assertEqual(726 apri1,727 Apri_Info.from_json(reg._get_apri_json_by_id(_id1).decode("ASCII"))728 )729 apri2 = Apri_Info(name = "sup")730 _id2 = reg._get_id_by_apri(apri2, None, True)731 self.assertEqual(732 apri2,733 Apri_Info.from_json(reg._get_apri_json_by_id(_id2).decode("ASCII"))734 )735 def test_get_all_apri_info_no_recursive(self):736 reg = Testy_Register(SAVES_DIR, "msg")737 with self.assertRaisesRegex(Register_Error, "get_all_apri_info"):738 reg.get_all_apri_info()739 reg = Testy_Register(SAVES_DIR, "msg")740 with reg.open() as reg:741 apri1 = Apri_Info(name = "hello")742 reg._get_id_by_apri(apri1, None, True)743 self.assertEqual(744 1,745 len(list(reg.get_all_apri_info()))746 )747 self.assertEqual(748 apri1,749 list(reg.get_all_apri_info())[0]750 )751 apri2 = Apri_Info(name = "hey")752 blk = Block([], apri2)753 reg.add_ram_block(blk)754 self.assertEqual(755 2,756 len(list(reg.get_all_apri_info()))757 )758 self.assertIn(759 apri1,760 list(reg.get_all_apri_info())761 )762 self.assertIn(763 apri2,764 list(reg.get_all_apri_info())765 )766 # def test__from_name_same_register(self):767 #768 # reg = Testy_Register2(SAVES_DIR, "hello")769 # with reg.open() as reg: pass770 # with self.assertRaisesRegex(TypeError, "add_subclass"):771 # Register._from_local_dir(reg._local_dir)772 #773 # reg1 = Testy_Register(SAVES_DIR, "hellooooo")774 # with reg1.open() as reg1: pass775 # reg2 = Register._from_local_dir(reg1._local_dir)776 # self.assertIs(777 # reg1,778 
# reg2779 # )780 def test__open_created(self):781 reg = Testy_Register(SAVES_DIR, "testy")782 with reg.open() as reg: pass783 with reg.open() as reg:784 self.assertFalse(reg._db_is_closed())785 with self.assertRaises(Register_Already_Open_Error):786 with reg.open() as reg: pass787 reg1 = Testy_Register(SAVES_DIR, "testy")788 with reg1.open() as reg1: pass789 reg2 = Testy_Register(SAVES_DIR, "testy")790 reg2._set_local_dir(reg1._local_dir)791 self.assertEqual(792 reg1,793 reg2794 )795 self.assertFalse(796 reg1 is reg2797 )798 with reg2.open() as reg2:799 self.assertIs(800 reg1,801 reg2802 )803 def test__get_id_by_apri(self):804 reg = Testy_Register(SAVES_DIR, "hello")805 apri1 = Apri_Info(name = "hello")806 with reg.open() as reg:807 _id1 = reg._get_id_by_apri(apri1, None, True)808 _id2 = reg._get_id_by_apri(apri1, None, True)809 self.assertIsInstance(810 _id2,811 bytes812 )813 self.assertEqual(814 _id1,815 _id2816 )817 _id3 = reg._get_id_by_apri(None, apri1.to_json().encode("ASCII"), False)818 self.assertEqual(819 _id1,820 _id3821 )822 def test__convert_disk_block_key_no_head(self):823 reg = Testy_Register(SAVES_DIR, "sup")824 with reg.open() as reg:825 apri1 = Apri_Info(name = "hey")826 blk1 = Block([], apri1)827 reg.add_disk_block(blk1)828 with lmdb_prefix_iterator(reg._db, _BLK_KEY_PREFIX) as it:829 for curr_key,_ in it: pass830 self.assertEqual(831 (apri1, 0, 0),832 reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, curr_key)833 )834 self.assertEqual(835 (apri1, 0, 0),836 reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, curr_key, apri1)837 )838 old_keys = {curr_key}839 blk2 = Block(list(range(10)), apri1)840 reg.add_disk_block(blk2)841 with lmdb_prefix_iterator(reg._db, _BLK_KEY_PREFIX) as it:842 for key,_val in it:843 if key not in old_keys:844 curr_key = key845 self.assertEqual(846 (apri1, 0, 10),847 reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, curr_key)848 )849 old_keys.add(curr_key)850 apri2 = Apri_Info(name = "hello")851 blk3 = Block(list(range(100)), 
apri2, 10)852 reg.add_disk_block(blk3)853 with lmdb_prefix_iterator(reg._db, _BLK_KEY_PREFIX) as it:854 for key,_val in it:855 if key not in old_keys:856 curr_key = key857 self.assertEqual(858 (apri2, 10, 100),859 reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, curr_key)860 )861 old_keys.add(curr_key)862 blk4 = Block(list(range(100)), apri2)863 reg.add_disk_block(blk4)864 with lmdb_prefix_iterator(reg._db, _BLK_KEY_PREFIX) as it:865 for key,_val in it:866 if key not in old_keys:867 curr_key = key868 self.assertEqual(869 (apri2, 0, 100),870 reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, curr_key)871 )872 def check_reg_set_start_n_info(self, reg, mod, head, tail_length):873 self.assertEqual(874 mod,875 reg._start_n_tail_mod876 )877 self.assertEqual(878 head,879 reg._start_n_head880 )881 self.assertEqual(882 tail_length,883 reg._start_n_tail_length884 )885 with reg._db.begin() as txn:886 self.assertEqual(887 str(head).encode("ASCII"),888 txn.get(_START_N_HEAD_KEY)889 )890 self.assertEqual(891 str(tail_length).encode("ASCII"),892 txn.get(_START_N_TAIL_LENGTH_KEY)893 )894 def check_key_set_start_n_info(self, reg, key, apri, start_n, length):895 _apri, _start_n, _length = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key, None)896 self.assertEqual(897 apri,898 _apri899 )900 self.assertEqual(901 start_n,902 _start_n903 )904 self.assertEqual(905 length,906 _length907 )908 def test_set_start_n_info(self):909 reg = Testy_Register(SAVES_DIR, "hello")910 with self.assertRaisesRegex(Register_Error, "set_start_n_info"):911 reg.set_start_n_info(10, 3)912 reg = Testy_Register(SAVES_DIR, "hello")913 with reg.open() as reg:914 with self.assertRaisesRegex(TypeError, "int"):915 reg.set_start_n_info(10, 3.5)916 reg = Testy_Register(SAVES_DIR, "hello")917 with reg.open() as reg:918 with self.assertRaisesRegex(TypeError, "int"):919 reg.set_start_n_info(10.5, 3)920 reg = Testy_Register(SAVES_DIR, "hello")921 with reg.open() as reg:922 with self.assertRaisesRegex(ValueError, 
"non-negative"):923 reg.set_start_n_info(-1, 3)924 reg = Testy_Register(SAVES_DIR, "hello")925 with reg.open() as reg:926 try:927 reg.set_start_n_info(0, 3)928 except ValueError:929 self.fail("head can be 0")930 reg = Testy_Register(SAVES_DIR, "hello")931 with reg.open() as reg:932 with self.assertRaisesRegex(ValueError, "positive"):933 reg.set_start_n_info(0, -1)934 reg = Testy_Register(SAVES_DIR, "hello")935 with reg.open() as reg:936 with self.assertRaisesRegex(ValueError, "positive"):937 reg.set_start_n_info(0, 0)938 for head, tail_length in product([0, 1, 10, 100, 1100, 450], [1,2,3,4,5]):939 # check set works940 reg = Testy_Register(SAVES_DIR, "hello")941 with reg.open() as reg:942 try:943 reg.set_start_n_info(head, tail_length)944 except ValueError:945 self.fail(f"head = {head}, tail_length = {tail_length} are okay")946 with reg._db.begin() as txn:947 self.assertEqual(948 str(head).encode("ASCII"),949 txn.get(_START_N_HEAD_KEY)950 )951 self.assertEqual(952 str(tail_length).encode("ASCII"),953 txn.get(_START_N_TAIL_LENGTH_KEY)954 )955 # check read-only mode doesn't work956 with reg.open(read_only = True) as reg:957 with self.assertRaisesRegex(Register_Error, "read-only"):958 reg.set_start_n_info(head, tail_length)959 # test make sure ValueError is thrown for small smart_n960 # 0 and head * 10 ** tail_length - 1 are the two possible extremes of the small start_n961 if head > 0:962 for start_n in [0, head * 10 ** tail_length - 1]:963 reg = Testy_Register(SAVES_DIR, "hello")964 with reg.open() as reg:965 blk = Block([], Apri_Info(name = "hi"), start_n)966 reg.add_disk_block(blk)967 with self.assertRaisesRegex(ValueError, "correct head"):968 reg.set_start_n_info(head, tail_length)969 # make sure it exits safely970 self.check_reg_set_start_n_info(971 reg,972 10 ** Register._START_N_TAIL_LENGTH_DEFAULT, 0, Register._START_N_TAIL_LENGTH_DEFAULT973 )974 # test to make sure a few permissible start_n work975 smallest = head * 10 ** tail_length976 largest = smallest + 
10 ** tail_length - 1977 for start_n in [smallest, smallest + 1, smallest + 2, largest -2, largest -1, largest]:978 reg = Testy_Register(SAVES_DIR, "hello")979 apri = Apri_Info(name="hi")980 with reg.open() as reg:981 blk = Block([], apri,start_n)982 reg.add_disk_block(blk)983 for debug in [0, 1, 2]:984 if debug == 0:985 reg.set_start_n_info(head, tail_length)986 else:987 with self.assertRaises(KeyboardInterrupt):988 reg.set_start_n_info(head // 10, tail_length + 1, debug)989 self.check_reg_set_start_n_info(990 reg,991 10 ** tail_length, head, tail_length992 )993 with lmdb_prefix_iterator(reg._db, _BLK_KEY_PREFIX) as it:994 for curr_key,_ in it:pass995 self.check_key_set_start_n_info(996 reg, curr_key,997 apri, start_n, 0998 )999 old_keys = {curr_key}1000 blk = Block(list(range(50)), apri, start_n)1001 # test to make sure `largest + 1` etc do not work1002 for start_n in [largest + 1, largest + 10, largest + 100, largest + 1000]:1003 reg = Testy_Register(SAVES_DIR, "hello")1004 apri = Apri_Info(name="hi")1005 with reg.open() as reg:1006 blk = Block([], apri, start_n)1007 reg.add_disk_block(blk)1008 with self.assertRaisesRegex(ValueError, "correct head"):1009 reg.set_start_n_info(head, tail_length)1010 # make sure it exits safely1011 self.check_reg_set_start_n_info(1012 reg,1013 10 ** Register._START_N_TAIL_LENGTH_DEFAULT, 0, Register._START_N_TAIL_LENGTH_DEFAULT1014 )1015 def check__iter_disk_block_pairs(self, t, apri, start_n, length):1016 self.assertEqual(1017 3,1018 len(t)1019 )1020 self.assertIsInstance(1021 t[0],1022 Apri_Info1023 )1024 self.assertEqual(1025 apri,1026 t[0]1027 )1028 self.assertIsInstance(1029 t[1],1030 int1031 )1032 self.assertEqual(1033 start_n,1034 t[1]1035 )1036 self.assertIsInstance(1037 t[2],1038 int1039 )1040 self.assertEqual(1041 length,1042 t[2]1043 )1044 def test__iter_disk_block_pairs(self):1045 reg = Testy_Register(SAVES_DIR, "HI")1046 with reg.open() as reg:1047 apri1 = Apri_Info(name = "abc")1048 apri2 = Apri_Info(name = "xyz")1049 
blk1 = Block(list(range(50)), apri1, 0)1050 blk2 = Block(list(range(50)), apri1, 50)1051 blk3 = Block(list(range(500)), apri2, 1000)1052 reg.add_disk_block(blk1)1053 total = 01054 for i, t in chain(1055 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, None)),1056 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri1, None)),1057 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, apri1.to_json().encode("ASCII")))1058 ):1059 total += 11060 if i == 0:1061 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1062 self.check__iter_disk_block_pairs(t, apri1, 0, 50)1063 else:1064 self.fail()1065 if total != 3:1066 self.fail(str(total))1067 reg.add_disk_block(blk2)1068 total = 01069 for i, t in chain(1070 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, None)),1071 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri1, None)),1072 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, apri1.to_json().encode("ASCII")))1073 ):1074 total += 11075 if i == 0:1076 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1077 self.check__iter_disk_block_pairs(t, apri1, 0, 50)1078 elif i == 1:1079 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1080 self.check__iter_disk_block_pairs(t, apri1, 50, 50)1081 else:1082 self.fail()1083 if total != 6:1084 self.fail(str(total))1085 reg.add_disk_block(blk3)1086 total = 01087 for i, t in chain(1088 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, None))1089 ):1090 total += 11091 if i == 0:1092 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1093 self.check__iter_disk_block_pairs(t, apri1, 0, 50)1094 elif i == 1:1095 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1096 self.check__iter_disk_block_pairs(t, apri1, 50, 50)1097 elif i == 2:1098 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri2)1099 self.check__iter_disk_block_pairs(t, apri2, 1000, 500)1100 else:1101 self.fail()1102 if total != 3:1103 
self.fail()1104 total = 01105 for i, t in chain(1106 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri1, None)),1107 enumerate(reg._iter_disk_block_pairs(_BLK_KEY_PREFIX, None, apri1.to_json().encode("ASCII")))1108 ):1109 total += 11110 if i == 0:1111 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1112 self.check__iter_disk_block_pairs(t, apri1, 0, 50)1113 elif i == 1:1114 t = reg._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, t[0], apri1)1115 self.check__iter_disk_block_pairs(t, apri1, 50, 50)1116 else:1117 self.fail()1118 if total != 4:1119 self.fail()1120 # def test__from_local_dir_different_registers(self):1121 #1122 # reg1 = Testy_Register(SAVES_DIR, "hellooooo")1123 # with reg1.open() as reg1: pass1124 #1125 # reg2 = Testy_Register(SAVES_DIR, "hellooooo")1126 # with reg2.open() as reg2: pass1127 #1128 # del Register._instances[reg2]1129 #1130 # reg3 = Register._from_local_dir(reg2._local_dir)1131 #1132 # self.assertEqual(1133 # reg2,1134 # reg31135 # )1136 # self.assertFalse(1137 # reg2 is reg31138 # )1139 def test_open(self):1140 reg1 = Testy_Register(SAVES_DIR, "msg")1141 with reg1.open() as reg2:pass1142 self.assertIs(1143 reg1,1144 reg21145 )1146 try:1147 with reg1.open() as reg1:pass1148 except Register_Error:1149 self.fail()1150 reg2 = Testy_Register(SAVES_DIR, "hello")1151 with reg2.open() as reg2:pass1152 reg3 = Testy_Register(SAVES_DIR, "hello")1153 reg3._set_local_dir(reg2._local_dir)1154 with reg3.open() as reg4:pass1155 self.assertIs(1156 reg4,1157 reg21158 )1159 reg4 = Testy_Register(SAVES_DIR, "sup")1160 with self.assertRaisesRegex(ValueError, "read-only"):1161 with reg4.open(read_only = True) as reg:pass1162 def test__recursive_open(self):1163 # must be created1164 reg1 = Testy_Register(SAVES_DIR, "hello")1165 with self.assertRaises(Register_Error):1166 with reg1._recursive_open(False):pass1167 # must be created1168 reg2 = Testy_Register(SAVES_DIR, "hello")1169 with reg2.open() as reg2:pass1170 with 
reg2._recursive_open(False) as reg3:pass1171 self.assertIs(1172 reg2,1173 reg31174 )1175 reg3 = Testy_Register(SAVES_DIR, "hello")1176 reg3._set_local_dir(reg2._local_dir)1177 with reg3._recursive_open(False) as reg4:pass1178 self.assertIs(1179 reg2,1180 reg41181 )1182 reg5 = Testy_Register(SAVES_DIR, "hi")1183 with reg5.open() as reg5:1184 try:1185 with reg5._recursive_open(False):pass1186 except Register_Error:1187 self.fail()1188 else:1189 self.assertFalse(1190 reg5._db_is_closed()1191 )1192 self.assertTrue(1193 reg5._db_is_closed()1194 )1195 reg6 = Testy_Register(SAVES_DIR, "supp")1196 with reg6.open() as reg6: pass1197 with reg6.open(read_only = True) as reg6:1198 with self.assertRaisesRegex(ValueError, "read-only"):1199 with reg6._recursive_open(False):pass1200 def test_get_disk_block_no_recursive(self):1201 reg = Numpy_Register(SAVES_DIR, "hello")1202 with self.assertRaisesRegex(Register_Error, "get_disk_block"):1203 reg.get_disk_block(Apri_Info(name = "i am the octopus"), 0, 0)1204 reg = Numpy_Register(SAVES_DIR, "hello")1205 with reg.open() as reg:1206 apri1 = Apri_Info(name = "i am the octopus")1207 blk1 = Block(np.arange(100), apri1)1208 reg.add_disk_block(blk1)1209 self.assertEqual(1210 blk1,1211 reg.get_disk_block(apri1, 0, 100)1212 )1213 blk2 = Block(np.arange(100,200), apri1, 100)1214 reg.add_disk_block(blk2)1215 self.assertEqual(1216 blk2,1217 reg.get_disk_block(apri1, 100, 100)1218 )1219 self.assertEqual(1220 blk1,1221 reg.get_disk_block(apri1, 0, 100)1222 )1223 apri2 = Apri_Info(name = "hello")1224 blk3 = Block(np.arange(3000,4000), apri2, 2000)1225 reg.add_disk_block(blk3)1226 self.assertEqual(1227 blk3,1228 reg.get_disk_block(apri2, 2000, 1000)1229 )1230 self.assertEqual(1231 blk2,1232 reg.get_disk_block(apri1, 100, 100)1233 )1234 self.assertEqual(1235 blk1,1236 reg.get_disk_block(apri1, 0, 100)1237 )1238 for metadata in [1239 (apri1, 0, 200), (apri1, 1, 99), (apri1, 5, 100), (apri1, 1, 100),1240 (apri2, 2000, 999), (apri2, 2000, 1001), (apri2, 
1999, 1000),1241 (Apri_Info(name = "noooo"), 0, 100)1242 ]:1243 with self.assertRaises(Data_Not_Found_Error):1244 reg.get_disk_block(*metadata)1245 apri3 = Apri_Info(1246 name = "'''i love quotes'''and'' backslashes\\\\",1247 num = '\\\"double\\quotes\' are cool too"'1248 )1249 blk = Block(np.arange(69, 420), apri3)1250 reg.add_disk_block(blk)1251 self.assertEqual(1252 blk,1253 reg.get_disk_block(apri3, 0, 420 - 69)1254 )1255 def _remove_disk_block_helper(self, reg, block_data):1256 expected_num_blocks = len(block_data)1257 self.assertEqual(1258 expected_num_blocks,1259 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)1260 )1261 self.assertEqual(1262 expected_num_blocks,1263 lmdb_count_keys(reg._db, _COMPRESSED_KEY_PREFIX)1264 )1265 self.assertEqual(1266 sum(d.is_dir() for d in reg._local_dir.iterdir()),1267 11268 )1269 self.assertEqual(1270 sum(d.is_file() for d in reg._local_dir.iterdir()),1271 expected_num_blocks1272 )1273 for apri, start_n, length in block_data:1274 key = reg._get_disk_block_key(_BLK_KEY_PREFIX, apri, None, start_n, length, False)1275 with reg._db.begin() as txn:1276 filename = Path(txn.get(key).decode("ASCII"))1277 self.assertTrue((reg._local_dir / filename).exists())1278 def test_remove_disk_block(self):1279 reg1 = Testy_Register(SAVES_DIR, "hi")1280 with self.assertRaisesRegex(Register_Error, "open.*remove_disk_block"):1281 reg1.remove_disk_block(Apri_Info(name = "fooopy doooopy"), 0, 0)1282 with reg1.open() as reg1:1283 apri1 = Apri_Info(name = "fooopy doooopy")1284 blk1 = Block(list(range(50)), apri1)1285 reg1.add_disk_block(blk1)1286 self._remove_disk_block_helper(reg1, [(apri1, 0, 50)])1287 reg1.remove_disk_block(apri1, 0, 50)1288 self._remove_disk_block_helper(reg1, [])1289 reg1.add_disk_block(blk1)1290 apri2 = Apri_Info(name = "fooopy doooopy2")1291 blk2 = Block(list(range(100)), apri2, 1000)1292 reg1.add_disk_block(blk2)1293 self._remove_disk_block_helper(reg1, [(apri1, 0, 50), (apri2, 1000, 100)])1294 reg1.remove_disk_block(apri2, 1000, 
100)1295 self._remove_disk_block_helper(reg1, [(apri1, 0, 50)])1296 reg1.remove_disk_block(apri1, 0, 50)1297 self._remove_disk_block_helper(reg1, [])1298 with self.assertRaisesRegex(Register_Error, "read-write"):1299 with reg1.open(read_only = True) as reg1:1300 reg1.remove_disk_block(apri1, 0, 0)1301 # add the same block to two registers1302 reg1 = Testy_Register(SAVES_DIR, "hello")1303 reg2 = Testy_Register(SAVES_DIR, "sup")1304 apri = Apri_Info(name = "hi")1305 blk = Block([], apri)1306 with reg1.open() as reg1:1307 reg1.add_disk_block(blk)1308 with reg2.open() as reg2:1309 reg2.add_disk_block(blk)1310 with reg1.open() as reg1:1311 reg1.remove_disk_block(apri, 0, 0)1312 self._remove_disk_block_helper(reg1, [])1313 with reg2.open() as reg2:1314 self._remove_disk_block_helper(reg2, [(apri, 0, 0)])1315 reg = Numpy_Register(SAVES_DIR, "hello")1316 with reg.open() as reg:1317 apri = Apri_Info(no = "yes")1318 blk = Block(np.arange(14), apri)1319 reg.add_disk_block(blk)1320 apri = Apri_Info(maybe = "maybe")1321 blk = Block(np.arange(20), apri)1322 reg.add_disk_block(blk)1323 for debug in [1,2,3]:1324 if debug == 3:1325 reg.compress(Apri_Info(maybe = "maybe"), 0, 20)1326 with self.assertRaises(KeyboardInterrupt):1327 reg.remove_disk_block(Apri_Info(maybe = "maybe"), 0, 20, debug = debug)1328 self.assertEqual(1329 2,1330 lmdb_count_keys(reg._db, _BLK_KEY_PREFIX)1331 )1332 self.assertEqual(1333 2,1334 lmdb_count_keys(reg._db, _COMPRESSED_KEY_PREFIX)1335 )1336 self.assertEqual(1337 2,1338 lmdb_count_keys(reg._db, _ID_APRI_KEY_PREFIX)1339 )1340 self.assertEqual(1341 2,1342 lmdb_count_keys(reg._db, _APRI_ID_KEY_PREFIX)1343 )1344 self.assertEqual(1345 2 + (1 if debug == 3 else 0),1346 sum(1 for d in reg._local_dir.iterdir() if d.is_file())1347 )1348 if debug == 3:1349 reg.decompress(Apri_Info(maybe = "maybe"), 0, 20)1350 self.assertTrue(np.all(1351 np.arange(14) ==1352 reg.get_disk_block(Apri_Info(no = "yes"), 0, 14).get_segment()1353 ))1354 self.assertTrue(np.all(1355 
np.arange(20) ==1356 reg.get_disk_block(Apri_Info(maybe = "maybe"), 0, 20).get_segment()1357 ))1358 def test_set_apos_info(self):1359 reg = Testy_Register(SAVES_DIR, "hello")1360 with self.assertRaisesRegex(Register_Error, "open.*set_apos_info"):1361 reg.set_apos_info(Apri_Info(no = "no"), Apos_Info(yes = "yes"))1362 with reg.open() as reg:1363 try:1364 reg.set_apos_info(Apri_Info(no = "no"), Apos_Info(yes = "yes"))1365 except Data_Not_Found_Error:1366 self.fail("Do not need apri_info to already be there to add apos_info")1367 except Exception as e:1368 raise e1369 self.assertEqual(1370 1,1371 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1372 )1373 reg.set_apos_info(Apri_Info(no="no"), Apos_Info(maybe="maybe"))1374 self.assertEqual(1375 1,1376 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1377 )1378 reg.set_apos_info(Apri_Info(weird="right"), Apos_Info(maybe="maybe"))1379 self.assertEqual(1380 2,1381 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1382 )1383 reg.set_apos_info(Apri_Info(weird="right"), Apos_Info(maybe="maybe"))1384 self.assertEqual(1385 2,1386 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1387 )1388 for debug in [1,2]:1389 with self.assertRaises(KeyboardInterrupt):1390 reg.set_apos_info(Apri_Info(__ = "____"), Apos_Info(eight = 9), debug)1391 self.assertEqual(1392 2,1393 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1394 )1395 with reg.open(read_only = True) as reg:1396 with self.assertRaisesRegex(Register_Error, "read-write"):1397 reg.set_apos_info(Apri_Info(no="no"), Apos_Info(yes="yes"))1398 def test_get_apos_info(self):1399 reg = Testy_Register(SAVES_DIR, "hello")1400 with self.assertRaisesRegex(Register_Error, "open.*get_apos_info"):1401 reg.get_apos_info(Apri_Info(no = "no"))1402 with reg.open() as reg:1403 apri = Apri_Info(no = "yes")1404 apos = Apos_Info(yes = "no")1405 with self.assertRaisesRegex(Data_Not_Found_Error, re.escape(str(apri))):1406 reg.get_apos_info(apri)1407 reg.set_apos_info(apri, apos)1408 self.assertEqual(1409 apos,1410 
reg.get_apos_info(apri)1411 )1412 apri = Apri_Info(no = "yes")1413 apos = Apos_Info(yes = "no", restart = Apos_Info(num = 1))1414 reg.set_apos_info(apri, apos)1415 self.assertEqual(1416 apos,1417 reg.get_apos_info(apri)1418 )1419 with reg.open(read_only = True) as reg:1420 try:1421 self.assertEqual(1422 apos,1423 reg.get_apos_info(apri)1424 )1425 except Register_Error as e:1426 if "read-write" in str(e):1427 self.fail("get_apos_info allows the register to be in read-only mode")1428 else:1429 raise e1430 except Exception as e:1431 raise e1432 def test_remove_apos_info(self):1433 reg = Testy_Register(SAVES_DIR, "hello")1434 with self.assertRaisesRegex(Register_Error, "open.*remove_apos_info"):1435 reg.remove_apos_info(Apri_Info(no = "no"))1436 with reg.open() as reg:1437 apri1 = Apri_Info(no = "yes")1438 apos1 = Apos_Info(yes = "no")1439 apri2 = Apri_Info(maam = "sir")1440 apos2 = Apos_Info(sir = "maam", restart = apos1)1441 reg.set_apos_info(apri1, apos1)1442 reg.remove_apos_info(apri1)1443 self.assertEqual(1444 0,1445 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1446 )1447 with self.assertRaisesRegex(Data_Not_Found_Error, re.escape(str(apri1))):1448 reg.get_apos_info(apri1)1449 reg.set_apos_info(apri1, apos1)1450 reg.set_apos_info(apri2, apos2)1451 reg.remove_apos_info(apri2)1452 self.assertEqual(1453 1,1454 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1455 )1456 with self.assertRaisesRegex(Data_Not_Found_Error, re.escape(str(apri2))):1457 reg.get_apos_info(apri2)1458 self.assertEqual(1459 apos1,1460 reg.get_apos_info(apri1)1461 )1462 for debug in [1,2]:1463 with self.assertRaises(KeyboardInterrupt):1464 reg.remove_apos_info(apri1, debug)1465 self.assertEqual(1466 1,1467 lmdb_count_keys(reg._db, _APOS_KEY_PREFIX)1468 )1469 self.assertEqual(1470 apos1,1471 reg.get_apos_info(apri1)1472 )1473 with reg.open(read_only = True) as reg:1474 with self.assertRaisesRegex(Register_Error, "read-write"):1475 reg.remove_apos_info(apri1)1476 def 
test_get_all_disk_blocks_no_recursive(self):1477 reg = Numpy_Register(SAVES_DIR, "HI")1478 with reg.open() as reg:1479 apri1 = Apri_Info(name = "abc")1480 apri2 = Apri_Info(name = "xyz")1481 blk1 = Block(np.arange(50), apri1, 0)1482 blk2 = Block(np.arange(50), apri1, 50)1483 blk3 = Block(np.arange(500), apri2, 1000)1484 reg.add_disk_block(blk1)1485 total = 01486 for i, blk in enumerate(reg.get_all_disk_blocks(apri1)):1487 total += 11488 if i == 0:1489 self.assertEqual(1490 blk1,1491 blk1492 )1493 else:1494 self.fail()1495 self.assertEqual(1496 1,1497 total1498 )1499 reg.add_disk_block(blk2)1500 total = 01501 for i, blk in enumerate(reg.get_all_disk_blocks(apri1)):1502 total += 11503 if i == 0:1504 self.assertEqual(1505 blk1,1506 blk1507 )1508 elif i == 1:1509 self.assertEqual(1510 blk2,1511 blk1512 )1513 else:1514 self.fail()1515 self.assertEqual(1516 2,1517 total1518 )1519 reg.add_disk_block(blk3)1520 total = 01521 for i, blk in enumerate(reg.get_all_disk_blocks(apri1)):1522 total += 11523 if i == 0:1524 self.assertEqual(1525 blk1,1526 blk1527 )1528 elif i == 1:1529 self.assertEqual(1530 blk2,1531 blk1532 )1533 else:1534 self.fail()1535 self.assertEqual(1536 2,1537 total1538 )1539 total = 01540 for i,blk in enumerate(reg.get_all_disk_blocks(apri2)):1541 total += 11542 if i == 0:1543 self.assertEqual(1544 blk3,1545 blk1546 )1547 else:1548 self.fail()1549 self.assertEqual(1550 1,1551 total1552 )1553 def test__iter_subregisters(self):1554 reg = Testy_Register(SAVES_DIR, "hello")1555 with reg.open() as reg:1556 total = 01557 for i,_ in enumerate(reg._iter_subregisters()):1558 total += 11559 self.assertEqual(1560 0,1561 total1562 )1563 reg = Testy_Register(SAVES_DIR, "hello")1564 with reg.open() as reg:1565 with reg._db.begin(write = True) as txn:1566 txn.put(reg._get_subregister_key(), _SUB_VAL)1567 total = 01568 for i, _reg in enumerate(reg._iter_subregisters()):1569 total += 11570 if i == 0:1571 self.assertIs(1572 reg,1573 _reg1574 )1575 else:1576 self.fail()1577 
self.assertEqual(1578 1,1579 total1580 )1581 reg1 = Testy_Register(SAVES_DIR, "hello")1582 reg2 = Testy_Register(SAVES_DIR, "hello")1583 reg3 = Testy_Register(SAVES_DIR, "hello")1584 with reg2.open():pass1585 with reg3.open():pass1586 with reg1.open() as reg:1587 with reg1._db.begin(write=True) as txn:1588 txn.put(reg2._get_subregister_key(), _SUB_VAL)1589 txn.put(reg3._get_subregister_key(), _SUB_VAL)1590 total = 01591 regs = []1592 for i, _reg in enumerate(reg1._iter_subregisters()):1593 total += 11594 if i == 0 or i == 1:1595 self.assertTrue(1596 _reg is reg2 or _reg is reg31597 )1598 regs.append(_reg)1599 else:1600 self.fail()1601 self.assertEqual(1602 2,1603 total1604 )1605 self.assertFalse(1606 regs[0] is regs[1]1607 )1608 reg1 = Testy_Register(SAVES_DIR, "hello")1609 reg2 = Testy_Register(SAVES_DIR, "hello")1610 reg3 = Testy_Register(SAVES_DIR, "hello")1611 with reg3.open():pass1612 with reg2.open():1613 with reg2._db.begin(write=True) as txn:1614 txn.put(reg3._get_subregister_key(), _SUB_VAL)1615 with reg1.open() as reg:1616 with reg1._db.begin(write=True) as txn:1617 txn.put(reg2._get_subregister_key(), _SUB_VAL)1618 total = 01619 regs = []1620 for i, _reg in enumerate(reg._iter_subregisters()):1621 total += 11622 if i == 0:1623 self.assertTrue(1624 _reg is reg21625 )1626 regs.append(_reg)1627 else:1628 self.fail()1629 self.assertEqual(1630 1,1631 total1632 )1633 with reg2.open() as reg:1634 total = 01635 regs = []1636 for i, _reg in enumerate(reg._iter_subregisters()):1637 total += 11638 if i == 0:1639 self.assertTrue(1640 _reg is reg31641 )1642 regs.append(_reg)1643 else:1644 self.fail()1645 self.assertEqual(1646 1,1647 total1648 )1649 def test_get_disk_block_by_n_no_recursive(self):1650 reg = Numpy_Register(SAVES_DIR, "hello")1651 with self.assertRaises(Register_Error):1652 reg.get_disk_block_by_n(Apri_Info(name = "no"), 50)1653 reg = Numpy_Register(SAVES_DIR, "hello")1654 apri1 = Apri_Info(name = "sup")1655 apri2 = Apri_Info(name = "hi")1656 blk1 = 
Block(np.arange(75), apri1)1657 blk2 = Block(np.arange(125), apri1, 75)1658 blk3 = Block(np.arange(1000), apri2, 100)1659 blk4 = Block(np.arange(100), apri2, 2000)1660 with reg.open() as reg:1661 reg.add_disk_block(blk1)1662 reg.add_disk_block(blk2)1663 reg.add_disk_block(blk3)1664 reg.add_disk_block(blk4)1665 for n in [0, 1, 2, 72, 73, 74]:1666 self.assertEqual(1667 blk1,1668 reg.get_disk_block_by_n(apri1, n)1669 )1670 for n in [75, 76, 77, 197, 198, 199]:1671 self.assertEqual(1672 blk2,1673 reg.get_disk_block_by_n(apri1, n)1674 )1675 for n in [-2, -1]:1676 with self.assertRaisesRegex(ValueError, "non-negative"):1677 reg.get_disk_block_by_n(apri1, n)1678 for n in [200, 201, 1000]:1679 with self.assertRaises(Data_Not_Found_Error):1680 reg.get_disk_block_by_n(apri1, n)1681 def test__check_no_cycles_from(self):1682 reg = Testy_Register(SAVES_DIR, "hello")1683 with self.assertRaises(Register_Error):1684 reg._check_no_cycles_from(reg)1685 reg = Testy_Register(SAVES_DIR, "hello")1686 with reg.open() as reg:pass1687 # loop1688 self.assertFalse(1689 reg._check_no_cycles_from(reg)1690 )1691 reg1 = Testy_Register(SAVES_DIR, "hello")1692 reg2 = Testy_Register(SAVES_DIR, "hello")1693 reg3 = Testy_Register(SAVES_DIR, "hello")1694 reg4 = Testy_Register(SAVES_DIR, "hello")1695 reg5 = Testy_Register(SAVES_DIR, "hello")1696 reg6 = Testy_Register(SAVES_DIR, "hello")1697 reg7 = Testy_Register(SAVES_DIR, "hello")1698 with reg1.open(): pass1699 with reg2.open(): pass1700 with reg3.open(): pass1701 with reg4.open(): pass1702 with reg5.open(): pass1703 with reg6.open(): pass1704 with reg7.open(): pass1705 # disjoint1706 self.assertTrue(1707 reg2._check_no_cycles_from(reg1)1708 )1709 # 1-path (1 -> 2)1710 with reg1.open() as reg1:1711 with reg1._db.begin(write = True) as txn:1712 txn.put(reg2._get_subregister_key(), _SUB_VAL)1713 self.assertFalse(1714 reg1._check_no_cycles_from(reg2)1715 )1716 self.assertTrue(1717 reg2._check_no_cycles_from(reg1)1718 )1719 self.assertFalse(1720 
reg2._check_no_cycles_from(reg2)1721 )1722 self.assertFalse(1723 reg1._check_no_cycles_from(reg1)1724 )1725 self.assertTrue(1726 reg3._check_no_cycles_from(reg2)1727 )1728 self.assertTrue(1729 reg2._check_no_cycles_from(reg3)1730 )1731 self.assertTrue(1732 reg3._check_no_cycles_from(reg1)1733 )1734 self.assertTrue(1735 reg1._check_no_cycles_from(reg3)1736 )1737 # 2-path (1 -> 2 -> 3)1738 with reg2.open() as reg2:1739 with reg2._db.begin(write=True) as txn:1740 txn.put(reg3._get_subregister_key(), _SUB_VAL)1741 self.assertFalse(1742 reg1._check_no_cycles_from(reg1)1743 )1744 self.assertFalse(1745 reg2._check_no_cycles_from(reg2)1746 )1747 self.assertFalse(1748 reg3._check_no_cycles_from(reg3)1749 )1750 self.assertFalse(1751 reg1._check_no_cycles_from(reg2)1752 )1753 self.assertTrue(1754 reg2._check_no_cycles_from(reg1)1755 )1756 self.assertFalse(1757 reg1._check_no_cycles_from(reg3)1758 )1759 self.assertTrue(1760 reg3._check_no_cycles_from(reg1)1761 )1762 self.assertFalse(1763 reg2._check_no_cycles_from(reg3)1764 )1765 self.assertTrue(1766 reg3._check_no_cycles_from(reg2)1767 )1768 self.assertTrue(1769 reg4._check_no_cycles_from(reg1)1770 )1771 self.assertTrue(1772 reg4._check_no_cycles_from(reg2)1773 )1774 self.assertTrue(1775 reg4._check_no_cycles_from(reg3)1776 )1777 self.assertTrue(1778 reg1._check_no_cycles_from(reg4)1779 )1780 self.assertTrue(1781 reg2._check_no_cycles_from(reg4)1782 )1783 self.assertTrue(1784 reg3._check_no_cycles_from(reg4)1785 )1786 # 2-cycle (4 -> 5 -> 4)1787 with reg4.open() as reg4:1788 with reg4._db.begin(write = True) as txn:1789 txn.put(reg5._get_subregister_key(), _SUB_VAL)1790 with reg5.open() as reg5:1791 with reg5._db.begin(write=True) as txn:1792 txn.put(reg4._get_subregister_key(), _SUB_VAL)1793 self.assertFalse(1794 reg4._check_no_cycles_from(reg4)1795 )1796 self.assertFalse(1797 reg5._check_no_cycles_from(reg5)1798 )1799 self.assertFalse(1800 reg4._check_no_cycles_from(reg5)1801 )1802 self.assertFalse(1803 
reg5._check_no_cycles_from(reg4)1804 )1805 self.assertTrue(1806 reg6._check_no_cycles_from(reg5)1807 )1808 self.assertTrue(1809 reg6._check_no_cycles_from(reg4)1810 )1811 self.assertTrue(1812 reg5._check_no_cycles_from(reg6)1813 )1814 self.assertTrue(1815 reg4._check_no_cycles_from(reg6)1816 )1817 # 2 cycle with tail (4 -> 5 -> 4 -> 6)1818 with reg4.open() as reg4:1819 with reg4._db.begin(write = True) as txn:1820 txn.put(reg6._get_subregister_key(), _SUB_VAL)1821 self.assertFalse(1822 reg4._check_no_cycles_from(reg4)1823 )1824 self.assertFalse(1825 reg5._check_no_cycles_from(reg5)1826 )1827 self.assertFalse(1828 reg6._check_no_cycles_from(reg6)1829 )1830 self.assertFalse(1831 reg4._check_no_cycles_from(reg5)1832 )1833 self.assertFalse(1834 reg5._check_no_cycles_from(reg4)1835 )1836 self.assertFalse(1837 reg4._check_no_cycles_from(reg6)1838 )1839 self.assertTrue(1840 reg6._check_no_cycles_from(reg4)1841 )1842 self.assertFalse(1843 reg5._check_no_cycles_from(reg6)1844 )1845 self.assertTrue(1846 reg6._check_no_cycles_from(reg5)1847 )1848 self.assertTrue(1849 reg7._check_no_cycles_from(reg4)1850 )1851 self.assertTrue(1852 reg7._check_no_cycles_from(reg5)1853 )1854 self.assertTrue(1855 reg7._check_no_cycles_from(reg6)1856 )1857 self.assertTrue(1858 reg4._check_no_cycles_from(reg7)1859 )1860 self.assertTrue(1861 reg5._check_no_cycles_from(reg7)1862 )1863 self.assertTrue(1864 reg6._check_no_cycles_from(reg7)1865 )1866 # 3-cycle (1 -> 2 -> 3 -> 1)1867 with reg3.open() as reg2:1868 with reg3._db.begin(write=True) as txn:1869 txn.put(reg1._get_subregister_key(), _SUB_VAL)1870 self.assertFalse(1871 reg1._check_no_cycles_from(reg1)1872 )1873 self.assertFalse(1874 reg2._check_no_cycles_from(reg2)1875 )1876 self.assertFalse(1877 reg3._check_no_cycles_from(reg3)1878 )1879 self.assertFalse(1880 reg1._check_no_cycles_from(reg2)1881 )1882 self.assertFalse(1883 reg2._check_no_cycles_from(reg1)1884 )1885 self.assertFalse(1886 reg1._check_no_cycles_from(reg3)1887 )1888 
self.assertFalse(1889 reg3._check_no_cycles_from(reg1)1890 )1891 self.assertFalse(1892 reg2._check_no_cycles_from(reg3)1893 )1894 self.assertFalse(1895 reg3._check_no_cycles_from(reg2)1896 )1897 self.assertTrue(1898 reg7._check_no_cycles_from(reg1)1899 )1900 self.assertTrue(1901 reg7._check_no_cycles_from(reg2)1902 )1903 self.assertTrue(1904 reg7._check_no_cycles_from(reg3)1905 )1906 self.assertTrue(1907 reg1._check_no_cycles_from(reg7)1908 )1909 self.assertTrue(1910 reg2._check_no_cycles_from(reg7)1911 )1912 self.assertTrue(1913 reg3._check_no_cycles_from(reg7)1914 )1915 # long path (0 -> 1 -> ... -> N)1916 N = 101917 regs = [Numpy_Register(SAVES_DIR, f"{i}") for i in range(N+2)]1918 for reg in regs:1919 with reg.open():pass1920 for i in range(N):1921 with regs[i].open() as reg:1922 with reg._db.begin(write=True) as txn:1923 txn.put(regs[i+1]._get_subregister_key(), _SUB_VAL)1924 for i, j in product(range(N+1), repeat = 2):1925 val = regs[i]._check_no_cycles_from(regs[j])1926 if i == j:1927 self.assertFalse(val)1928 elif i > j:1929 self.assertTrue(val)1930 else:1931 self.assertFalse(val)1932 for i in range(N+1):1933 self.assertTrue(1934 regs[i]._check_no_cycles_from(regs[N+1])1935 )1936 self.assertTrue(1937 regs[N+1]._check_no_cycles_from(regs[i])1938 )1939 # adding arc between 2 cycle with tail (4 -> 5 -> 4 -> 6) to 3-cycle (1 -> 2 -> 3 -> 1)1940 for i, j in product([1,2,3], [4,5,6]):1941 regi = eval(f"reg{i}")1942 regj = eval(f"reg{j}")1943 self.assertTrue(regi._check_no_cycles_from(regj))1944 def test_add_subregister(self):1945 reg1 = Testy_Register(SAVES_DIR, "hello")1946 reg2 = Testy_Register(SAVES_DIR, "hello")1947 with self.assertRaisesRegex(Register_Error, "open.*add_subregister"):1948 reg1.add_subregister(reg2)1949 reg1 = Testy_Register(SAVES_DIR, "hello")1950 reg2 = Testy_Register(SAVES_DIR, "hello")1951 with reg1.open() as reg1:1952 with self.assertRaisesRegex(Register_Error, "add_subregister"):1953 reg1.add_subregister(reg2)1954 reg1 = 
Testy_Register(SAVES_DIR, "hello")1955 reg2 = Testy_Register(SAVES_DIR, "hello")1956 reg3 = Testy_Register(SAVES_DIR, "hello")1957 with reg2.open(): pass1958 with reg1.open() as reg1:1959 try:1960 reg1.add_subregister(reg2)1961 except Register_Error:1962 self.fail()1963 with reg3.open(): pass1964 with self.assertRaisesRegex(Register_Error, "read-write"):1965 with reg2.open(read_only = True) as reg2:1966 reg2.add_subregister(reg3)1967 with reg2.open() as reg2:1968 try:1969 reg2.add_subregister(reg3)1970 except Register_Error:1971 self.fail()1972 with reg1.open() as reg1:1973 try:1974 reg1.add_subregister(reg3)1975 except Register_Error:1976 self.fail()1977 reg1 = Testy_Register(SAVES_DIR, "hello")1978 reg2 = Testy_Register(SAVES_DIR, "hello")1979 reg3 = Testy_Register(SAVES_DIR, "hello")1980 with reg3.open(): pass1981 with reg2.open() as reg2:1982 try:1983 reg2.add_subregister(reg3)1984 except Register_Error:1985 self.fail()1986 with reg1.open() as reg1:1987 try:1988 reg1.add_subregister(reg2)1989 except Register_Error:1990 self.fail()1991 with reg3.open() as reg3:1992 with self.assertRaises(Register_Error):1993 reg3.add_subregister(reg1)1994 reg1 = Testy_Register(SAVES_DIR, "hello")1995 reg2 = Testy_Register(SAVES_DIR, "hello")1996 with reg1.open():pass1997 with reg2.open():pass1998 with reg1.open() as reg1:1999 for debug in [1,2]:2000 with self.assertRaises(KeyboardInterrupt):2001 reg1.add_subregister(reg2, debug)2002 self.assertEqual(2003 0,2004 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2005 )2006 def test_remove_subregister(self):2007 reg1 = Testy_Register(SAVES_DIR, "hello")2008 reg2 = Testy_Register(SAVES_DIR, "hello")2009 reg3 = Testy_Register(SAVES_DIR, "hello")2010 with reg1.open():pass2011 with reg2.open():pass2012 with self.assertRaisesRegex(Register_Error, "open.*remove_subregister"):2013 reg1.remove_subregister(reg2)2014 with reg3.open():pass2015 with reg1.open() as reg1:2016 reg1.add_subregister(reg2)2017 self.assertEqual(2018 1,2019 
lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2020 )2021 reg1.remove_subregister(reg2)2022 self.assertEqual(2023 0,2024 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2025 )2026 reg1.add_subregister(reg2)2027 reg1.add_subregister(reg3)2028 self.assertEqual(2029 2,2030 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2031 )2032 reg1.remove_subregister(reg2)2033 self.assertEqual(2034 1,2035 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2036 )2037 for debug in [1,2]:2038 with self.assertRaises(KeyboardInterrupt):2039 reg1.remove_subregister(reg3, debug)2040 self.assertEqual(2041 1,2042 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2043 )2044 reg1.remove_subregister(reg3)2045 self.assertEqual(2046 0,2047 lmdb_count_keys(reg1._db, _SUB_KEY_PREFIX)2048 )2049 with self.assertRaisesRegex(Register_Error, "read-write"):2050 with reg1.open(read_only = True) as reg1:2051 reg1.remove_subregister(reg2)2052 def test_get_all_ram_blocks(self):2053 reg = Testy_Register(SAVES_DIR, "whatever")2054 apri = Apri_Info(name = "whatev")2055 with reg.open() as reg: pass2056 with self.assertRaisesRegex(Register_Error, "get_all_ram_blocks"):2057 for _ in reg.get_all_ram_blocks(apri, True): pass2058 reg = Testy_Register(SAVES_DIR, "whatever")2059 apri1 = Apri_Info(name = "foomy")2060 apri2 = Apri_Info(name = "doomy")2061 blk1 = Block(list(range(10)), apri1)2062 blk2 = Block(list(range(20)), apri1, 10)2063 blk3 = Block(list(range(14)), apri2, 50)2064 blk4 = Block(list(range(100)), apri2, 120)2065 blk5 = Block(list(range(120)), apri2, 1000)2066 reg1 = Testy_Register(SAVES_DIR, "helllo")2067 reg2 = Testy_Register(SAVES_DIR, "suuup")2068 reg1.add_ram_block(blk1)2069 reg1.add_ram_block(blk2)2070 reg1.add_ram_block(blk3)2071 reg2.add_ram_block(blk4)2072 reg2.add_ram_block(blk5)2073 try:2074 reg1.get_all_ram_blocks(apri1, True)2075 except Register_Error:2076 self.fail("_check_open_raise should only be called if data couldn't be found in initial register")2077 total = 02078 for i, blk in 
enumerate(reg1.get_all_ram_blocks(apri1)):2079 total += 12080 if i == 0:2081 self.assertIs(2082 blk1,2083 blk2084 )2085 elif i == 1:2086 self.assertIs(2087 blk2,2088 blk2089 )2090 else:2091 self.fail()2092 self.assertEqual(2093 2,2094 total2095 )2096 with reg2.open(): pass2097 with reg1.open() as reg1:2098 reg1.add_subregister(reg2)2099 total = 02100 for i, blk in enumerate(reg1.get_all_ram_blocks(apri1, True)):2101 total += 12102 if i == 0:2103 self.assertIs(2104 blk1,2105 blk2106 )2107 elif i == 1:2108 self.assertIs(2109 blk2,2110 blk2111 )2112 else:2113 self.fail()2114 self.assertEqual(2115 2,2116 total2117 )2118 total = 02119 for i, blk in enumerate(reg1.get_all_ram_blocks(apri2, True)):2120 total += 12121 if i == 0:2122 self.assertIs(2123 blk3,2124 blk2125 )2126 elif i == 1:2127 self.assertIs(2128 blk4,2129 blk2130 )2131 elif i == 2:2132 self.assertIs(2133 blk5,2134 blk2135 )2136 else:2137 self.fail()2138 self.assertEqual(2139 3,2140 total2141 )2142 def test_get_ram_block_by_n(self):2143 reg = Testy_Register(SAVES_DIR, "whatever")2144 apri = Apri_Info(name = "whatev")2145 with reg.open() as reg: pass2146 with self.assertRaisesRegex(Register_Error, "get_ram_block_by_n"):2147 for _ in reg.get_ram_block_by_n(apri, 0, True): pass2148 apri1 = Apri_Info(name = "foomy")2149 apri2 = Apri_Info(name = "doomy")2150 blk1 = Block(list(range(10)), apri1)2151 blk2 = Block(list(range(20)), apri1, 10)2152 blk3 = Block(list(range(14)), apri2, 50)2153 blk4 = Block(list(range(100)), apri2, 120)2154 blk5 = Block(list(range(120)), apri2, 1000)2155 reg1 = Testy_Register(SAVES_DIR, "helllo")2156 reg2 = Testy_Register(SAVES_DIR, "suuup")2157 reg1.add_ram_block(blk1)2158 reg1.add_ram_block(blk2)2159 reg1.add_ram_block(blk3)2160 reg2.add_ram_block(blk4)2161 reg2.add_ram_block(blk5)2162 try:2163 reg1.get_ram_block_by_n(apri1, 0, True)2164 except Register_Error:2165 self.fail("_check_open_raise should only be called if data couldn't be found in initial register")2166 tests = [2167 (reg1, 
(apri1, 0, True ), blk1),2168 (reg1, (apri1, 0, False), blk1),2169 (reg1, (apri1, 9, True ), blk1),2170 (reg1, (apri1, 9, False), blk1),2171 (reg1, (apri1, 10, True ), blk2),2172 (reg1, (apri1, 10, False), blk2),2173 (reg1, (apri1, 29, True ), blk2),2174 (reg1, (apri1, 29, False), blk2),2175 (reg1, (apri2, 50, True ), blk3),2176 (reg1, (apri2, 50, False), blk3),2177 (reg1, (apri2, 63, True ), blk3),2178 (reg1, (apri2, 63, False), blk3),2179 (reg2, (apri2, 120, True ), blk4),2180 (reg2, (apri2, 219, True ), blk4),2181 (reg2, (apri2, 1000, True ), blk5),2182 (reg2, (apri2, 1119, True ), blk5)2183 ]2184 for reg, args, blk in tests:2185 if args[2]:2186 with reg.open() as reg:2187 self.assertIs(2188 blk,2189 reg.get_ram_block_by_n(*args)2190 )2191 else:2192 self.assertIs(2193 blk,2194 reg.get_ram_block_by_n(*args)2195 )2196 def test_disk_intervals(self):2197 reg = Testy_Register(SAVES_DIR, "sup")2198 apri1 = Apri_Info(descr = "hello")2199 apri2 = Apri_Info(descr = "hey")2200 with self.assertRaisesRegex(Register_Error, "open.*disk_intervals"):2201 reg.disk_intervals(apri1)2202 with reg.open() as reg:2203 for apri in [apri1, apri2]:2204 with self.assertRaisesRegex(Data_Not_Found_Error, "Apri_Info"):2205 reg.disk_intervals(apri)2206 with reg.open() as reg:2207 reg.add_disk_block(Block(list(range(50)), apri1))2208 self.assertEqual(2209 [(0, 50)],2210 reg.disk_intervals(apri1)2211 )2212 with self.assertRaisesRegex(Data_Not_Found_Error, "Apri_Info"):2213 reg.disk_intervals(apri2)2214 reg.add_disk_block(Block(list(range(100)), apri1))2215 self.assertEqual(2216 [(0, 100), (0, 50)],2217 reg.disk_intervals(apri1)2218 )2219 reg.add_disk_block(Block(list(range(1000)), apri1, 1))2220 self.assertEqual(2221 [(0, 100), (0, 50), (1, 1000)],2222 reg.disk_intervals(apri1)2223 )2224 reg.add_disk_block(Block(list(range(420)), apri2, 69))2225 self.assertEqual(2226 [(0, 100), (0, 50), (1, 1000)],2227 reg.disk_intervals(apri1)2228 )2229 self.assertEqual(2230 [(69, 420)],2231 
reg.disk_intervals(apri2)2232 )2233 # blk = Block(list(range(50)), )2234 def test__iter_ram_and_disk_block_datas(self):pass2235 def test_get_disk_block_again(self):2236 reg = Numpy_Register(SAVES_DIR, "test")2237 apri1 = Apri_Info(descr = "hey")2238 with self.assertRaisesRegex(Register_Error, "open.*get_disk_block"):2239 reg.get_disk_block(apri1)2240 with reg.open() as reg:2241 with self.assertRaisesRegex(TypeError, "Apri_Info"):2242 reg.get_disk_block("poo")2243 with self.assertRaisesRegex(TypeError, "int"):2244 reg.get_disk_block(apri1, "butt")2245 with self.assertRaisesRegex(TypeError, "int"):2246 reg.get_disk_block(apri1, 0, "dumb")2247 with self.assertRaisesRegex(ValueError, "non-negative"):2248 reg.get_disk_block(apri1, -1)2249 with self.assertRaisesRegex(ValueError, "non-negative"):2250 reg.get_disk_block(apri1, 0, -1)2251 with self.assertRaises(ValueError):2252 reg.get_disk_block(apri1, length= -1)2253 reg.add_disk_block(Block(list(range(50)), apri1))2254 self.assertTrue(np.all(2255 reg.get_disk_block(apri1).get_segment() == np.arange(50)2256 ))2257 self.assertTrue(np.all(2258 reg.get_disk_block(apri1, 0).get_segment() == np.arange(50)2259 ))2260 self.assertTrue(np.all(2261 reg.get_disk_block(apri1, 0, 50).get_segment() == np.arange(50)2262 ))2263 reg.add_disk_block(Block(list(range(51)), apri1))2264 self.assertTrue(np.all(2265 reg.get_disk_block(apri1).get_segment() == np.arange(51)2266 ))2267 self.assertTrue(np.all(2268 reg.get_disk_block(apri1, 0).get_segment() == np.arange(51)2269 ))2270 self.assertTrue(np.all(2271 reg.get_disk_block(apri1, 0, 51).get_segment() == np.arange(51)2272 ))2273 self.assertTrue(np.all(2274 reg.get_disk_block(apri1, 0, 50).get_segment() == np.arange(50)2275 ))2276 reg.add_disk_block(Block(list(range(100)), apri1, 1))2277 self.assertTrue(np.all(2278 reg.get_disk_block(apri1).get_segment() == np.arange(51)2279 ))2280 self.assertTrue(np.all(2281 reg.get_disk_block(apri1, 0).get_segment() == np.arange(51)2282 ))2283 
self.assertTrue(np.all(2284 reg.get_disk_block(apri1, 0, 51).get_segment() == np.arange(51)2285 ))2286 self.assertTrue(np.all(2287 reg.get_disk_block(apri1, 0, 50).get_segment() == np.arange(50)2288 ))2289 self.assertTrue(np.all(2290 reg.get_disk_block(apri1, 1, 100).get_segment() == np.arange(100)2291 ))2292 # def test_get_all_apri_info(self):2293 #2294 # reg = Testy_Register(SAVES_DIR, "test")2295 #2296 # with self.assertRaisesRegex(Register_Error, "open.*get_all_apri_info"):2297 # reg.get_all_apri_info()2298 #2299 # for i in range(200):2300 #2301 # apri1 = Apri_Info(name = i)2302 # apri2 = Apri_Info(name = f"{i}")2303 #2304 # with reg.open() as reg:2305 #2306 # reg.add_disk_block(Block([1], apri1))2307 # reg.add_ram_block(Block([1], apri2))2308 #2309 # get = reg.get_all_apri_info()2310 #2311 # self.assertEqual(2312 # 2*(i+1),2313 # len(get)2314 # )2315 #2316 # for j in range(i+1):2317 #2318 # self.assertIn(2319 # Apri_Info(name = i),2320 # get2321 # )2322 #2323 # self.assertIn(2324 # Apri_Info(name = f"{i}"),2325 # get2326 # )2327 def _is_compressed_helper(self, reg, apri, start_n, length, data_file_bytes = None):2328 compressed_key = reg._get_disk_block_key(_COMPRESSED_KEY_PREFIX, apri, None, start_n, length, False)2329 self.assertTrue(lmdb_has_key(reg._db, compressed_key))2330 with reg._db.begin() as txn:2331 val = txn.get(compressed_key)2332 self.assertNotEqual(val, _IS_NOT_COMPRESSED_VAL)2333 zip_filename = (reg._local_dir / val.decode("ASCII")).with_suffix(".zip")2334 self.assertTrue(zip_filename.exists())2335 self.assertEqual(zip_filename.suffix, ".zip")2336 data_key = reg._get_disk_block_key(_BLK_KEY_PREFIX, apri, None, start_n, length, False)2337 self.assertTrue(lmdb_has_key(reg._db, data_key))2338 if data_file_bytes is not None:2339 with reg._db.begin() as txn:2340 self.assertEqual(txn.get(data_key), data_file_bytes)2341 data_filename = reg._local_dir / data_file_bytes.decode("ASCII")2342 self.assertTrue(data_filename.exists())2343 
self.assertLessEqual(os.stat(data_filename).st_size, 2)2344 def _is_not_compressed_helper(self, reg, apri, start_n, length):2345 compressed_key = reg._get_disk_block_key(_COMPRESSED_KEY_PREFIX, apri, None, start_n, length, False)2346 self.assertTrue(lmdb_has_key(reg._db, compressed_key))2347 with reg._db.begin() as txn:2348 self.assertEqual(txn.get(compressed_key), _IS_NOT_COMPRESSED_VAL)2349 data_key = reg._get_disk_block_key(_BLK_KEY_PREFIX, apri, None, start_n, length, False)2350 with reg._db.begin() as txn:2351 return txn.get(data_key)2352 def test_compress(self):2353 reg2 = Numpy_Register(SAVES_DIR, "testy2")2354 with self.assertRaisesRegex(Register_Error, "open.*compress"):2355 reg2.compress(Apri_Info(num = 0))2356 apri1 = Apri_Info(descr = "sup")2357 apri2 = Apri_Info(descr = "hey")2358 apris = [apri1, apri1, apri2]2359 length1 = 5002360 blk1 = Block(np.arange(length1), apri1)2361 length2 = 10000002362 blk2 = Block(np.arange(length2), apri1)2363 length3 = 20002364 blk3 = Block(np.arange(length3), apri2)2365 lengths = [length1, length2, length3]2366 with reg2.open() as reg2:2367 reg2.add_disk_block(blk1)2368 reg2.add_disk_block(blk2)2369 reg2.add_disk_block(blk3)2370 for i, (apri, length) in enumerate(zip(apris, lengths)):2371 data_file_bytes = self._is_not_compressed_helper(reg2, apri, 0, length)2372 reg2.compress(apri, 0, length)2373 self._is_compressed_helper(reg2, apri, 0, length, data_file_bytes)2374 for _apri, _length in zip(apris[i+1:], lengths[i+1:]):2375 self._is_not_compressed_helper(reg2, _apri, 0, _length)2376 expected = str(apri).replace("(", "\\(").replace(")", "\\)") + f".*start_n.*0.*length.*{length}"2377 with self.assertRaisesRegex(Compression_Error, expected):2378 reg2.compress(apri, 0, length)2379 with self.assertRaisesRegex(Register_Error, "read-write"):2380 with reg2.open(read_only = True) as reg2:2381 reg2.compress(Apri_Info(num = 0))2382 reg = Numpy_Register(SAVES_DIR, "no")2383 with reg.open() as reg:2384 apri = Apri_Info(num = 7)2385 
blk = Block(np.arange(40), apri)2386 reg.add_disk_block(blk)2387 for debug in [1,2,3,4]:2388 with self.assertRaises(KeyboardInterrupt):2389 reg.compress(apri, debug = debug)2390 self._is_not_compressed_helper(reg, apri, 0, 40)2391 def test_decompress(self):2392 reg1 = Numpy_Register(SAVES_DIR, "lol")2393 apri1 = Apri_Info(descr = "LOL")2394 apri2 = Apri_Info(decr = "HAHA")2395 apris = [apri1, apri1, apri2]2396 with self.assertRaisesRegex(Register_Error, "open.*decompress"):2397 reg1.decompress(apri1)2398 lengths = [50, 500, 5000]2399 start_ns = [0, 0, 1000]2400 data = [np.arange(length) for length in lengths]2401 blks = [Block(*t) for t in zip(data, apris, start_ns)]2402 data_files_bytes = []2403 with reg1.open() as reg1:2404 for blk in blks:2405 reg1.add_disk_block(blk)2406 data_files_bytes.append(2407 self._is_not_compressed_helper(reg1, blk.get_apri(), blk.get_start_n(), len(blk))2408 )2409 for t in zip(apris, start_ns, lengths):2410 reg1.compress(*t)2411 for i, t in enumerate(zip(apris, start_ns, lengths)):2412 reg1.decompress(*t)2413 self._is_not_compressed_helper(reg1, *t)2414 for _t in zip(apris[i+1:], start_ns[i+1:], lengths[i+1:], data_files_bytes[i+1:]):2415 self._is_compressed_helper(reg1, *_t)2416 expected = str(t[0]).replace("(", "\\(").replace(")", "\\)") + f".*start_n.*0.*length.*{t[2]}"2417 with self.assertRaisesRegex(Decompression_Error, expected):2418 reg1.decompress(*t)2419 with self.assertRaisesRegex(Register_Error, "read-only"):2420 with reg1.open(read_only = True) as reg1:2421 reg1.decompress(apri1)...

Full Screen

Full Screen

registers.py

Source:registers.py Github

copy

Full Screen

...242 def __repr__(self):243 return f"{self.__class__.__name__}(\"{str(self.saves_directory)}\", \"{self._msg}\")"244 def __contains__(self, apri):245 self._check_open_raise("__contains__")246 if any(blk.get_apri() == apri for blk in self._ram_blks):247 return True248 else:249 return lmdb_has_key(self._db, _APRI_ID_KEY_PREFIX + apri.to_json().encode("ASCII"))250 def __iter__(self):251 with lmdb_prefix_iterator(self._db, _ID_APRI_KEY_PREFIX) as it:252 for _, apri_json in it:253 yield Apri_Info.from_json(apri_json.decode("ASCII"))254 def set_message(self, message):255 """Give this `Register` a brief description.256 WARNING: This method OVERWRITES the current message. In order to append a new message to the current one, do257 something like the following:258 old_message = str(reg)259 new_message = old_message + " Hello!"260 reg.set_message(new_message)261 :param message: (type `str`)262 """263 if not isinstance(message, str):264 raise TypeError("`message` must be a string.")265 self._msg = message266 if self._created:267 with self._msg_filepath.open("w") as fh:268 fh.write(message)269 def set_start_n_info(self, head = None, tail_length = None, debug = 0):270 """Set the range of the `start_n` parameters of disk `Block`s belonging to this `Register`.271 Reset to default `head` and `tail_length` by omitting the parameters.272 If the `start_n` parameter is very large (of order more than trillions), then the `Register` database can273 become very bloated by storing many redundant digits for the `start_n` parameter. Calling this method with274 appropriate `head` and `tail_length` parameters alleviates the bloat.275 The "head" and "tail" of a non-negative number x is defined by x = head * 10^L + tail, where L is the "length",276 or the number of digits, of "tail". 
(L must be at least 1, and 0 is considered to have 1 digit.)277 By calling `set_start_n_info(head, tail_length)`, the user is asserting that the start_n of every disk278 `Block` belong to this `Register` can be decomposed in the fashion start_n = head * 10^tail_length + tail. The279 user is discouraged to call this method for large `tail_length` values (>12), as this is likely unnecessary and280 defeats the purpose of this method.281 :param head: (type `int`, optional) Non-negative. If omitted, resets this `Register` to the default `head`.282 :param tail_length: (type `int`) Positive. If omitted, resets this `Register` to the default `tail_length`.283 """284 # DEBUG : 1, 2285 self._check_open_raise("set_start_n_info")286 self._check_readwrite_raise("set_start_n_info")287 if head is not None and not is_signed_int(head):288 raise TypeError("`head` must be of type `int`.")289 elif head is not None:290 head = int(head)291 else:292 head = Register._START_N_HEAD_DEFAULT293 if tail_length is not None and not is_signed_int(tail_length):294 raise TypeError("`tail_length` must of of type `int`.")295 elif tail_length is not None:296 tail_length = int(tail_length)297 else:298 tail_length = Register._START_N_TAIL_LENGTH_DEFAULT299 if head < 0:300 raise ValueError("`head` must be non-negative.")301 if tail_length <= 0:302 raise ValueError("`tail_length` must be positive.")303 if head == self._start_n_head and tail_length == self._start_n_tail_length:304 return305 new_mod = 10 ** tail_length306 with lmdb_prefix_iterator(self._db, _BLK_KEY_PREFIX) as it:307 for key, _ in it:308 apri, start_n, length = self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key)309 if start_n // new_mod != head:310 raise ValueError(311 "The following `start_n` does not have the correct head:\n" +312 f"`start_n` : {start_n}\n" +313 "That `start_n` is associated with a `Block` whose `Apri_Info` and length is:\n" +314 f"`Apri_Info` : {str(apri.to_json())}\n" +315 f"length : {length}\n"316 )317 if debug == 
1:318 raise KeyboardInterrupt319 try:320 with self._db.begin(write = True) as rw_txn:321 with self._db.begin() as ro_txn:322 with lmdb_prefix_iterator(ro_txn, _BLK_KEY_PREFIX) as it:323 rw_txn.put(_START_N_HEAD_KEY, str(head).encode("ASCII"))324 rw_txn.put(_START_N_TAIL_LENGTH_KEY, str(tail_length).encode("ASCII"))325 for key, val in it:326 _, start_n, _ = self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key)327 apri_json, _, length_bytes = Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, key)328 new_start_n_bytes = str(start_n % new_mod).encode("ASCII")329 new_key = Register._join_disk_block_data(330 _BLK_KEY_PREFIX, apri_json, new_start_n_bytes, length_bytes331 )332 if key != new_key:333 rw_txn.put(new_key, val)334 rw_txn.delete(key)335 if debug == 2:336 raise KeyboardInterrupt337 except lmdb.MapFullError:338 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))339 self._start_n_head = head340 self._start_n_tail_length = tail_length341 self._start_n_tail_mod = 10 ** self._start_n_tail_length342 @contextmanager343 def open(self, read_only = False):344 if not self._created and not read_only:345 # set local directory info and create levelDB database346 local_dir = random_unique_filename(self.saves_directory, length = 4, alphabet = LOCAL_DIR_CHARS)347 try:348 local_dir.mkdir(exist_ok = False)349 (local_dir / REGISTER_FILENAME).mkdir(exist_ok = False)350 with (local_dir / MSG_FILEPATH).open("x") as fh:351 fh.write(self._msg)352 with (local_dir / VERSION_FILEPATH).open("x") as fh:353 fh.write(self._version)354 with (local_dir / CLS_FILEPATH).open("x") as fh:355 fh.write(str(type(self)))356 (local_dir / DATABASE_FILEPATH).mkdir(exist_ok = False)357 self._set_local_dir(local_dir)358 self._db = open_lmdb(self._db_filepath, self._db_map_size, False)359 try:360 with self._db.begin(write = True) as txn:361 # set register info362 txn.put(_START_N_HEAD_KEY, str(self._start_n_head).encode("ASCII"))363 txn.put(_START_N_TAIL_LENGTH_KEY, 
str(self._start_n_tail_length).encode("ASCII"))364 txn.put(_CURR_ID_KEY, b"0")365 except lmdb.MapFullError:366 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))367 Register._add_instance(local_dir, self)368 yiel = self369 except BaseException as e:370 if local_dir.is_dir():371 shutil.rmtree(local_dir)372 raise e373 elif self._created:374 yiel = self._open_created(read_only)375 else:376 raise ValueError(377 "You must `open` this `Register` at least once with `read_only = False` before you can open it in "378 "read-only mode."379 )380 try:381 yield yiel382 finally:383 yiel._close_created()384 def increase_register_size(self, num_bytes):385 """WARNING: DO NOT CALL THIS METHOD FROM MORE THAN ONE PYTHON PROCESS AT A TIME. You are safe if you call it386 from only one Python process. You are safe if you have multiple Python processes running and call it from only387 ONE of them. But do NOT call it from multiple processes at once. Doing so may result in catastrophic loss of388 data.389 :param num_bytes: (type `int`) Positive.390 """391 self._check_open_raise("increase_register_size")392 if not is_signed_int(num_bytes):393 raise TypeError("`num_bytes` must be of type `int`.")394 if num_bytes <= 0:395 raise ValueError("`num_bytes` must be positive.")396 if num_bytes <= self._db_map_size:397 raise ValueError("`num_bytes` must be larger than the current `Register` size.")398 self._db.set_mapsize(num_bytes)399 self._db_map_size = num_bytes400 def get_register_size(self):401 return self._db_map_size402 #################################403 # PROTEC REGISTER METHODS #404 def _open_created(self, read_only):405 if Register._instance_exists(self._local_dir):406 ret = Register._get_instance(self._local_dir)407 else:408 ret = self409 if not ret._created:410 raise Register_Error(Register._NOT_CREATED_ERROR_MESSAGE.format("_open_created"))411 if ret._db is not None and not ret._db_is_closed():412 raise Register_Already_Open_Error()413 self._read_only = 
read_only414 ret._db = open_lmdb(self._db_filepath, self._db_map_size, read_only)415 return ret416 def _close_created(self):417 self._db.close()418 @contextmanager419 def _recursive_open(self, read_only):420 if not self._created:421 raise Register_Error(Register._NOT_CREATED_ERROR_MESSAGE.format("_recursive_open"))422 else:423 try:424 yiel = self._open_created(read_only)425 need_close = True426 except Register_Already_Open_Error:427 yiel = self428 need_close = False429 if not read_only and yiel._read_only:430 raise ValueError(431 "Attempted to open a `Register` in read-write mode that is already open in read-only mode."432 )433 try:434 yield yiel435 finally:436 if need_close:437 yiel._close_created()438 def _check_open_raise(self, method_name):439 if self._db is None or self._db_is_closed():440 raise Register_Error(441 f"This `Register` database has not been opened. You must open this register via `with reg.open() as " +442 f"reg:` before calling the method `{method_name}`."443 )444 def _check_readwrite_raise(self, method_name):445 """Call `self._check_open_raise` before this method."""446 if self._read_only:447 raise Register_Error(448 f"This `Register` is `open`ed in read-only mode. In order to call the method `{method_name}`, you must "449 "open this `Register` in read-write mode via `with reg.open() as reg:`."450 )451 # def _check_memory_raise(self, keys, vals):452 #453 # stat = self._db.stat()454 #455 # current_size = stat.psize * (stat.leaf_pages + stat.branch_pages + stat.overflow_pages)456 #457 # entry_size_bytes = sum(len(key) + len(val) for key, val in zip(keys, vals)) * BYTES_PER_CHAR458 #459 # if current_size + entry_size_bytes >= Register._MEMORY_FULL_PROP * self._db_map_size:460 #461 # raise MemoryError(462 # "The `Register` database is out of memory. 
Please allocate more memory using the method "463 # "`Register.increase_register_size`."464 # )465 def _set_local_dir(self, local_dir):466 """`local_dir` and a corresponding register database must exist prior to calling this method.467 :param local_dir: (type `pathlib.Path`) Absolute.468 """469 if not local_dir.is_absolute():470 raise ValueError(NOT_ABSOLUTE_ERROR_MESSAGE.format(str(local_dir)))471 if local_dir.parent != self.saves_directory:472 raise ValueError(473 "The `local_dir` argument must be a sub-directory of `reg.saves_directory`.\n" +474 f"`local_dir.parent` : {str(local_dir.parent)}\n"475 f"`reg.saves_directory` : {str(self.saves_directory)}"476 )477 check_register_structure(local_dir)478 self._created = True479 self._local_dir = local_dir480 self._local_dir_bytes = str(self._local_dir).encode("ASCII")481 self._db_filepath = self._local_dir / DATABASE_FILEPATH482 self._subreg_bytes = (483 _SUB_KEY_PREFIX + self._local_dir_bytes484 )485 self._version_filepath = local_dir / VERSION_FILEPATH486 self._msg_filepath = local_dir / MSG_FILEPATH487 self._cls_filepath = local_dir / CLS_FILEPATH488 def _has_compatible_version(self):489 return self._version in COMPATIBLE_VERSIONS490 def _db_is_closed(self):491 if not self._created:492 raise Register_Error(Register._NOT_CREATED_ERROR_MESSAGE.format("_db_is_closed"))493 else:494 return lmdb_is_closed(self._db)495 # @staticmethod496 # def _detect_open_elsewhere_hold_iterator():497 #498 # return itertools.count()499 # def _detect_open_elsewhere_hold(self, error):500 #501 # if Register._is_open_elsewhere_error(error):502 #503 # warnings.warn(504 # "The `Register` database is open in another process and cannot be accessed. The program will hold "505 # "until it detects that the `Register` database is accessible. 
You may still halt the "506 # "program using a keyboard interrupt, for example, if you wish.\n"507 #508 # "Possible solutions:\n"509 # " - Close this `Register` in the other process.\n"510 # " - Open this `Register` in read-only mode via `with reg.open(read_only = True) as reg`.\n"511 # " - Create a new `Register` and open that and write to it."512 # )513 #514 # for _ in itertools.count(0):515 #516 # time.sleep(Register._OPEN_ELSEWHERE_HOLD_CHECK_INTERVAL)517 #518 # try:519 # plyvel.DB(str(self._db_filepath))520 #521 # except Exception as e:522 # if not Register._is_open_elsewhere_error(e):523 # raise e524 #525 # else:526 # return527 #528 # else:529 # raise error530 # @staticmethod531 # def _is_open_elsewhere_error(error):532 #533 # return (534 # isinstance(error, plyvel.IOError) and535 # "The process cannot access the file because it is being used by another process." in str(error)536 # )537 #################################538 # PUBLIC APRI METHODS #539 def get_all_apri_info(self, recursively = False):540 if not isinstance(recursively, bool):541 raise TypeError("`recursively` must be of type `bool`")542 ret = []543 for blk in self._ram_blks:544 ret.append(blk.get_apri())545 self._check_open_raise("get_all_apri_info")546 with lmdb_prefix_iterator(self._db, _ID_APRI_KEY_PREFIX) as it:547 for _, val in it:548 ret.append(Apri_Info.from_json(val.decode("ASCII")))549 if recursively:550 for subreg in self._iter_subregisters():551 with subreg._recursive_open(True) as subreg:552 ret.append(subreg.get_all_apri_info())553 return sorted(ret)554 def change_apri_info(self, old_apri, new_apri, recursively = False, debug = 0):555 """Replace an old `Apri_Info`, and all references to it, with a new `Apri_Info`.556 If ANY `Block`, `Apri_Info`, or `Apos_Info` references `old_apri`, its entries in this `Register` will be557 updated to reflect the replacement of `old_apri` with `new_apri`. (See example below.) 
After the replacement558 `old_apri` -> `new_apri` is made, the set of `Apri_Info` that changed under that replacement must be disjoint559 from the set of `Apri_Info` that did not change. Otherwise, a `ValueError` is raised.560 For example, say we intend to replace561 `old_apri = Apri_Info(descr = "periodic points")`562 with563 `new_apri = Apri_info(descr = "odd periods", ref = "Newton et al. 2005")`.564 In an example `Register`, there are two `Block`s, one with `old_apri` and the other with565 `some_other_apri = Apri_info(descr = "period length", respective = old_apri)`.566 After a call to `change_apri_info(old_apri, new_apri)`, the first `Block` will have `new_apri` and the second567 will have568 `Apri_Info(descr = "period length", respective = new_apri)`.569 :param old_apri: (type `Apri_Info`)570 :param new_apri: (type `Apri_info`)571 :param recursively: (type `bool`)572 :raise ValueError: See above.573 """574 # DEBUG : 1, 2, 3575 self._check_open_raise("change_apri_info")576 self._check_readwrite_raise("change_apri_info")577 # raises `Data_Not_Found_Error` if `old_apri` does not have an ID578 old_apri_id = self._get_id_by_apri(old_apri, None, False)579 if old_apri == new_apri:580 return581 old_apri_json = old_apri.to_json().encode("ASCII")582 old_apri_id_key = _APRI_ID_KEY_PREFIX + old_apri_json583 old_id_apri_key = _ID_APRI_KEY_PREFIX + old_apri_id584 new_apri_json = new_apri.to_json().encode("ASCII")585 if lmdb_has_key(self._db, _APRI_ID_KEY_PREFIX + new_apri_json):586 new_apri_id = self._get_id_by_apri(new_apri, new_apri_json, False)587 new_id_apri_key = _ID_APRI_KEY_PREFIX + new_apri_id588 has_new_apri_already = True589 warnings.warn(f"This `Register` already has a reference to {str(new_apri)}.")590 else:591 new_apri_id = None592 new_id_apri_key = None593 has_new_apri_already = False594 if debug == 1:595 raise KeyboardInterrupt596 try:597 with self._db.begin(write = True) as rw_txn:598 with self._db.begin() as ro_txn:599 apris_changed = set()600 
apris_didnt_change = set()601 # change all apri_id keys602 with lmdb_prefix_iterator(ro_txn, _APRI_ID_KEY_PREFIX) as it:603 for key, val in it:604 if key == old_apri_id_key:605 new_key = _APRI_ID_KEY_PREFIX + new_apri_json606 else:607 apri = Apri_Info.from_json(key[_APRI_ID_KEY_PREFIX_LEN:].decode("ASCII"))608 replaced = apri.change_info(old_apri, new_apri)609 new_key = _APRI_ID_KEY_PREFIX + replaced.to_json().encode("ASCII")610 if new_key != key:611 rw_txn.put(new_key, val)612 rw_txn.delete(key)613 apris_changed.add(new_key[_APRI_ID_KEY_PREFIX_LEN : ])614 else:615 apris_didnt_change.add(key[_APRI_ID_KEY_PREFIX_LEN : ])616 # check `apris_changed` and `apris_didnt_change` are disjoint, otherwise raise ValueError617 if not apris_changed.isdisjoint(apris_didnt_change):618 # ValueError automatically aborts the LMDB transaction619 raise ValueError(620 "The set of `Apri_Info` that changed under the replacement `old_apri` -> `new_apri` must be "621 "disjoint from the set of `Apri_Info` that did not change."622 )623 # change all id_apri keys624 with lmdb_prefix_iterator(ro_txn, _ID_APRI_KEY_PREFIX) as it:625 for key, val in it:626 new_key = key627 if key == old_id_apri_key:628 new_val = new_apri_json629 else:630 apri = Apri_Info.from_json(val.decode("ASCII"))631 replaced = apri.change_info(old_apri, new_apri)632 new_val = replaced.to_json().encode("ASCII")633 if has_new_apri_already and key == new_id_apri_key:634 new_key = old_id_apri_key635 if key != new_key or val != new_val:636 rw_txn.put(new_key, new_val)637 if has_new_apri_already:638 # change all blocks639 for prefix in [_BLK_KEY_PREFIX, _COMPRESSED_KEY_PREFIX]:640 with lmdb_prefix_iterator(ro_txn, prefix + new_apri_id + _KEY_SEP) as it:641 for key, val in it:642 new_blk_key = prefix + old_apri_id + key[key.index(_KEY_SEP) : ]643 rw_txn.put(new_blk_key, val)644 # change all apos vals645 with lmdb_prefix_iterator(ro_txn, _APOS_KEY_PREFIX) as it:646 for key, val in it:647 apos = 
Apos_Info.from_json(val.decode("ASCII"))648 replaced = apos.change_info(old_apri, new_apri)649 new_val = replaced.to_json().encode("ASCII")650 if val != new_val:651 rw_txn.put(new_key, new_val)652 if debug == 2:653 raise KeyboardInterrupt654 if debug == 3:655 raise KeyboardInterrupt656 except lmdb.MapFullError:657 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))658 if recursively:659 for subreg in self._iter_subregisters():660 with subreg._recursive_open(False) as subreg:661 subreg.change_apri_info(old_apri, new_apri, True)662 def remove_apri_info(self, apri, debug = 0):663 """Remove an `Apri_Info` that is not associated with any other `Apri_Info`, `Block`, nor `Apos_Info`.664 :param apri: (type `Apri_Info`)665 :raise ValueError: If there are any `Apri_Info`, `Block`, or `Apos_Info` associated with `apri`.666 """667 # DEBUG : 1, 2, 3, 4668 self._check_open_raise("remove_apri_info")669 self._check_readwrite_raise("remove_apri_info")670 if not isinstance(apri, Apri_Info):671 raise TypeError("`apri` must be of type `Apri_Info`.")672 _id = self._get_id_by_apri(apri, None, False)673 if self.get_num_disk_blocks(apri) != 0:674 raise ValueError(675 f"There are disk `Block`s saved with `{str(apri)}`. Please remove them first and call "676 "`remove_apri_info` again."677 )678 if debug == 1:679 raise KeyboardInterrupt680 with lmdb_prefix_iterator(self._db, _ID_APRI_KEY_PREFIX) as it:681 for _, _apri_json in it:682 _apri = Apri_Info.from_json(_apri_json.decode("ASCII"))683 if apri in _apri and apri != _apri:684 raise ValueError(685 f"{str(_apri)} is associated with {str(apri)}. Please remove the former first before removing "686 "the latter."687 )688 if debug == 2:689 raise KeyboardInterrupt690 try:691 self.get_apos_info(apri)692 except Data_Not_Found_Error:693 pass694 else:695 raise ValueError(696 f"There is an `Apos_Info` associated with `{str(apri)}`. 
Please remove it first and call "697 "`remove_apri_info` again."698 )699 if debug == 3:700 raise KeyboardInterrupt701 try:702 with self._db.begin(write = True) as txn:703 txn.delete(_ID_APRI_KEY_PREFIX + _id)704 txn.delete(_APRI_ID_KEY_PREFIX + apri.to_json().encode("ASCII"))705 if debug == 4:706 raise KeyboardInterrupt707 except lmdb.MapFullError:708 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))709 #################################710 # PROTEC APRI METHODS #711 def _get_apri_json_by_id(self, _id, txn = None):712 """Get JSON bytestring representing an `Apri_Info` instance.713 :param _id: (type `bytes`)714 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. If `None`, then use open a new715 transaction and commit it after this method resolves.716 :return: (type `bytes`)717 """718 commit = txn is None719 if commit:720 txn = self._db.begin()721 try:722 return txn.get(_ID_APRI_KEY_PREFIX + _id)723 finally:724 if commit:725 txn.commit()726 def _get_id_by_apri(self, apri, apri_json, missing_ok, txn = None):727 """Get an `Apri_Info` ID for this database. If `missing_ok is True`, then create an ID if the passed `apri` or728 `apri_json` is unknown to this `Register`.729 One of `apri` and `apri_json` can be `None`, but not both. If both are not `None`, then `apri` is used.730 `self._db` must be opened by the caller.731 :param apri: (type `Apri_Info`)732 :param apri_json: (type `bytes`)733 :param missing_ok: (type `bool`) Create an ID if the passed `apri` or `apri_json` is unknown to this `Register`.734 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. 
If `None`, then use open a new735 transaction and commit it after this method resolves.736 :raises Apri_Info_Not_Found_Error: If `apri` or `apri_json` is not known to this `Register` and `missing_ok737 is False`.738 :return: (type `bytes`)739 """740 if apri is not None:741 key = _APRI_ID_KEY_PREFIX + apri.to_json().encode("ASCII")742 elif apri_json is not None:743 key = _APRI_ID_KEY_PREFIX + apri_json744 else:745 raise ValueError746 commit = txn is None747 if commit and missing_ok:748 txn = self._db.begin(write = True)749 elif commit:750 txn = self._db.begin()751 try:752 _id = txn.get(key, default = None)753 if _id is not None:754 return _id755 elif missing_ok:756 _id = txn.get(_CURR_ID_KEY)757 next_id = str(int(_id) + 1).encode("ASCII")758 txn.put(_CURR_ID_KEY, next_id)759 txn.put(key, _id)760 txn.put(_ID_APRI_KEY_PREFIX + _id, key[_APRI_ID_KEY_PREFIX_LEN:])761 return _id762 else:763 if apri is None:764 apri = Apri_Info.from_json(apri_json.decode("ASCII"))765 raise Data_Not_Found_Error(f"`{str(apri)}` is not known to this `Register`.")766 finally:767 if commit:768 try:769 txn.commit()770 except lmdb.MapFullError:771 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))772 #################################773 # PUBLIC APOS METHODS #774 def set_apos_info(self, apri, apos, debug = 0):775 """Set some `Apos_Info` for corresponding `Apri_Info`.776 WARNING: This method will OVERWRITE any previous saved `Apos_Info`. 
If you do not want to lose any previously777 saved data, then you should do something like the following:778 apos = reg.get_apos_info(apri)779 apos.period_length = 5780 reg.set_apos_info(apos)781 :param apri: (type `Apri_Info`)782 :param apos: (type `Apos_Info`)783 """784 # DEBUG : 1, 2785 self._check_open_raise("set_apos_info")786 self._check_readwrite_raise("set_apos_info")787 if not isinstance(apri, Apri_Info):788 raise TypeError("`apri` must be of type `Apri_Info`")789 if not isinstance(apos, Apos_Info):790 raise TypeError("`apos` must be of type `Apos_Info`")791 key = self._get_apos_key(apri, None, True)792 apos_json = apos.to_json().encode("ASCII")793 if debug == 1:794 raise KeyboardInterrupt795 try:796 with self._db.begin(write = True) as txn:797 txn.put(key, apos_json)798 if debug == 2:799 raise KeyboardInterrupt800 except lmdb.MapFullError:801 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))802 def get_apos_info(self, apri):803 """Get some `Apos_Info` associated with a given `Apri_Info`.804 :param apri: (type `Apri_Info`)805 :raises Apri_Info_Not_Found_Error: If `apri` is not known to this `Register`.806 :raises Data_Not_Found_Error: If no `Apos_Info` has been associated to `apri`.807 :return: (type `Apos_Info`)808 """809 self._check_open_raise("get_apos_info")810 if not isinstance(apri, Apri_Info):811 raise TypeError("`apri` must be of type `Apri_Info`")812 key = self._get_apos_key(apri, None, False)813 with self._db.begin() as txn:814 apos_json = txn.get(key, default=None)815 if apos_json is not None:816 return Apos_Info.from_json(apos_json.decode("ASCII"))817 else:818 raise Data_Not_Found_Error(f"No `Apos_Info` associated with `{str(apri)}`.")819 def remove_apos_info(self, apri, debug = 0):820 # DEBUG : 1, 2821 self._check_open_raise("remove_apos_info")822 self._check_readwrite_raise("remove_apos_info")823 if not isinstance(apri, Apri_Info):824 raise TypeError("`apri` must be of type `Apri_Info`.")825 key = 
self._get_apos_key(apri, None, False)826 if debug == 1:827 raise KeyboardInterrupt828 if lmdb_has_key(self._db, key):829 try:830 with self._db.begin(write = True) as txn:831 txn.delete(key)832 if debug == 2:833 raise KeyboardInterrupt834 except lmdb.MapFullError:835 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))836 else:837 raise Data_Not_Found_Error(f"No `Apos_Info` associated with `{str(apri)}`.")838 #################################839 # PROTEC APOS METHODS #840 def _get_apos_key(self, apri, apri_json, missing_ok, txn = None):841 """Get a key for an `Apos_Info` entry.842 One of `apri` and `apri_json` can be `None`, but not both. If both are not `None`, then `apri` is used. If843 `missing_ok is True`, then create a new `Apri_Info` ID if one does not already exist for `apri`.844 :param apri: (type `Apri_Info`)845 :param apri_json: (type `bytes`)846 :param missing_ok: (type `bool`)847 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. 
If `None`, then use open a new848 transaction and commit it after this method resolves.849 :raises Apri_Info_Not_Found_Error: If `missing_ok is False` and `apri` is not known to this `Register`.850 :return: (type `bytes`)851 """852 if apri is None and apri_json is None:853 raise ValueError854 apri_id = self._get_id_by_apri(apri, apri_json, missing_ok, txn)855 return _APOS_KEY_PREFIX + _KEY_SEP + apri_id856 #################################857 # PUBLIC SUB-REGISTER METHODS #858 def add_subregister(self, subreg, debug = 0):859 # DEBUG : 1, 2860 self._check_open_raise("add_subregister")861 self._check_readwrite_raise("add_subregister")862 if not isinstance(subreg, Register):863 raise TypeError("`subreg` must be of a `Register` derived type")864 if not subreg._created:865 raise Register_Error(Register._NOT_CREATED_ERROR_MESSAGE.format("add_subregister"))866 key = subreg._get_subregister_key()867 if debug == 1:868 raise KeyboardInterrupt869 if not lmdb_has_key(self._db, key):870 if subreg._check_no_cycles_from(self):871 try:872 with self._db.begin(write = True) as txn:873 txn.put(key, _SUB_VAL)874 if debug == 2:875 raise KeyboardInterrupt876 except lmdb.MapFullError:877 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))878 else:879 raise Register_Error(880 "Attempting to add this register as a sub-register will created a directed cycle in the " +881 "subregister relation. "882 f'Intended super-register description: "{str(self)}". 
'883 f'Intended sub-register description: "{str(subreg)}".'884 )885 else:886 raise Register_Error("`Register` already added as subregister.")887 def remove_subregister(self, subreg, debug = 0):888 """889 :param subreg: (type `Register`)890 """891 # DEBUG : 1, 2892 self._check_open_raise("remove_subregister")893 self._check_readwrite_raise("remove_subregister")894 if not isinstance(subreg, Register):895 raise TypeError("`subreg` must be of a `Register` derived type.")896 key = subreg._get_subregister_key()897 if debug == 1:898 raise KeyboardInterrupt899 if lmdb_has_key(self._db, key):900 try:901 with self._db.begin(write = True) as txn:902 txn.delete(key)903 if debug == 2:904 raise KeyboardInterrupt905 except lmdb.MapFullError:906 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))907 else:908 raise Register_Error("`Register` not added as subregister.")909 #################################910 # PROTEC SUB-REGISTER METHODS #911 def _check_no_cycles_from(self, original, touched = None):912 """Checks if adding `self` as a subregister to `original` would not create any directed cycles containing the913 arc `original` -> `self` in the subregister relation.914 Returns `False` if a directed cycle would be created and `True` otherwise. If `self` is already a subregister915 of `original`, then return `True` if the currently existing relation has no directed cycles that pass through916 `self`, and `False` otherwise. 
If `self == original`, then return `False`.917 :param original: (type `Register`)918 :param touched: used for recursion.919 :return: (type `bool`)920 """921 if not self._created or not original._created:922 raise Register_Error(Register._NOT_CREATED_ERROR_MESSAGE.format("_check_no_cycles_from"))923 if self is original:924 return False925 if touched is None:926 touched = set()927 with self._recursive_open(True) as reg:928 if any(929 original is subreg930 for subreg in reg._iter_subregisters()931 ):932 return False933 for subreg in reg._iter_subregisters():934 if subreg not in touched:935 touched.add(subreg)936 if not subreg._check_no_cycles_from(original, touched):937 return False938 else:939 return True940 def _iter_subregisters(self):941 with lmdb_prefix_iterator(self._db, _SUB_KEY_PREFIX) as it:942 for key, _ in it:943 local_dir = Path(key[_SUB_KEY_PREFIX_LEN:].decode("ASCII"))944 subreg = Register._from_local_dir(local_dir)945 yield subreg946 def _get_subregister_key(self):947 return _SUB_KEY_PREFIX + self._local_dir_bytes948 #################################949 # PUBLIC DISK BLK METHODS #950 @classmethod951 @abstractmethod952 def dump_disk_data(cls, data, filename, **kwargs):953 """Dump data to the disk.954 This method should not change any properties of any `Register`, which is why it is a class-method and955 not an instance-method. It merely takes `data` and dumps it to disk.956 Most use-cases prefer the instance-method `add_disk_block`.957 :param data: (any type) The raw data to dump.958 :param filename: (type `pathlib.Path`) The filename to dump to. 
You may edit this filename if959 necessary (such as by adding a suffix), but you must return the edited filename.960 :return: (type `pathlib.Path`) The actual filename of the data on the disk.961 """962 @classmethod963 @abstractmethod964 def load_disk_data(cls, filename, **kwargs):965 """Load raw data from the disk.966 This method should not change any properties of any `Register`, which is why it is a classmethod and967 not an instancemethod. It merely loads the raw data saved on the disk and returns it.968 Most use-cases prefer the method `get_disk_block`.969 :param filename: (type `pathlib.Path`) Where to load the block from. You may need to edit this970 filename if necessary, such as by adding a suffix, but you must return the edited filename.971 :raises Data_Not_Found_Error: If the data could not be loaded because it doesn't exist.972 :return: (any type) The data loaded from the disk.973 :return: (pathlib.Path) The exact path of the data saved to the disk.974 """975 @classmethod976 @abstractmethod977 def clean_disk_data(cls, filename, **kwargs):978 """979 :param filename:980 :param kwargs:981 :return:982 """983 def add_disk_block(self, blk, return_metadata = False, debug = 0, **kwargs):984 """Save a `Block` to disk and link it with this `Register`.985 :param blk: (type `Block`)986 :param return_metadata: (type `bool`, default `False`) Whether to return a `File_Metadata` object, which987 contains file creation date/time and size of dumped data to the disk.988 :raises Register_Error: If a duplicate `Block` already exists in this `Register`.989 """990 #DEBUG : 1, 2, 3, 4991 _FAIL_NO_RECOVER_ERROR_MESSAGE = "Could not successfully recover from a failed disk `Block` add!"992 self._check_open_raise("add_disk_block")993 self._check_readwrite_raise("add_disk_block")994 if not isinstance(blk, Block):995 raise TypeError("`blk` must be of type `Block`.")996 if not isinstance(return_metadata, bool):997 raise TypeError("`return_metadata` must be of type `bool`.")998 
start_n_head = blk.get_start_n() // self._start_n_tail_mod999 if start_n_head != self._start_n_head :1000 raise IndexError(1001 "The `start_n` for the passed `Block` does not have the correct head:\n" +1002 f"`tail_length` : {self._start_n_tail_length}\n" +1003 f"expected `head` : {self._start_n_head}\n"1004 f"`start_n` : {blk.get_start_n()}\n" +1005 f"`start_n` head : {start_n_head}\n" +1006 "Please see the method `set_start_n_info` to troubleshoot this error."1007 )1008 apris = [apri for _, apri in blk.get_apri().iter_inner_info() if isinstance(apri, Apri_Info)]1009 filename = None1010 if debug == 1:1011 raise KeyboardInterrupt1012 try:1013 with self._db.begin(write = True) as rw_txn:1014 with self._db.begin() as ro_txn:1015 # this will create ID's if necessary1016 for i, apri in enumerate(apris):1017 self._get_id_by_apri(apri, None, True, rw_txn)1018 blk_key = self._get_disk_block_key(1019 _BLK_KEY_PREFIX,1020 blk.get_apri(), None, blk.get_start_n(), len(blk),1021 False, rw_txn1022 )1023 if not lmdb_has_key(ro_txn, blk_key):1024 filename = random_unique_filename(self._local_dir, length=6)1025 if debug == 2:1026 raise KeyboardInterrupt1027 filename = type(self).dump_disk_data(blk.get_segment(), filename, **kwargs)1028 if debug == 3:1029 raise KeyboardInterrupt1030 filename_bytes = str(filename.name).encode("ASCII")1031 compressed_key = _COMPRESSED_KEY_PREFIX + blk_key[_BLK_KEY_PREFIX_LEN : ]1032 rw_txn.put(blk_key, filename_bytes)1033 rw_txn.put(compressed_key, _IS_NOT_COMPRESSED_VAL)1034 if len(blk) == 0:1035 warnings.warn(1036 "Added a length 0 disk `Block` to this `Register`.\n" +1037 f"`Register` message: {str(self)}\n" +1038 f"`Block`: {str(blk)}\n" +1039 f"`Register` location: {str(self._local_dir)}"1040 )1041 if return_metadata:1042 return File_Metadata.from_path(filename)1043 else:1044 raise Register_Error(1045 f"Duplicate `Block` with the following data already exists in this `Register`: " +1046 f"{str(blk.get_apri())}, start_n = {blk.get_start_n()}, 
length = {len(blk)}."1047 )1048 if debug == 4:1049 raise KeyboardInterrupt1050 except BaseException as e:1051 # We must assume that if an exception was thrown, `rw_txn` did not commit successfully.1052 try:1053 if filename is not None:1054 filename.unlink(missing_ok = True)1055 except BaseException:1056 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1057 else:1058 if isinstance(e, lmdb.MapFullError):1059 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))1060 else:1061 raise e1062 def remove_disk_block(self, apri, start_n = None, length = None, recursively = False, debug = 0):1063 """Delete a disk `Block` and unlink it with this `Register`.1064 :param apri: (type `Apri_Info`)1065 :param start_n: (type `int`) Non-negative.1066 :param length: (type `int`) Non-negative.1067 :param recursively: (type `bool`)1068 """1069 # DEBUG : 1, 2, 31070 _FAIL_NO_RECOVER_ERROR_MESSAGE = "Could not successfully recover from a failed disk `Block` remove!"1071 self._check_open_raise("remove_disk_block")1072 self._check_readwrite_raise("remove_disk_block")1073 start_n, length = Register._check_apri_start_n_length_raise(apri, start_n, length)1074 start_n, length = self._resolve_start_n_length(apri, start_n, length)1075 try:1076 blk_key, compressed_key = self._check_blk_compressed_keys_raise(None, None, apri, None, start_n, length)1077 if debug == 1:1078 raise KeyboardInterrupt1079 except Data_Not_Found_Error:1080 pass1081 else:1082 blk_filename, compressed_filename = self._check_blk_compressed_files_raise(1083 blk_key, compressed_key, apri, start_n, length1084 )1085 if not is_deletable(blk_filename):1086 raise OSError(f"Cannot delete `Block` file `{str(blk_filename)}`.")1087 if compressed_filename is not None and not is_deletable(compressed_filename):1088 raise OSError(f"Cannot delete compressed `Block` file `{str(compressed_filename)}`.")1089 compressed_val = None1090 blk_val = None1091 try:1092 with self._db.begin(write = True) as txn:1093 
compressed_val = txn.get(compressed_key)1094 blk_val = txn.get(blk_key)1095 txn.delete(compressed_key)1096 txn.delete(blk_key)1097 if debug == 2:1098 raise KeyboardInterrupt1099 if compressed_filename is not None:1100 blk_filename.unlink(missing_ok = False)1101 if debug == 3:1102 raise KeyboardInterrupt1103 compressed_filename.unlink(missing_ok = False)1104 else:1105 type(self).clean_disk_data(blk_filename)1106 except BaseException as e:1107 if blk_val is not None:1108 try:1109 if compressed_filename is not None:1110 if compressed_filename.exists():1111 blk_filename.touch(exist_ok = True)1112 with self._db.begin(write = True) as txn:1113 txn.put(compressed_key, compressed_val)1114 txn.put(blk_key, blk_val)1115 else:1116 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1117 else:1118 if blk_filename.exists():1119 with self._db.begin(write = True) as txn:1120 txn.put(compressed_key, compressed_val)1121 txn.put(blk_key, blk_val)1122 else:1123 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1124 except Register_Recovery_Error as ee:1125 raise ee1126 except BaseException:1127 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1128 if isinstance(e, lmdb.MapFullError):1129 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))1130 else:1131 raise e1132 return1133 if recursively:1134 for subreg in self._iter_subregisters():1135 with subreg._recursive_open(False) as subreg:1136 try:1137 subreg.remove_disk_block(apri, start_n, length, True)1138 except Data_Not_Found_Error:1139 pass1140 else:1141 return1142 raise Data_Not_Found_Error(1143 Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1144 )1145 def get_disk_block(self, apri, start_n = None, length = None, return_metadata = False, recursively = False, **kwargs):1146 self._check_open_raise("get_disk_block")1147 start_n, length = Register._check_apri_start_n_length_raise(apri, start_n, length)1148 if not 
isinstance(return_metadata, bool):1149 raise TypeError("`return_metadata` must be of type `bool`.")1150 if not isinstance(recursively, bool):1151 raise TypeError("`recursively` must be of type `bool`.")1152 start_n, length = self._resolve_start_n_length(apri, start_n, length)1153 try:1154 blk_key, compressed_key = self._check_blk_compressed_keys_raise(None, None, apri, None, start_n, length)1155 except Data_Not_Found_Error:1156 pass1157 else:1158 with self._db.begin() as txn:1159 if txn.get(compressed_key) != _IS_NOT_COMPRESSED_VAL:1160 raise Compression_Error(1161 "Could not load `Block` with the following data because the `Block` is compressed. Please call " +1162 "the `Register` method `decompress` first before loading the data.\n" +1163 f"{apri}, start_n = {start_n}, length = {length}"1164 )1165 blk_filename, _ = self._check_blk_compressed_files_raise(blk_key, compressed_key, apri, start_n, length)1166 blk_filename = self._local_dir / blk_filename1167 data, blk_filename = type(self).load_disk_data(blk_filename, **kwargs)1168 blk = Block(data, apri, start_n)1169 if return_metadata:1170 return blk, File_Metadata.from_path(blk_filename)1171 else:1172 return blk1173 if recursively:1174 for subreg in self._iter_subregisters():1175 with subreg._recursive_open(True) as subreg:1176 try:1177 return subreg.get_disk_block(apri, start_n, length, return_metadata, True)1178 except Data_Not_Found_Error:1179 pass1180 raise Data_Not_Found_Error(1181 Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1182 )1183 def get_disk_block_by_n(self, apri, n, return_metadata = False, recursively = False):1184 self._check_open_raise("get_disk_block_by_n")1185 if not isinstance(apri, Apri_Info):1186 raise TypeError("`apri` must be of type `Apri_Info`.")1187 if not is_signed_int(n):1188 raise TypeError("`n` must be of type `int`.")1189 else:1190 n = int(n)1191 if not isinstance(return_metadata, bool):1192 raise TypeError("`return_metadata` must be of type 
`bool`.")1193 if not isinstance(recursively, bool):1194 raise TypeError("`recursively` must be of type `bool`.")1195 if n < 0:1196 raise ValueError("`n` must be non-negative")1197 try:1198 for start_n, length in self.disk_intervals(apri):1199 if start_n <= n < start_n + length:1200 return self.get_disk_block(apri, start_n, length, return_metadata, False)1201 except Data_Not_Found_Error:1202 pass1203 if recursively:1204 for subreg in self._iter_subregisters():1205 with subreg._recursive_open(True) as subreg:1206 try:1207 return subreg.get_disk_block_by_n(apri, n, return_metadata, True)1208 except Data_Not_Found_Error:1209 pass1210 raise Data_Not_Found_Error(Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_N.format(str(apri), n))1211 def get_all_disk_blocks(self, apri, return_metadata = False, recursively = False):1212 self._check_open_raise("get_all_disk_blocks")1213 if not isinstance(apri, Apri_Info):1214 raise TypeError("`apri` must be of type `Apri_Info`.")1215 if not isinstance(return_metadata, bool):1216 raise TypeError("`return_metadata` must be of type `bool`.")1217 if not isinstance(recursively, bool):1218 raise TypeError("`recursively` must be of type `bool`.")1219 for start_n, length in self.disk_intervals(apri):1220 try:1221 yield self.get_disk_block(apri, start_n, length, return_metadata, False)1222 except Data_Not_Found_Error:1223 pass1224 if recursively:1225 for subreg in self._iter_subregisters():1226 with subreg._recursive_open(True) as subreg:1227 for blk in subreg.get_all_disk_blocks(apri, return_metadata, True):1228 yield blk1229 def get_disk_block_metadata(self, apri, start_n = None, length = None, recursively = False):1230 self._check_open_raise("get_disk_block_metadata")1231 start_n, length = Register._check_apri_start_n_length_raise(apri, start_n, length)1232 if not isinstance(recursively, bool):1233 raise TypeError("`recursively` must be of type `bool`.")1234 start_n, length = self._resolve_start_n_length(apri, start_n, length)1235 try:1236 
blk_key, compressed_key = self._check_blk_compressed_keys_raise(None, None, apri, None, start_n, length)1237 except Data_Not_Found_Error:1238 pass1239 else:1240 blk_filename, compressed_filename = self._check_blk_compressed_files_raise(1241 blk_key, compressed_key, apri, start_n, length1242 )1243 if compressed_filename is not None:1244 return File_Metadata.from_path(compressed_filename)1245 else:1246 return File_Metadata.from_path(blk_filename)1247 if recursively:1248 for subreg in self._iter_subregisters():1249 with subreg._recursive_open(True) as subreg:1250 try:1251 return subreg.get_disk_block_metadata(apri, start_n, length, True)1252 except Data_Not_Found_Error:1253 pass1254 raise Data_Not_Found_Error(1255 Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1256 )1257 def disk_intervals(self, apri):1258 """Return a `list` of all tuples `(start_n, length)` associated to disk `Block`s.1259 The tuples are sorted by increasing `start_n` and the larger `length` is used to break ties.1260 :param apri: (type `Apri_Info`)1261 :return: (type `list`)1262 """1263 self._check_open_raise("disk_intervals")1264 if not isinstance(apri, Apri_Info):1265 raise TypeError("`apri` must be of type `Apri_Info`.")1266 return sorted([1267 self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key, apri)[1:]1268 for key, _ in self._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri, None)1269 ], key = lambda t: (t[0], -t[1]))1270 def get_num_disk_blocks(self, apri):1271 self._check_open_raise("get_num_disk_blocks")1272 return lmdb_count_keys(1273 self._db,1274 _BLK_KEY_PREFIX + self._get_id_by_apri(apri, None, False) + _KEY_SEP1275 )1276 def compress(self, apri, start_n = None, length = None, compression_level = 6, return_metadata = False, debug = 0):1277 """Compress a `Block`.1278 :param apri: (type `Apri_Info`)1279 :param start_n: (type `int`) Non-negative.1280 :param length: (type `int`) Non-negative.1281 :param compression_level: (type `int`, default 6) Between 
0 and 9, inclusive. 0 is for the fastest compression,1282 but lowest compression ratio; 9 is slowest, but highest ratio. See1283 https://docs.python.org/3/library/zlib.html#zlib.compressobj for more information.1284 :param return_metadata: (type `bool`, default `False`) Whether to return a `File_Metadata` object that1285 describes the compressed file.1286 :raises Compression_Error: If the `Block` is already compressed.1287 :return: (type `File_Metadata`) If `return_metadata is True`.1288 """1289 # DEBUG : 1, 2, 3, 41290 _FAIL_NO_RECOVER_ERROR_MESSAGE = "Could not recover successfully from a failed disk `Block` compress!"1291 self._check_open_raise("compress")1292 self._check_readwrite_raise("compress")1293 start_n, length = Register._check_apri_start_n_length_raise(apri, start_n, length)1294 if not is_signed_int(compression_level):1295 raise TypeError("`compression_level` must be of type `int`.")1296 else:1297 compression_level = int(compression_level)1298 if not isinstance(return_metadata, bool):1299 raise TypeError("`return_metadata` must be of type `bool`.")1300 if not (0 <= compression_level <= 9):1301 raise ValueError("`compression_level` must be between 0 and 9.")1302 start_n, length = self._resolve_start_n_length(apri, start_n, length)1303 compressed_key = self._get_disk_block_key(1304 _COMPRESSED_KEY_PREFIX, apri, None, start_n, length, False1305 )1306 blk_key, compressed_key = self._check_blk_compressed_keys_raise(1307 None, compressed_key, apri, None, start_n, length1308 )1309 with self._db.begin() as txn:1310 compressed_val = txn.get(compressed_key)1311 if compressed_val != _IS_NOT_COMPRESSED_VAL:1312 raise Compression_Error(1313 "The disk `Block` with the following data has already been compressed: " +1314 f"{str(apri)}, start_n = {start_n}, length = {length}"1315 )1316 with self._db.begin() as txn:1317 blk_filename = self._local_dir / txn.get(blk_key).decode("ASCII")1318 compressed_filename = random_unique_filename(self._local_dir, 
COMPRESSED_FILE_SUFFIX)1319 compressed_val = compressed_filename.name.encode("ASCII")1320 cleaned = False1321 if debug == 1:1322 raise KeyboardInterrupt1323 try:1324 with self._db.begin(write = True) as txn:1325 txn.put(compressed_key, compressed_val)1326 if debug == 2:1327 raise KeyboardInterrupt1328 with zipfile.ZipFile(1329 compressed_filename, # target filename1330 "x", # zip mode (write, but don't overwrite)1331 zipfile.ZIP_DEFLATED, # compression mode1332 True, # use zip641333 compression_level,1334 strict_timestamps=False # change timestamps of old or new files1335 ) as compressed_fh:1336 compressed_fh.write(blk_filename, blk_filename.name)1337 if debug == 3:1338 raise KeyboardInterrupt1339 if debug == 4:1340 raise KeyboardInterrupt1341 type(self).clean_disk_data(blk_filename)1342 cleaned = True1343 blk_filename.touch(exist_ok = False)1344 except BaseException as e:1345 try:1346 with self._db.begin(write = True) as txn:1347 txn.put(compressed_key, _IS_NOT_COMPRESSED_VAL)1348 if cleaned or not blk_filename.exists():1349 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1350 else:1351 compressed_filename.unlink(missing_ok = True)1352 except Register_Recovery_Error as ee:1353 raise ee1354 except BaseException:1355 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1356 else:1357 if isinstance(e, lmdb.MapFullError):1358 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))1359 else:1360 raise e1361 if return_metadata:1362 return File_Metadata.from_path(compressed_filename)1363 else:1364 return None1365 # def compress_all(self, apri, compression_level = 6, return_metadata = False):1366 # """Compress all non-compressed `Block`s. Any `Block`s that are already compressed will be skipped.1367 #1368 # :param apri: (type `Apri_Info`)1369 # :param compression_level: (type `int`, default 6) Between 0 and 9, inclusive. 0 is for the fastest compression,1370 # but lowest compression ratio; 9 is slowest, but highest ratio. 
See1371 # https://docs.python.org/3/library/zlib.html#zlib.compressobj for more information.1372 # :param return_metadata: (type `bool`, default `False`) Whether to return a `list` of `File_Metadata` objects1373 # that describes the compressed files.1374 # :return: (type `list`) If `return_metadata is True`.1375 # """1376 #1377 # self._check_open_raise("compress_all")1378 #1379 # self._check_readwrite_raise("compress_all")1380 #1381 # if not isinstance(apri, Apri_Info):1382 # raise TypeError("`apri` must be of type `Apri_Info`.")1383 #1384 # if not is_signed_int(compression_level):1385 # raise TypeError("`compression_level` must be of type `int`.")1386 # else:1387 # compression_level = int(compression_level)1388 #1389 # if not isinstance(return_metadata, bool):1390 # raise TypeError("`return_metadata` must be of type `bool`.")1391 #1392 # if not (0 <= compression_level <= 9):1393 # raise ValueError("`compression_level` must be between 0 and 9.")1394 #1395 # if return_metadata:1396 # ret = []1397 #1398 # else:1399 # ret = None1400 #1401 # for start_n, length in self.get_disk_block_intervals(apri):1402 #1403 # try:1404 # metadata = self.compress(apri, start_n, length, return_metadata)1405 #1406 # except Compression_Error:1407 # pass1408 #1409 # else:1410 # if return_metadata:1411 # ret.append(metadata)1412 #1413 # return ret1414 #1415 # # try:1416 # # compressed_val, compressed_filename, compressed_fh = self._compress_helper_open_zipfile(compression_level)1417 # #1418 # # for blk_key, _ in self._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri, None):1419 # #1420 # # compressed_key = _COMPRESSED_KEY_PREFIX + blk_key[_BLK_KEY_PREFIX_LEN : ]1421 # # apri, start_n, length = self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, blk_key, apri)1422 # #1423 # # try:1424 # # blk_filename = self._compress_helper_check_keys(compressed_key, apri, start_n, length)1425 # #1426 # # except Compression_Error:1427 # # pass1428 # #1429 # # else:1430 # # 
Register._compress_helper_write_data(compressed_fh, blk_filename)1431 # # self._compress_helper_update_key(compressed_key, compressed_val)1432 # # to_clean.append(blk_filename)1433 # #1434 # # finally:1435 # # if compressed_fh is not None:1436 # # compressed_fh.close()1437 # #1438 # # for blk_filename in to_clean:1439 # # Register._compress_helper_clean_uncompressed_data(compressed_filename, blk_filename)1440 # #1441 # # if return_metadata:1442 # # return File_Metadata.from_path(compressed_filename)1443 # #1444 # # else:1445 # # return None1446 def decompress(self, apri, start_n = None, length = None, return_metadata = False, debug = 0):1447 """Decompress a `Block`.1448 :param apri: (type `Apri_Info`)1449 :param start_n: (type `int`) Non-negative.1450 :param length: (type `int`) Non-negative.1451 :param return_metadata: (type `bool`, default `False`) Whether to return a `File_Metadata` object that1452 describes the decompressed file.1453 :raise Decompression_Error: If the `Block` is not compressed.1454 :return: (type `list`) If `return_metadata is True`.1455 """1456 # DEBUG : 1, 2, 3, 41457 _FAIL_NO_RECOVER_ERROR_MESSAGE = "Could not recover successfully from a failed disk `Block` decompress!"1458 self._check_open_raise("decompress")1459 self._check_readwrite_raise("decompress")1460 start_n, length = Register._check_apri_start_n_length_raise(apri, start_n, length)1461 if not isinstance(return_metadata, bool):1462 raise TypeError("`return_metadata` must be of type `bool`.")1463 start_n, length = self._resolve_start_n_length(apri, start_n, length)1464 blk_key, compressed_key = self._check_blk_compressed_keys_raise(None, None, apri, None, start_n, length)1465 with self._db.begin() as txn:1466 compressed_val = txn.get(compressed_key)1467 if compressed_val == _IS_NOT_COMPRESSED_VAL:1468 raise Decompression_Error(1469 "The disk `Block` with the following data is not compressed: " +1470 f"{str(apri)}, start_n = {start_n}, length = {length}"1471 )1472 with self._db.begin() 
as txn:1473 blk_filename = txn.get(blk_key).decode("ASCII")1474 blk_filename = self._local_dir / blk_filename1475 compressed_filename = self._local_dir / compressed_val.decode("ASCII")1476 deleted = False1477 if not is_deletable(blk_filename):1478 raise OSError(f"Cannot delete ghost file `{str(blk_filename)}`.")1479 if not is_deletable(compressed_filename):1480 raise OSError(f"Cannot delete compressed file `{str(compressed_filename)}`.")1481 if debug == 1:1482 raise KeyboardInterrupt1483 try:1484 with self._db.begin(write = True) as txn:1485 # delete ghost file1486 blk_filename.unlink(missing_ok = False)1487 deleted = True1488 if debug == 2:1489 raise KeyboardInterrupt1490 with zipfile.ZipFile(compressed_filename, "r") as compressed_fh:1491 compressed_fh.extract(blk_filename.name, self._local_dir)1492 if debug == 3:1493 raise KeyboardInterrupt1494 txn.put(compressed_key, _IS_NOT_COMPRESSED_VAL)1495 if debug == 4:1496 raise KeyboardInterrupt1497 compressed_filename.unlink(missing_ok = False)1498 except BaseException as e:1499 try:1500 if not compressed_filename.is_file():1501 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1502 elif deleted or not blk_filename.is_file():1503 blk_filename.unlink(missing_ok = True)1504 blk_filename.touch(exist_ok = False)1505 except Register_Recovery_Error as ee:1506 raise ee1507 except BaseException:1508 raise Register_Recovery_Error(_FAIL_NO_RECOVER_ERROR_MESSAGE)1509 else:1510 if isinstance(e, lmdb.MapFullError):1511 raise Register_Error(Register._MEMORY_FULL_ERROR_MESSAGE.format(self._db_map_size))1512 else:1513 raise e1514 if return_metadata:1515 return File_Metadata.from_path(blk_filename)1516 else:1517 return None1518 # def decompress_all(self, apri, return_metadatas = False):1519 # """Decompress all compressed `Block`s. 
Any `Block`s that are not compressed will be skipped.1520 #1521 # :param apri: (type `Apri_Info`)1522 # :param return_metadatas: (type `bool`, default `False`) Whether to return a `list` of `File_Metadata` objects1523 # that describes the decompressed file(s).1524 # :return: (type `list`) If `return_metadatas is True`.1525 # """1526 #1527 # self._check_open_raise("decompress_all")1528 #1529 # self._check_readwrite_raise("decompress_all")1530 #1531 # if not isinstance(apri, Apri_Info):1532 # raise TypeError("`apri` must be of type `Apri_Info`.")1533 #1534 # if not isinstance(return_metadatas, bool):1535 # raise TypeError("`return_metadatas` must be of type `bool`.")1536 #1537 # if return_metadatas:1538 # ret = []1539 #1540 # else:1541 # ret = None1542 #1543 # for start_n, length in self.get_disk_block_intervals(apri):1544 #1545 # try:1546 # metadata = self.decompress(apri, start_n, length, return_metadatas)1547 #1548 # except Decompression_Error:1549 # pass1550 #1551 # else:1552 #1553 # if return_metadatas:1554 # ret.append(metadata)1555 #1556 # return ret1557 #################################1558 # PROTEC DISK BLK METHODS #1559 def _get_disk_block_key(self, prefix, apri, apri_json, start_n, length, missing_ok, txn = None):1560 """Get the database key for a disk `Block`.1561 One of `apri` and `apri_json` can be `None`, but not both. If both are not `None`, then `apri` is used.1562 `self._db` must be opened by the caller. This method only queries the database to obtain the `apri` ID.1563 If `missing_ok is True` and an ID for `apri` does not already exist, then a new one will be created. 
If1564 `missing_ok is False` and an ID does not already exist, then an error is raised.1565 :param prefix: (type `bytes`)1566 :param apri: (type `Apri_Info`)1567 :param apri_json: (types `bytes`)1568 :param start_n: (type `int`) The start index of the `Block`.1569 :param length: (type `int`) The length of the `Block`.1570 :param missing_ok: (type `bool`)1571 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. If `None`, then use open a new1572 transaction and commit it after this method resolves.1573 :raises Apri_Info_Not_Found_Error: If `missing_ok is False` and `apri` is not known to this `Register`.1574 :return: (type `bytes`)1575 """1576 if apri is None and apri_json is None:1577 raise ValueError1578 _id = self._get_id_by_apri(apri, apri_json, missing_ok, txn)1579 tail = start_n % self._start_n_tail_mod1580 return (1581 prefix +1582 _id + _KEY_SEP +1583 str(tail) .encode("ASCII") + _KEY_SEP +1584 str(length).encode("ASCII")1585 )1586 def _iter_disk_block_pairs(self, prefix, apri, apri_json, txn = None):1587 """Iterate over key-value pairs for block entries.1588 :param prefix: (type `bytes`)1589 :param apri: (type `Apri_Info`)1590 :param apri_json: (type `bytes`)1591 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. 
If `None`, then use open a new1592 transaction and commit it after this method resolves.1593 :return: (type `bytes`) key1594 :return: (type `bytes`) val1595 """1596 if apri_json is not None or apri is not None:1597 prefix += self._get_id_by_apri(apri, apri_json, False, txn)1598 prefix += _KEY_SEP1599 if txn is None:1600 txn = self._db1601 with lmdb_prefix_iterator(txn, prefix) as it:1602 for key,val in it:1603 yield key, val1604 @staticmethod1605 def _split_disk_block_key(prefix_len, key):1606 return tuple(key[prefix_len:].split(_KEY_SEP))1607 @staticmethod1608 def _join_disk_block_data(prefix, apri_json, start_n_bytes, length_bytes):1609 return (1610 prefix +1611 apri_json + _KEY_SEP +1612 start_n_bytes + _KEY_SEP +1613 length_bytes1614 )1615 def _convert_disk_block_key(self, prefix_len, key, apri = None, txn = None):1616 """1617 :param prefix_len: (type `int`) Positive.1618 :param key: (type `bytes`)1619 :param apri: (type `Apri_Info`, default None) If `None`, the relevant `apri` is acquired through a database1620 query.1621 :param txn: (type `lmbd.Transaction`, default `None`) The transaction to query. 
If `None`, then use open a new1622 transaction and commit it after this method resolves.1623 :return: (type `Apri_Info`)1624 :return (type `int`) start_n1625 :return (type `int`) length, non-negative1626 """1627 apri_id, start_n_bytes, length_bytes = Register._split_disk_block_key(prefix_len, key)1628 if apri is None:1629 apri_json = self._get_apri_json_by_id(apri_id, txn)1630 apri = Apri_Info.from_json(apri_json.decode("ASCII"))1631 return (1632 apri,1633 int(start_n_bytes.decode("ASCII")) + self._start_n_head * self._start_n_tail_mod,1634 int(length_bytes.decode("ASCII"))1635 )1636 def _check_blk_compressed_keys_raise(self, blk_key, compressed_key, apri, apri_json, start_n, length):1637 if compressed_key is None and blk_key is None:1638 compressed_key = self._get_disk_block_key(_COMPRESSED_KEY_PREFIX, apri, apri_json, start_n, length, False)1639 if blk_key is not None and compressed_key is None:1640 compressed_key = _COMPRESSED_KEY_PREFIX + blk_key[_BLK_KEY_PREFIX_LEN : ]1641 elif compressed_key is not None and blk_key is None:1642 blk_key = _BLK_KEY_PREFIX + compressed_key[_COMPRESSED_KEY_PREFIX_LEN : ]1643 if apri is None:1644 apri = Apri_Info.from_json(apri_json.decode("ASCII"))1645 if not lmdb_has_key(self._db, blk_key) or not lmdb_has_key(self._db, compressed_key):1646 raise Data_Not_Found_Error(1647 Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(apri, start_n, length)1648 )1649 return blk_key, compressed_key1650 def _check_blk_compressed_files_raise(self, blk_key, compressed_key, apri, start_n, length):1651 with self._db.begin() as txn:1652 blk_val = txn.get(blk_key)1653 compressed_val = txn.get(compressed_key)1654 blk_filename = self._local_dir / blk_val.decode("ASCII")1655 if compressed_val != _IS_NOT_COMPRESSED_VAL:1656 compressed_filename = self._local_dir / compressed_val.decode("ASCII")1657 if not compressed_filename.exists() or not blk_filename.exists():1658 raise Data_Not_Found_Error(1659 
Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1660 )1661 return blk_filename, compressed_filename1662 else:1663 if not blk_filename.exists():1664 raise Data_Not_Found_Error(1665 Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1666 )1667 return blk_filename, None1668 @staticmethod1669 def _check_apri_start_n_length_raise(apri, start_n, length):1670 if not isinstance(apri, Apri_Info):1671 raise TypeError("`apri` must be of type `Apri_Info`")1672 if not is_signed_int(start_n) and start_n is not None:1673 raise TypeError("start_n` must be an `int`")1674 elif start_n is not None:1675 start_n = int(start_n)1676 if not is_signed_int(length) and length is not None:1677 raise TypeError("`length` must be an `int`")1678 elif length is not None:1679 length = int(length)1680 if start_n is not None and start_n < 0:1681 raise ValueError("`start_n` must be non-negative")1682 if length is not None and length < 0:1683 raise ValueError("`length` must be non-negative")1684 return start_n, length1685 # def _compress_helper_check_keys(self, compressed_key, apri, start_n, length):1686 # """Check status of the database and raise errors if anything is wrong.1687 #1688 # :param compressed_key: (type `bytes`) prefix is `_COMPRESSED_KEY_PREFIX`)1689 # :param apri: (type `Apri_Info`)1690 # :param start_n: (type `int`)1691 # :param length: (type `int`) non-negative1692 # :raise Compression_Error: If the `Block` has already been compressed.1693 # :raise Data_Not_Found_Error1694 # :return: (type `pathlib.Path`) The path of the data to compress.1695 # """1696 #1697 # blk_key, compressed_key = self._check_blk_compressed_keys_raise(1698 # None, compressed_key, apri, None, start_n, length1699 # )1700 #1701 # with self._db.begin() as txn:1702 # compressed_val = txn.get(compressed_key)1703 #1704 # if compressed_val != _IS_NOT_COMPRESSED_VAL:1705 # raise Compression_Error(1706 # "The disk `Block` with the following data has 
already been compressed: " +1707 # f"{str(apri)}, start_n = {start_n}, length = {length}"1708 # )1709 #1710 # with self._db.begin() as txn:1711 # blk_filename = self._local_dir / txn.get(blk_key).decode("ASCII")1712 #1713 # if not blk_filename.exists():1714 # raise Data_Not_Found_Error(1715 # Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_FULL.format(str(apri), start_n, length)1716 # )1717 #1718 # return blk_filename1719 #1720 # def _compress_helper_open_zipfile(self, compression_level):1721 # """Open a zip file with a random name. The handle must be closed manually.1722 #1723 # :return: (type `bytes`) If compression is successful, the appropriate compression key should be updated with1724 # this value.1725 # :return (type `pathlib.Path`) The path to the zip file.1726 # :return: (type `zipfile.ZipFile`) The zip file handle. This must be closed manually later.1727 # """1728 #1729 # compressed_filename = random_unique_filename(self._local_dir, COMPRESSED_FILE_SUFFIX)1730 #1731 # compressed_val = compressed_filename.name.encode("ASCII")1732 #1733 # compressed_fh = zipfile.ZipFile(1734 # compressed_filename, # target filename1735 # "x", # zip mode (write, but don't overwrite)1736 # zipfile.ZIP_DEFLATED, # compression mode1737 # True, # use zip641738 # compression_level,1739 # strict_timestamps=False # change timestamps of old or new files1740 # )1741 #1742 # return compressed_val, compressed_filename, compressed_fh1743 #1744 # @staticmethod1745 # def _compress_helper_write_data(compressed_fh, blk_filename):1746 # """Compress the data.1747 #1748 # :param compressed_fh: (type `zipfile.ZipFile`)1749 # :param blk_filename: (type `pathlib.Path`)1750 # """1751 #1752 # compressed_fh.write(blk_filename, blk_filename.name)1753 #1754 # def _compress_helper_update_key(self, compressed_key, compressed_val):1755 # """If compression is successful, update the database.1756 #1757 # :param compressed_key: (type `bytes`)1758 # :param compressed_val: (type `bytes`)1759 # """1760 #1761 # 
with self._db.begin(write = True) as txn:1762 # txn.put(compressed_key, compressed_val)1763 @staticmethod1764 def _compress_helper_clean_uncompressed_data(compressed_filename, blk_filename):1765 """Remove uncompressed data after successful compression.1766 :param compressed_filename: (type `pathlib.Path`)1767 :param blk_filename: (type `pathlib.Path`) The uncompressed data to clean.1768 """1769 if compressed_filename.exists():1770 if blk_filename.is_dir():1771 shutil.rmtree(blk_filename)1772 elif blk_filename.is_file():1773 blk_filename.unlink(missing_ok = False)1774 else:1775 raise RuntimeError(f"Failed to delete uncompressed data at `{str(blk_filename)}`.")1776 # make a ghost file with the same name so that `random_unique_filename` works as intended1777 blk_filename.touch(exist_ok = False)1778 else:1779 raise Compression_Error(f"Failed to create zip file at `{str(compressed_filename)}`.")1780 # def _decompress_helper(self, apri, start_n, length):1781 #1782 # blk_key, compressed_key = self._check_blk_compressed_keys_raise(None, None, apri, None, start_n, length)1783 #1784 # with self._db.begin() as txn:1785 # compressed_val = txn.get(compressed_key)1786 #1787 # if compressed_val == _IS_NOT_COMPRESSED_VAL:1788 #1789 # raise Decompression_Error(1790 # "The disk `Block` with the following data is not compressed: " +1791 # f"{str(apri)}, start_n = {start_n}, length = {length}"1792 # )1793 #1794 # with self._db.begin() as txn:1795 # blk_filename = txn.get(blk_key).decode("ASCII")1796 #1797 # compressed_filename = self._local_dir / compressed_val.decode("ASCII")1798 #1799 # with zipfile.ZipFile(compressed_filename, "r") as compressed_fh:1800 #1801 # # delete ghost file1802 # (self._local_dir / blk_filename).unlink(False)1803 #1804 # try:1805 # blk_filename = compressed_fh.extract(blk_filename, self._local_dir)1806 #1807 # except Exception as e:1808 #1809 # (self._local_dir / blk_filename).touch(exist_ok = False)1810 # raise e1811 #1812 # try:1813 #1814 # with 
def _resolve_start_n_length(self, apri, start_n, length):
    """Resolve a possibly-unspecified `start_n` / `length` pair against saved disk `Block`s.

    :param apri: (type `Apri_Info`)
    :param start_n: (type `int` or `NoneType`) Non-negative.
    :param length: (type `int` or `NoneType`) Positive.
    :raise Data_Not_Found_Error: If no matching disk `Block` exists.
    :raise ValueError: If `start_n is None and length is not None`.
    :return: (type `int`) Resolved `start_n`.
    :return: (type `int`) Resolved `length`.
    """

    if start_n is not None and length is not None:
        # Fully specified — nothing to resolve.
        return start_n, length

    elif start_n is not None and length is None:
        # `start_n` fixed: scan every block key with this apri/start_n and keep the longest.
        key = self._get_disk_block_key(_BLK_KEY_PREFIX, apri, None, start_n, 1, False)
        first_sep = key.find(_KEY_SEP)
        second_sep = key.find(_KEY_SEP, first_sep + 1)
        # The prefix through the second separator matches every length for this start_n.
        prefix = key[ : second_sep + 1]

        index = -1
        largest_length = None
        key_with_largest_length = None

        with lmdb_prefix_iterator(self._db, prefix) as it:
            for index, (key, _) in enumerate(it):
                length = int(Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, key)[2].decode("ASCII"))
                if largest_length is None or length > largest_length:
                    largest_length = length
                    key_with_largest_length = key

        if index == -1:
            raise Data_Not_Found_Error(f"No disk `Block`s found with {str(apri)} and start_n = {start_n}.")

        return self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key_with_largest_length, apri)[1:]

    elif start_n is None and length is None:
        # Neither given: find the smallest start_n for this apri, then recurse to resolve its length.
        prefix = _BLK_KEY_PREFIX + self._get_id_by_apri(apri, None, False) + _KEY_SEP

        index = -1
        smallest_start_n = None

        with lmdb_prefix_iterator(self._db, prefix) as it:
            for index, (key, _) in enumerate(it):
                start_n = int(Register._split_disk_block_key(_BLK_KEY_PREFIX_LEN, key)[1].decode("ASCII"))
                if smallest_start_n is None or start_n < smallest_start_n:
                    smallest_start_n = start_n

        if index == -1:
            raise Data_Not_Found_Error(f"No disk `Block`s found with {str(apri)}.")

        return self._resolve_start_n_length(apri, smallest_start_n, None)

    else:
        raise ValueError(f"If you specify a `Block` length, you must also specify a `start_n`.")

#################################
#    PUBLIC RAM BLK METHODS     #

def add_ram_block(self, blk):
    """Register `blk` as a RAM `Block` of this `Register` (no-op if already added)."""

    if not isinstance(blk, Block):
        raise TypeError("`blk` must be of type `Block`.")

    # Identity comparison: the same object is never added twice,
    # but two distinct equal blocks may both be held.
    if all(ram_blk is not blk for ram_blk in self._ram_blks):
        self._ram_blks.append(blk)

def remove_ram_block(self, blk):
    """Forget the RAM `Block` `blk` (matched by identity, not equality).

    :raise Data_Not_Found_Error: If `blk` was never added.
    """

    if not isinstance(blk, Block):
        raise TypeError("`blk` must be of type `Block`.")

    for index, ram_blk in enumerate(self._ram_blks):
        if ram_blk is blk:
            del self._ram_blks[index]
            return

    raise Data_Not_Found_Error(f"No RAM disk block found.")

def get_ram_block_by_n(self, apri, n, recursively = False):
    """Return the RAM `Block` with info `apri` whose index range contains `n`.

    :param apri: (type `Apri_Info`)
    :param n: (type `int`) Non-negative.
    :param recursively: (type `bool`, default `False`) Also search subregisters.
    :raise Data_Not_Found_Error: If no such `Block` is found.
    """

    if not isinstance(apri, Apri_Info):
        raise TypeError("`apri` must be of type `Apri_Info`.")

    if not is_signed_int(n):
        raise TypeError("`n` must be of type `int`.")
    else:
        n = int(n)

    if not isinstance(recursively, bool):
        raise TypeError("`recursively` must be of type `bool`.")

    if n < 0:
        raise IndexError("`n` must be non-negative")

    for blk in self._ram_blks:
        start_n = blk.get_start_n()
        if blk.get_apri() == apri and start_n <= n < start_n + len(blk):
            return blk

    if recursively:

        self._check_open_raise("get_ram_block_by_n")

        for subreg in self._iter_subregisters():
            with subreg._recursive_open(True) as subreg:
                try:
                    # NOTE(review): delegates to `get_disk_block_by_n`, unlike
                    # `get_all_ram_blocks`, which recurses into its RAM counterpart —
                    # confirm whether disk fallback is intentional here.
                    return subreg.get_disk_block_by_n(apri, n, True)
                except Data_Not_Found_Error:
                    pass

    raise Data_Not_Found_Error(
        Register._DISK_BLOCK_DATA_NOT_FOUND_ERROR_MSG_N.format(str(apri), n)
    )
def get_all_ram_blocks(self, apri, recursively = False):
    """Yield every RAM `Block` of this `Register` (and optionally of subregisters) with info `apri`.

    :param apri: (type `Apri_Info`)
    :param recursively: (type `bool`, default `False`) Also search subregisters.
    """

    if not isinstance(apri, Apri_Info):
        raise TypeError("`apri` must be of type `Apri_Info`.")

    if not isinstance(recursively, bool):
        raise TypeError("`recursively` must be of type `bool`.")

    for blk in self._ram_blks:
        if blk.get_apri() == apri:
            yield blk

    if recursively:

        self._check_open_raise("get_all_ram_blocks")

        for subreg in self._iter_subregisters():
            with subreg._recursive_open(True) as subreg:
                yield from subreg.get_all_ram_blocks(apri, True)

#################################
#    PROTEC RAM BLK METHODS     #

#################################
# PUBLIC RAM & DISK BLK METHODS #

def __getitem__(self, apri_and_n_and_recursively):
    """Index this `Register` by `(apri, n)` or `(apri, slice)`, optionally `(…, recursively)`.

    An integer index returns the single element at position `n`; a slice returns a
    lazy iterator over the selected elements.
    """

    short = apri_and_n_and_recursively

    # Validate the overall shape: a 2- or 3-tuple of (Apri_Info, int-or-slice[, bool]).
    well_formed = (
        isinstance(short, tuple)
        and 2 <= len(short) <= 3
        and isinstance(short[0], Apri_Info)
        and (is_signed_int(short[1]) or isinstance(short[1], slice))
        and (len(short) != 3 or isinstance(short[2], bool))
    )
    if not well_formed:
        raise TypeError(Register.___GETITEM___ERROR_MSG)

    # Negative slice bounds are not supported.
    if isinstance(short[1], slice):
        bad_start = short[1].start is not None and short[1].start < 0
        bad_stop = short[1].stop is not None and short[1].stop < 0
        if bad_start or bad_stop:
            raise ValueError(Register.___GETITEM___ERROR_MSG)

    # Unpack, defaulting `recursively` to False for 2-tuples.
    if len(short) == 2:
        apri, n = apri_and_n_and_recursively
        recursively = False
    else:
        apri, n, recursively = apri_and_n_and_recursively

    if isinstance(n, slice):
        # Slice: hand back a lazy element iterator.
        return _Element_Iter(self, apri, n, recursively)
    else:
        # Single index: locate the containing disk block and index into it.
        blk = self.get_disk_block_by_n(apri, n)
        return blk[n]
def all_intervals(self, apri, combine = True, recursively = False):
    """Return the `(start_n, length)` intervals of all RAM and disk `Block`s with info `apri`.

    :param apri: (type `Apri_Info`)
    :param combine: (type `bool`, default `True`) Merge overlapping and adjacent intervals.
    :param recursively: (type `bool`, default `False`) Also include subregisters' `Block`s.
    :return: (type `list`) Of 2-tuples `(start_n, length)`, sorted by increasing `start_n`
        and, within equal `start_n`, decreasing `length`.
    """

    if not isinstance(apri, Apri_Info):
        raise TypeError("`apri` must be of type `Apri_Info`.")

    if not isinstance(combine, bool):
        raise TypeError("`combine` must be of type `bool`.")

    if not isinstance(recursively, bool):
        raise TypeError("`recursively` must be of type `bool`.")

    intervals_sorted = sorted(
        [
            (start_n, length)
            for _, start_n, length in self._iter_converted_ram_and_disk_block_datas(apri, recursively)
        ],
        key = lambda t: (t[0], -t[1])
    )

    if not combine:
        return intervals_sorted

    # First pass: fold each interval into any already-kept interval it overlaps,
    # extending the kept interval's length when the new one reaches further right.
    intervals_reduced = []
    for int1 in intervals_sorted:
        for i, int2 in enumerate(intervals_reduced):
            if intervals_overlap(int1, int2):
                a1, l1 = int1
                a2, l2 = int2
                if a2 + l2 < a1 + l1:
                    intervals_reduced[i] = (a2, a1 + l1 - a2)
                break
        else:
            intervals_reduced.append(int1)

    # Second pass: merge intervals that are adjacent or overlapping after reduction.
    intervals_combined = []
    for start_n, length in intervals_reduced:
        if len(intervals_combined) == 0 or intervals_combined[-1][0] + intervals_combined[-1][1] < start_n:
            intervals_combined.append((start_n, length))
        else:
            # BUG FIX: keep `(start, length)` — the original stored the *endpoint*
            # `start_n + length` in the length slot, corrupting every merged interval
            # whose predecessor did not start at 0.
            prev_start = intervals_combined[-1][0]
            intervals_combined[-1] = (prev_start, start_n + length - prev_start)

    return intervals_combined

#################################
# PROTEC RAM & DISK BLK METHODS #

def _iter_converted_ram_and_disk_block_datas(self, apri, recursively = False):
    """Yield `(apri, start_n, length)` for every RAM and disk `Block` with info `apri`.

    :param apri: (type `Apri_Info`)
    :param recursively: (type `bool`, default `False`) Also include subregisters' `Block`s.
    """

    for blk in self._ram_blks:
        if blk.get_apri() == apri:
            yield apri, blk.get_start_n(), len(blk)

    for key, _ in self._iter_disk_block_pairs(_BLK_KEY_PREFIX, apri, None):
        yield self._convert_disk_block_key(_BLK_KEY_PREFIX_LEN, key, apri)

    if recursively:
        for subreg in self._iter_subregisters():
            with subreg._recursive_open(True) as subreg:
                # BUG FIX: the original recursed into `_iter_ram_and_disk_block_datas`,
                # a name that is not defined anywhere — AttributeError at runtime.
                # Recurse into this method itself, matching the pattern of the other
                # recursive iterators (e.g. `get_all_ram_blocks`).
                for data in subreg._iter_converted_ram_and_disk_block_datas(apri, True):
                    yield data
class Pickle_Register(Register):
    """A `Register` whose disk `Block` segments are serialized with `pickle`."""

    @classmethod
    def dump_disk_data(cls, data, filename, **kwargs):
        """Pickle `data` to `filename` (suffix forced to ".pkl") and return the final path."""

        if len(kwargs) > 0:
            raise KeyError("`Pickle_Register.add_disk_block` accepts no keyword-arguments.")

        filename = filename.with_suffix(".pkl")
        with filename.open("wb") as fh:
            pickle.dump(data, fh)

        return filename

    @classmethod
    def load_disk_data(cls, filename, **kwargs):
        """Unpickle and return `(data, filename)`."""

        if len(kwargs) > 0:
            raise KeyError("`Pickle_Register.get_disk_block` accepts no keyword-arguments.")

        with filename.open("rb") as fh:
            return pickle.load(fh), filename

    @classmethod
    def clean_disk_data(cls, filename, **kwargs):
        # NOTE(review): intentionally a no-op here (matching the test stubs), whereas
        # `Numpy_Register.clean_disk_data` unlinks the file — confirm this asymmetry.
        pass

Register.add_subclass(Pickle_Register)

class Numpy_Register(Register):
    """A `Register` whose disk `Block` segments are `numpy` arrays saved as ".npy" files."""

    @classmethod
    def dump_disk_data(cls, data, filename, **kwargs):
        """Save the array `data` to `filename` (suffix forced to ".npy") and return the final path."""

        if len(kwargs) > 0:
            raise KeyError("`Numpy_Register.add_disk_block` accepts no keyword-arguments.")

        filename = filename.with_suffix(".npy")
        # Pickling is disabled so only plain numeric arrays round-trip.
        np.save(filename, data, allow_pickle = False, fix_imports = False)

        return filename

    @classmethod
    def load_disk_data(cls, filename, **kwargs):
        """Load the array at `filename`, optionally memory-mapped, and return `(data, filename)`.

        :param mmap_mode: (keyword-only, default `None`) One of `None`, 'r+', 'r', 'w+', 'c';
            passed through to `numpy.load`.
        """

        if "mmap_mode" in kwargs:
            mmap_mode = kwargs["mmap_mode"]
        else:
            mmap_mode = None

        if len(kwargs) > 1:
            raise KeyError("`Numpy_Register.get_disk_data` only accepts the keyword-argument `mmap_mode`.")

        if mmap_mode not in [None, "r+", "r", "w+", "c"]:
            raise ValueError(
                "The keyword-argument `mmap_mode` for `Numpy_Register.get_disk_block` can only have the values " +
                "`None`, 'r+', 'r', 'w+', 'c'. Please see " +
                "https://numpy.org/doc/stable/reference/generated/numpy.memmap.html#numpy.memmap for more information."
            )

        return np.load(filename, mmap_mode = mmap_mode, allow_pickle = False, fix_imports = False), filename
Please see " +2060 "https://numpy.org/doc/stable/reference/generated/numpy.memmap.html#numpy.memmap for more information."2061 )2062 return np.load(filename, mmap_mode = mmap_mode, allow_pickle = False, fix_imports = False), filename2063 @classmethod2064 def clean_disk_data(cls, filename, **kwargs):2065 filename = Path(filename)2066 filename = filename.with_suffix(".npy")2067 if not filename.is_absolute():2068 raise ValueError(NOT_ABSOLUTE_ERROR_MESSAGE.format(str(filename)))2069 filename.unlink(missing_ok = False)2070 def get_disk_block(self, apri, start_n = None, length = None, return_metadata = False, recursively = False, **kwargs):2071 """2072 :param apri: (type `Apri_Info`)2073 :param start_n: (type `int`)2074 :param length: (type `length`) non-negative/2075 :param return_metadata: (type `bool`, default `False`) Whether to return a `File_Metadata` object, which2076 contains file creation date/time and size of dumped saved on the disk.2077 :param recursively: (type `bool`, default `False`) Search all subregisters for the `Block`.2078 :param mmap_mode: (type `str`, default `None`) Load the Numpy file using memory mapping, see2079 https://numpy.org/doc/stable/reference/generated/numpy.memmap.html#numpy.memmap for more information.2080 :return: (type `File_Metadata`) If `return_metadata is True`.2081 """2082 ret = super().get_disk_block(apri, start_n, length, return_metadata, recursively, **kwargs)2083 if return_metadata:2084 blk = ret[0]2085 else:2086 blk = ret2087 if isinstance(blk.get_segment(), np.memmap):2088 blk = Memmap_Block(blk.get_segment(), blk.get_apri(), blk.get_start_n())2089 if return_metadata:2090 return blk, ret[1]2091 else:2092 return blk2093 def concatenate_disk_blocks(self, apri, start_n = None, length = None, delete = False, return_metadata = False, debug = 0):2094 """Concatenate several `Block`s into a single `Block` along axis 0 and save the new one to the disk.2095 If `delete = True`, then the smaller `Block`s are deleted automatically.2096 
The interval `range(start_n, start_n + length)` must be the disjoint union of intervals of the form2097 `range(blk.get_start_n(), blk.get_start_n() + len(blk))`, where `blk` is a disk `Block` with `Apri_Info`2098 given by `apri`.2099 Length-0 `Block`s are ignored.2100 If `start_n` is not specified, it is taken to be the smallest `start_n` of any `Block` saved to this2101 `Register`. If `length` is not specified, it is taken to be the length of the largest2102 contiguous set of indices that start with `start_n`. If `start_n` is not specified but `length` is, a...

Full Screen

Full Screen

test_info.py

Source:test_info.py Github

copy

Full Screen

1import json2from copy import copy3from unittest import TestCase4from cornifer import Apri_Info, Apos_Info5class Test__Info(TestCase):6 def test___init__(self):7 with self.assertRaises(ValueError):8 Apri_Info()9 with self.assertRaises(ValueError):10 Apri_Info(_json ="sup")11 with self.assertRaises(ValueError):12 Apri_Info(_hash ="sup")13 with self.assertRaises(ValueError):14 Apri_Info(lst = [1, 2, 3])15 with self.assertRaises(ValueError):16 Apri_Info(dct = {1:2})17 try:18 Apri_Info(tup = (1, 2))19 except ValueError:20 self.fail("tuples are hashable")21 try:22 Apri_Info(msg ="hey")23 except ValueError:24 self.fail("strings are hashable")25 try:26 Apri_Info(pi ="π")27 except ValueError:28 self.fail("pi is okay")29 try:30 Apri_Info(double_null ="\0\0")31 except ValueError:32 self.fail("double null okay")33 apri = Apri_Info(msg ="primes", mod4 = 1)34 self.assertEqual(apri.msg, "primes")35 self.assertEqual(apri.mod4, 1)36 def test__from_json(self):37 with self.assertRaises(ValueError):38 Apri_Info.from_json("[\"no\"]")39 apri = Apri_Info.from_json("{\"msg\": \"primes\"}")40 self.assertEqual(apri.msg, "primes")41 apri = Apri_Info.from_json("{\"mod4\": 1}")42 self.assertEqual(apri.mod4, 1)43 apri = Apri_Info.from_json("{\"tup\": [1,2,3]}")44 self.assertEqual(apri.tup, (1,2,3))45 apri = Apri_Info.from_json("""{"msg" : "primes", "respective" : "Apri_Info.from_json({\\"haha\\" : \\"lol\\"})" }""")46 self.assertEqual(apri.msg, "primes")47 self.assertEqual(apri.respective, Apri_Info(haha = "lol"))48 def test__to_json(self):49 _json = Apri_Info(msg ="primes", mod4 = 3).to_json()50 self.assertTrue(isinstance(_json, str))51 obj = json.loads(_json)52 self.assertTrue(isinstance(obj, dict))53 self.assertEqual(len(obj), 2)54 self.assertEqual(obj, {"msg": "primes", "mod4": 3})55 _json = Apri_Info(msg="primes", primes = (2, 3, 5)).to_json()56 self.assertTrue(isinstance(_json, str))57 obj = json.loads(_json)58 self.assertTrue(isinstance(obj, dict))59 self.assertEqual(len(obj), 2)60 
self.assertEqual(obj, {"msg": "primes", "primes": [2,3,5]})61 apri = Apri_Info(msg = "primes", primes = (2,3,5), respective = Apri_Info(lol = "haha"))62 self.assertEqual(apri, Apri_Info.from_json(apri.to_json()))63 def test___hash__(self):64 self.assertEqual(65 hash(Apri_Info(msg ="primes", mod4 = 1)),66 hash(Apri_Info(mod4 = 1, msg ="primes"))67 )68 self.assertNotEqual(69 hash(Apri_Info(msg ="primes", mod4 = 1)),70 hash(Apri_Info(mod4 = 1))71 )72 def test___eq__(self):73 self.assertEqual(74 Apri_Info(msg ="primes", mod4 = 1),75 Apri_Info(mod4 = 1, msg ="primes")76 )77 self.assertNotEqual(78 Apri_Info(msg ="primes", mod4 = 1),79 Apri_Info(mod4 = 1)80 )81 self.assertNotEqual(82 Apri_Info(mod4 = 1),83 Apri_Info(msg ="primes", mod4 = 1)84 )85 self.assertEqual(86 Apri_Info(msg = "primes", respective = Apri_Info(hello = "hi", num = 7)),87 Apri_Info(respective = Apri_Info(num = 7, hello = "hi"), msg = "primes")88 )89 self.assertNotEqual(90 Apri_Info(msg = "primes", respective = Apri_Info(hello = "hi", num = 8)),91 Apri_Info(respective = Apri_Info(num = 7, hello = "hi"), msg = "primes")92 )93 def test___copy__(self):94 self.assertEqual(95 Apri_Info(no = "no"),96 copy(Apri_Info(no = "no"))97 )98 apri = Apri_Info(msg ="primes")99 self.assertEqual(100 apri,101 copy(apri)102 )103 self.assertEqual(104 hash(apri),105 hash(copy(apri))106 )107 apri = Apri_Info(msg ="primes", mod4 = 1)108 self.assertEqual(109 apri,110 copy(apri)111 )112 self.assertEqual(113 hash(apri),114 hash(copy(apri))115 )116 def test_iter_inner_info(self):117 apri = Apri_Info(descr = "descr")118 self.assertEqual(119 {(None, Apri_Info(descr = "descr"))},120 set(apri.iter_inner_info())121 )122 self.assertEqual(123 1,124 sum(1 for _ in apri.iter_inner_info())125 )126 apri = Apri_Info(descr = Apri_Info(num = 7))127 self.assertEqual(128 {129 (None, Apri_Info(descr = Apri_Info(num = 7))),130 ("descr", Apri_Info(num = 7))131 },132 set(apri.iter_inner_info())133 )134 self.assertEqual(135 2,136 sum(1 for _ in 
apri.iter_inner_info())137 )138 apri = Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hello")))139 self.assertEqual(140 {141 (None, Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hello")))),142 ("descr", Apri_Info(blub = Apri_Info(hi = "hello"))),143 ("blub", Apri_Info(hi = "hello"))144 },145 set(apri.iter_inner_info())146 )147 self.assertEqual(148 3,149 sum(1 for _ in apri.iter_inner_info())150 )151 apri = Apri_Info(num = 7, descr = Apri_Info(blub = Apri_Info(hi = "hello")))152 self.assertEqual(153 {154 (None, Apri_Info(num = 7, descr = Apri_Info(blub = Apri_Info(hi = "hello")))),155 ("descr", Apri_Info(blub = Apri_Info(hi = "hello"))),156 ("blub", Apri_Info(hi = "hello"))157 },158 set(apri.iter_inner_info())159 )160 self.assertEqual(161 3,162 sum(1 for _ in apri.iter_inner_info())163 )164 apri = Apri_Info(num = 7, descr = Apri_Info(no = "yes", blub = Apri_Info(hi = "hello")))165 self.assertEqual(166 {167 (None, Apri_Info(num = 7, descr = Apri_Info(no = "yes", blub = Apri_Info(hi = "hello")))),168 ("descr", Apri_Info(no = "yes", blub = Apri_Info(hi = "hello"))),169 ("blub", Apri_Info(hi = "hello"))170 },171 set(apri.iter_inner_info())172 )173 self.assertEqual(174 3,175 sum(1 for _ in apri.iter_inner_info())176 )177 apri = Apri_Info(num = Apri_Info(descr = "hi"), two = Apri_Info(descr = "hi"))178 self.assertEqual(179 {180 (None, Apri_Info(num = Apri_Info(descr = "hi"), two = Apri_Info(descr = "hi"))),181 ("num", Apri_Info(descr = "hi")),182 ("two", Apri_Info(descr = "hi"))183 },184 set(apri.iter_inner_info())185 )186 self.assertEqual(187 3,188 sum(1 for _ in apri.iter_inner_info())189 )190 apri = Apri_Info(num = Apri_Info(descr = "hey"), two = Apri_Info(descr = "hi"))191 self.assertEqual(192 {193 (None, Apri_Info(num = Apri_Info(descr = "hey"), two = Apri_Info(descr = "hi"))),194 ("num", Apri_Info(descr = "hey")),195 ("two", Apri_Info(descr = "hi"))196 },197 set(apri.iter_inner_info())198 )199 self.assertEqual(200 3,201 sum(1 for _ in 
apri.iter_inner_info())202 )203 def test_change_info(self):204 apri = Apri_Info(descr = "descr")205 with self.assertRaises(TypeError):206 apri.change_info(apri, 0)207 with self.assertRaises(TypeError):208 apri.change_info(0, apri)209 replaced = apri.change_info(Apri_Info(no = "yes"), Apri_Info(maybe = "maybe"))210 self.assertEqual(211 Apri_Info(descr = "descr"),212 replaced213 )214 replaced = apri.change_info(apri, Apri_Info(no = "yes"))215 self.assertEqual(216 Apri_Info(no = "yes"),217 replaced218 )219 apri = Apri_Info(descr = Apri_Info(num = 7))220 replaced = apri.change_info(Apri_Info(num = 7), Apri_Info(_num = 8))221 self.assertEqual(222 Apri_Info(descr = Apri_Info(_num = 8)),223 replaced224 )225 replaced = apri.change_info(apri, Apri_Info(hello = "hi"))226 self.assertEqual(227 Apri_Info(hello = "hi"),228 replaced229 )230 apri = Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hello")))231 replaced = apri.change_info(Apri_Info(hi = "hello"), Apri_Info(hi = "hellox"))232 self.assertEqual(233 Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hellox"))),234 replaced235 )236 replaced = apri.change_info(Apri_Info(blub = Apri_Info(hi = "hello")), Apri_Info(bloob = Apri_Info(hi = "hello")))237 self.assertEqual(238 Apri_Info(descr = Apri_Info(bloob = Apri_Info(hi = "hello"))),239 replaced240 )241 replaced = apri.change_info(Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hello"))), Apri_Info(descr = "yes"))242 self.assertEqual(243 Apri_Info(descr = "yes"),244 replaced245 )246 apri = Apri_Info(num = 7, descr = Apri_Info(blub = Apri_Info(hi = "hello")))247 replaced = apri.change_info(Apri_Info(blub = Apri_Info(hi = "hello")), Apri_Info(bloob = Apri_Info(hi = "hello")))248 self.assertEqual(249 Apri_Info(num = 7, descr = Apri_Info(bloob = Apri_Info(hi = "hello"))),250 replaced251 )252 replaced = apri.change_info(Apri_Info(descr = Apri_Info(blub = Apri_Info(hi = "hello"))), Apri_Info(descr = "yes"))253 self.assertEqual(254 Apri_Info(num = 7, descr = Apri_Info(blub = 
Apri_Info(hi = "hello"))),255 replaced256 )257 replaced = apri.change_info(Apri_Info(num = 7, descr = Apri_Info(blub = Apri_Info(hi = "hello"))), Apri_Info(loot = "chest"))258 self.assertEqual(259 Apri_Info(loot = "chest"),260 replaced261 )262 apri = Apri_Info(num = Apri_Info(descr = "hi"), two = Apri_Info(descr = "hi"))263 replaced = apri.change_info(Apri_Info(descr = "hi") , Apri_Info(num = Apri_Info(descr = "hi")))264 self.assertEqual(265 Apri_Info(num = Apri_Info(num = Apri_Info(descr = "hi")), two = Apri_Info(num = Apri_Info(descr = "hi"))),266 replaced267 )268 apri = Apri_Info(num = Apri_Info(descr = "hey"), two = Apri_Info(descr = "hi"))269 replaced = apri.change_info(Apri_Info(descr = "hi") , Apri_Info(num = Apri_Info(descr = "hi")))270 self.assertEqual(271 Apri_Info(num = Apri_Info(descr = "hey"), two = Apri_Info(num = Apri_Info(descr = "hi"))),272 replaced...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run SeleniumBase automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful