How to use save_handler method in avocado

Best Python code snippet using avocado_python

test_checkpoint.py

Source:test_checkpoint.py Github

copy

Full Screen

# Tests for ignite.handlers Checkpoint / ModelCheckpoint / DiskSaver.
# (Reconstructed into conventional formatting from a line-mangled scrape;
# `dirname` is a pytest fixture supplied by the project's conftest.)
import os
import warnings
from unittest.mock import MagicMock

import pytest
import torch
import torch.nn as nn

from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint, DiskSaver, ModelCheckpoint

_PREFIX = "PREFIX"


class DummyModel(nn.Module):
    """Minimal 1-in/1-out linear model used as a checkpointable object."""

    def __init__(self):
        super(DummyModel, self).__init__()
        self.net = nn.Linear(1, 1)

    def forward(self, x):
        return self.net(x)


def test_checkpoint_wrong_input():
    with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
        Checkpoint(
            12,
            lambda x: x,
            "prefix",
        )
    with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
        Checkpoint(
            [
                12,
            ],
            lambda x: x,
            "prefix",
        )
    with pytest.raises(ValueError, match=r"No objects to checkpoint."):
        Checkpoint({}, lambda x: x, "prefix")
    model = DummyModel()
    to_save = {"model": model}
    with pytest.raises(TypeError, match=r"Argument `save_handler` should be callable"):
        Checkpoint(to_save, 12, "prefix")
    with pytest.raises(
        ValueError,
        match=r"If `score_name` is provided, then `score_function` should be also provided.",
    ):
        Checkpoint(to_save, lambda x: x, score_name="acc")
    with pytest.raises(TypeError, match=r"global_step_transform should be a function."):
        Checkpoint(
            to_save,
            lambda x: x,
            score_function=lambda e: 123,
            score_name="acc",
            global_step_transform=123,
        )


def test_checkpoint_default():
    def _test(to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        checkpointer = Checkpoint(to_save, save_handler=save_handler)
        assert checkpointer.last_checkpoint is None
        trainer = Engine(lambda e, b: None)
        trainer.state = State(epoch=0, iteration=0)
        checkpointer(trainer)
        assert save_handler.call_count == 1
        save_handler.assert_called_with(obj, "{}_0.pth".format(name))
        trainer.state.epoch = 12
        trainer.state.iteration = 1234
        checkpointer(trainer)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(obj, "{}_1234.pth".format(name))
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with("{}_0.pth".format(name))
        assert checkpointer.last_checkpoint == "{}_1234.pth".format(name)

    model = DummyModel()
    to_save = {"model": model}
    _test(to_save, model.state_dict(), "model")

    model = DummyModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    to_save = {"model": model, "optimizer": optimizer}
    _test(
        to_save,
        {"model": model.state_dict(), "optimizer": optimizer.state_dict()},
        "checkpoint",
    )


def test_checkpoint_with_global_step_transform():
    def _test(filename_prefix, to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        checkpointer = Checkpoint(
            to_save,
            save_handler=save_handler,
            filename_prefix=filename_prefix,
            global_step_transform=lambda e, _: e.state.epoch,
        )
        trainer = Engine(lambda e, b: None)
        trainer.state = State(epoch=1, iteration=1)
        checkpointer(trainer)
        assert save_handler.call_count == 1
        if len(filename_prefix) > 0:
            filename_prefix += "_"
        save_handler.assert_called_with(obj, "{}{}_1.pth".format(filename_prefix, name))
        trainer.state.epoch = 12
        trainer.state.iteration = 1234
        checkpointer(trainer)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(
            obj, "{}{}_12.pth".format(filename_prefix, name)
        )
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with(
            "{}{}_1.pth".format(filename_prefix, name)
        )
        assert checkpointer.last_checkpoint == "{}{}_12.pth".format(
            filename_prefix, name
        )

    for prefix in ["", "dummytask"]:
        model = DummyModel()
        to_save = {"model": model}
        _test(prefix, to_save, model.state_dict(), "model")

        model = DummyModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        to_save = {"model": model, "optimizer": optimizer}
        _test(
            prefix,
            to_save,
            {"model": model.state_dict(), "optimizer": optimizer.state_dict()},
            "checkpoint",
        )


def test_checkpoint_with_score_function():
    def _test(to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        checkpointer = Checkpoint(
            to_save, save_handler=save_handler, score_function=lambda e: e.state.score
        )
        trainer = Engine(lambda e, b: None)
        trainer.state = State(epoch=1, iteration=1, score=0.77)
        checkpointer(trainer)
        assert save_handler.call_count == 1
        save_handler.assert_called_with(obj, "{}_0.77.pth".format(name))
        trainer.state.epoch = 12
        trainer.state.iteration = 1234
        trainer.state.score = 0.78
        checkpointer(trainer)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(obj, "{}_0.78.pth".format(name))
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with("{}_0.77.pth".format(name))
        assert checkpointer.last_checkpoint == "{}_0.78.pth".format(name)

    model = DummyModel()
    to_save = {"model": model}
    _test(to_save, model.state_dict(), "model")

    model = DummyModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    to_save = {"model": model, "optimizer": optimizer}
    _test(
        to_save,
        {"model": model.state_dict(), "optimizer": optimizer.state_dict()},
        "checkpoint",
    )


def test_checkpoint_with_score_name_and_function():
    def _test(to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        checkpointer = Checkpoint(
            to_save,
            save_handler=save_handler,
            score_name="loss",
            score_function=lambda e: e.state.score,
        )
        trainer = Engine(lambda e, b: None)
        trainer.state = State(epoch=1, iteration=1, score=-0.77)
        checkpointer(trainer)
        assert save_handler.call_count == 1
        save_handler.assert_called_with(obj, "{}_loss=-0.77.pth".format(name))
        trainer.state.epoch = 12
        trainer.state.iteration = 1234
        trainer.state.score = -0.76
        checkpointer(trainer)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(obj, "{}_loss=-0.76.pth".format(name))
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with("{}_loss=-0.77.pth".format(name))
        assert checkpointer.last_checkpoint == "{}_loss=-0.76.pth".format(name)

    model = DummyModel()
    to_save = {"model": model}
    _test(to_save, model.state_dict(), "model")

    model = DummyModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    to_save = {"model": model, "optimizer": optimizer}
    _test(
        to_save,
        {"model": model.state_dict(), "optimizer": optimizer.state_dict()},
        "checkpoint",
    )


def test_checkpoint_with_score_function_and_trainer_epoch():
    def _test(to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        trainer = Engine(lambda e, b: None)
        evaluator = Engine(lambda e, b: None)
        trainer.state = State(epoch=11, iteration=1)
        checkpointer = Checkpoint(
            to_save,
            save_handler=save_handler,
            global_step_transform=lambda _1, _2: trainer.state.epoch,
            score_function=lambda e: e.state.metrics["val_acc"],
        )
        evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
        checkpointer(evaluator)
        assert save_handler.call_count == 1
        save_handler.assert_called_with(obj, "{}_11_0.77.pth".format(name))
        trainer.state.epoch = 12
        evaluator.state.metrics["val_acc"] = 0.78
        checkpointer(evaluator)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(obj, "{}_12_0.78.pth".format(name))
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with("{}_11_0.77.pth".format(name))
        assert checkpointer.last_checkpoint == "{}_12_0.78.pth".format(name)

    model = DummyModel()
    to_save = {"model": model}
    _test(to_save, model.state_dict(), "model")


def test_checkpoint_with_score_name_and_function_and_trainer_epoch():
    def _test(to_save, obj, name):
        save_handler = MagicMock()
        save_handler.remove = MagicMock()
        trainer = Engine(lambda e, b: None)
        evaluator = Engine(lambda e, b: None)
        trainer.state = State(epoch=11, iteration=1)
        checkpointer = Checkpoint(
            to_save,
            save_handler=save_handler,
            global_step_transform=lambda _1, _2: trainer.state.epoch,
            score_name="val_acc",
            score_function=lambda e: e.state.metrics["val_acc"],
        )
        evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
        checkpointer(evaluator)
        assert save_handler.call_count == 1
        save_handler.assert_called_with(obj, "{}_11_val_acc=0.77.pth".format(name))
        trainer.state.epoch = 12
        evaluator.state.metrics["val_acc"] = 0.78
        checkpointer(evaluator)
        assert save_handler.call_count == 2
        save_handler.assert_called_with(obj, "{}_12_val_acc=0.78.pth".format(name))
        assert save_handler.remove.call_count == 1
        save_handler.remove.assert_called_with("{}_11_val_acc=0.77.pth".format(name))
        assert checkpointer.last_checkpoint == "{}_12_val_acc=0.78.pth".format(name)

    model = DummyModel()
    to_save = {"model": model}
    _test(to_save, model.state_dict(), "model")


def test_model_checkpoint_args_validation(dirname):
    existing = os.path.join(dirname, "existing_dir")
    nonempty = os.path.join(dirname, "nonempty")
    os.makedirs(existing)
    os.makedirs(nonempty)
    with open(os.path.join(nonempty, "{}_name_0.pth".format(_PREFIX)), "w"):
        pass
    with pytest.raises(
        ValueError, match=r"with extension '.pth' or '.pth.tar' are already present "
    ):
        ModelCheckpoint(nonempty, _PREFIX)
    with pytest.raises(
        ValueError, match=r"Argument save_interval is deprecated and should be None"
    ):
        ModelCheckpoint(existing, _PREFIX, save_interval=42)
    with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
        ModelCheckpoint(
            os.path.join(dirname, "non_existing_dir"), _PREFIX, create_dir=False
        )
    with pytest.raises(
        ValueError,
        match=r"Argument save_as_state_dict is deprecated and should be True",
    ):
        ModelCheckpoint(existing, _PREFIX, create_dir=False, save_as_state_dict=False)
    with pytest.raises(
        ValueError, match=r"If `score_name` is provided, then `score_function` "
    ):
        ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name="test")
    with pytest.raises(TypeError, match=r"global_step_transform should be a function"):
        ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)
    h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
    assert h.last_checkpoint is None
    with pytest.raises(RuntimeError, match=r"No objects to checkpoint found."):
        h(None, [])


def test_model_checkpoint_simple_recovery(dirname):
    h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=1)
    model = DummyModel()
    to_save = {"model": model}
    h(engine, to_save)
    fname = h.last_checkpoint
    assert isinstance(fname, str)
    assert os.path.join(dirname, _PREFIX) in fname
    assert os.path.exists(fname)
    loaded_objects = torch.load(fname)
    assert loaded_objects == model.state_dict()


def test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname):
    def _test(ext, require_empty, archived):
        previous_fname = os.path.join(
            dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext)
        )
        with open(previous_fname, "w") as f:
            f.write("test")
        h = ModelCheckpoint(
            dirname,
            _PREFIX,
            create_dir=True,
            require_empty=require_empty,
            archived=archived,
        )
        engine = Engine(lambda e, b: None)
        engine.state = State(epoch=0, iteration=1)
        model = DummyModel()
        to_save = {"model": model}
        h(engine, to_save)
        fname = h.last_checkpoint
        ext = ".pth.tar" if archived else ".pth"
        assert isinstance(fname, str)
        assert (
            os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "model", 1, ext))
            == fname
        )
        assert os.path.exists(fname)
        assert os.path.exists(previous_fname)
        loaded_objects = torch.load(fname)
        assert loaded_objects == model.state_dict()
        os.remove(fname)

    _test(".txt", require_empty=True, archived=False)
    _test(".txt", require_empty=True, archived=True)
    _test(".pth", require_empty=False, archived=False)


def test_disk_saver_atomic(dirname):
    model = DummyModel()
    to_save_serializable = {"model": model}
    to_save_non_serializable = {"model": lambda x: x}

    def _test_existance(atomic, _to_save, expected):
        saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)
        fname = "test.pth"
        try:
            with warnings.catch_warnings():
                # Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
                # DummyModel. It won't be checked for correctness upon loading.
                warnings.simplefilter("ignore", category=UserWarning)
                saver(_to_save, fname)
        except Exception:
            pass
        fp = os.path.join(saver.dirname, fname)
        assert os.path.exists(fp) == expected
        if expected:
            saver.remove(fname)

    _test_existance(atomic=False, _to_save=to_save_serializable, expected=True)
    _test_existance(atomic=False, _to_save=to_save_non_serializable, expected=True)
    _test_existance(atomic=True, _to_save=to_save_serializable, expected=True)
    _test_existance(atomic=True, _to_save=to_save_non_serializable, expected=False)


def test_last_k(dirname):
    h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=0)
    model = DummyModel()
    to_save = {"model": model}
    h(engine, to_save)
    for i in range(1, 9):
        engine.state.iteration = i
        h(engine, to_save)
    expected = ["{}_{}_{}.pth".format(_PREFIX, "model", i) for i in [7, 8]]
    assert sorted(os.listdir(dirname)) == expected, "{} vs {}".format(
        sorted(os.listdir(dirname)), expected
    )


def test_disabled_n_saved(dirname):
    h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=0)
    model = DummyModel()
    to_save = {"model": model}
    num_iters = 100
    for i in range(num_iters):
        engine.state.iteration = i
        h(engine, to_save)
    saved_files = sorted(os.listdir(dirname))
    assert len(saved_files) == num_iters, "{}".format(saved_files)
    expected = sorted(
        ["{}_{}_{}.pth".format(_PREFIX, "model", i) for i in range(num_iters)]
    )
    assert saved_files == expected, "{} vs {}".format(saved_files, expected)


def test_best_k(dirname):
    scores = iter([1.2, -2.0, 3.1, -4.0])

    def score_function(_):
        return next(scores)

    h = ModelCheckpoint(
        dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function
    )
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=0)
    model = DummyModel()
    to_save = {"model": model}
    for _ in range(4):
        h(engine, to_save)
    expected = ["{}_{}_{}.pth".format(_PREFIX, "model", i) for i in [1.2, 3.1]]
    assert sorted(os.listdir(dirname)) == expected


def test_best_k_with_suffix(dirname):
    scores = [0.3456789, 0.1234, 0.4567, 0.134567]
    scores_iter = iter(scores)

    def score_function(engine):
        return next(scores_iter)

    h = ModelCheckpoint(
        dirname,
        _PREFIX,
        create_dir=False,
        n_saved=2,
        score_function=score_function,
        score_name="val_loss",
    )
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=0)
    model = DummyModel()
    to_save = {"model": model}
    for _ in range(4):
        engine.state.epoch += 1
        h(engine, to_save)
    expected = [
        "{}_{}_val_loss={:.7}.pth".format(_PREFIX, "model", scores[e - 1])
        for e in [1, 3]
    ]
    assert sorted(os.listdir(dirname)) == expected


def test_with_engine(dirname):
    def update_fn(_1, _2):
        pass

    name = "model"
    engine = Engine(update_fn)
    handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
    model = DummyModel()
    to_save = {"model": model}
    engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
    engine.run([0], max_epochs=4)
    expected = ["{}_{}_{}.pth".format(_PREFIX, name, i) for i in [3, 4]]
    assert sorted(os.listdir(dirname)) == expected


def test_with_state_dict(dirname):
    def update_fn(_1, _2):
        pass

    engine = Engine(update_fn)
    handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
    model = DummyModel()
    to_save = {"model": model}
    engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
    engine.run([0], max_epochs=4)
    saved_model = os.path.join(dirname, os.listdir(dirname)[0])
    load_model = torch.load(saved_model)
    assert not isinstance(load_model, DummyModel)
    assert isinstance(load_model, dict)
    model_state_dict = model.state_dict()
    loaded_model_state_dict = load_model
    for key in model_state_dict.keys():
        assert key in loaded_model_state_dict
        model_value = model_state_dict[key]
        loaded_model_value = loaded_model_state_dict[key]
        assert model_value.numpy() == loaded_model_value.numpy()


def test_valid_state_dict_save(dirname):
    model = DummyModel()
    h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
    engine = Engine(lambda e, b: None)
    engine.state = State(epoch=0, iteration=0)
    to_save = {"name": 42}
    with pytest.raises(TypeError, match=r"should have `state_dict` method"):
        h(engine, to_save)
    to_save = {"name": model}
    try:
        h(engine, to_save)
    except ValueError:
        pytest.fail("Unexpected ValueError")


def test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):
    model = DummyModel()
    optim = torch.optim.SGD(model.parameters(), lr=0.001)
    lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)

    def update_fn(engine, batch):
        x = torch.rand((4, 1))
        optim.zero_grad()
        y = model(x)
        loss = y.pow(2.0).sum()
        loss.backward()
        optim.step()
        lr_scheduler.step()

    engine = Engine(update_fn)
    handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
    engine.add_event_handler(
        Events.EPOCH_COMPLETED,
        handler,
        {
            "model": model,
            "optimizer": optim,
            "lr_scheduler": lr_scheduler,
        },
    )
    engine.run([0], max_epochs=4)
    saved_objects = sorted(os.listdir(dirname))
    # saved object is ['PREFIX_checkpoint_4.pth', ]
    saved_checkpoint = os.path.join(dirname, saved_objects[0])
    loaded_obj = torch.load(saved_checkpoint)
    for f in ["model", "optimizer", "lr_scheduler"]:
        assert f in loaded_obj
    loaded_model_state_dict = loaded_obj["model"]
    loaded_optimizer_state_dict = loaded_obj["optimizer"]
    loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
    assert isinstance(loaded_model_state_dict, dict)
    assert isinstance(loaded_optimizer_state_dict, dict)
    assert isinstance(loaded_lr_scheduler_state_dict, dict)
    model_state_dict = model.state_dict()
    for key in model_state_dict.keys():
        assert key in loaded_model_state_dict
        model_value = model_state_dict[key]
        loaded_model_value = loaded_model_state_dict[key]
        assert model_value.numpy() == loaded_model_value.numpy()
    optim_state_dict = optim.state_dict()
    for key in optim_state_dict.keys():
        assert key in loaded_optimizer_state_dict
        optim_value = optim_state_dict[key]
        loaded_optim_value = loaded_optimizer_state_dict[key]
        assert optim_value == loaded_optim_value
    lr_scheduler_state_dict = lr_scheduler.state_dict()
    for key in lr_scheduler_state_dict.keys():
        assert key in loaded_lr_scheduler_state_dict
        lr_scheduler_value = lr_scheduler_state_dict[key]
        loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
        assert lr_scheduler_value == loaded_lr_scheduler_value


def test_checkpoint_load_objects():
    with pytest.raises(TypeError, match=r"Argument checkpoint should be a dictionary"):
        Checkpoint.load_objects({}, [])
    with pytest.raises(TypeError, match=r"should have `load_state_dict` method"):
        Checkpoint.load_objects({"a": None}, {"a": None})
    model = DummyModel()
    to_load = {"model": model}
    with pytest.raises(
        ValueError, match=r"from `to_load` is not found in the checkpoint"
    ):
        Checkpoint.load_objects(to_load, {})
    model = DummyModel()
    to_load = {"model": model}
    model2 = DummyModel()
    chkpt = {"model": model2.state_dict()}
    Checkpoint.load_objects(to_load, chkpt)
    assert model.state_dict() == model2.state_dict()


def test_disksaver_wrong_input(dirname):
    with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
        DiskSaver("/tmp/non-existing-folder", create_dir=False)

    def _test(ext):
        previous_fname = os.path.join(
            dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext)
        )
        with open(previous_fname, "w") as f:
            f.write("test")
        with pytest.raises(
            ValueError, match=r"with extension '.pth' or '.pth.tar' are already present"
        ):
            DiskSaver(dirname, require_empty=True)

    _test(".pth")

Full Screen

Full Screen

cli.py

Source:cli.py Github

copy

Full Screen

import yaml
import click

import cro_tax_debtors
from cro_tax_debtors.screen import Screen
from cro_tax_debtors.debtors import Debtors, CategoryDone


@click.group()
@click.pass_context
@click.option('-f', '--file_path', type=click.File(mode='r'), help='Input YAML file')
def cli(ctx, file_path):
    """Load the YAML config and stash the porezna-uprava categories on the context.

    FIX: use yaml.safe_load instead of yaml.load — yaml.load without an explicit
    Loader can execute arbitrary Python from a crafted file, is deprecated since
    PyYAML 5.1, and raises TypeError in PyYAML 6.
    """
    ctx.obj = {'data': yaml.safe_load(file_path.read())['website']['porezna-uprava']}


@cli.command()
@click.pass_context
@click.option('-p', '--print_in_terminal', default=False, is_flag=True, help='Print in terminal')
def parse(ctx, print_in_terminal):
    """Scrape every enabled category page-by-page until the spider signals CategoryDone."""
    screen = Screen()
    for category in ctx.obj['data']:
        if not category['enabled']:
            continue
        # Hard upper bound on pagination; CategoryDone ends the loop earlier.
        for page in range(1, 2000):
            try:
                spider = getattr(cro_tax_debtors.spiders, category['spider'])(category['url'].format(page))
                save_handler = getattr(cro_tax_debtors.save_handlers, category['save_handler'])(category)
                Debtors(spider, save_handler, screen, category).parse(print_in_terminal)
            except CategoryDone:
                break


@cli.command()
@click.pass_context
@click.option('-n', '--name', help='Name of the debtor')
def find(ctx, name):
    """Look up a debtor by name in every enabled category's saved data."""
    screen = Screen()
    for category in ctx.obj['data']:
        if not category['enabled']:
            continue
        save_handler = getattr(cro_tax_debtors.save_handlers, category['save_handler'])(category)
        Debtors(save_handler=save_handler, screen=screen, category_data=category).find(name)


@cli.command()
@click.pass_context
def delete(ctx):
    """Delete stored data for every enabled category."""
    for category in ctx.obj['data']:
        if not category['enabled']:
            continue
        save_handler = getattr(cro_tax_debtors.save_handlers, category['save_handler'])(category)
        res = Debtors(save_handler=save_handler, category_data=category).delete()

Full Screen

Full Screen

__init__.py

Source:__init__.py Github

copy

Full Screen

import bpy

from . import load_handler
from . import save_handler


def register():
    """Attach the add-on's load/save handlers to Blender, idempotently."""
    if load_handler.load_handler not in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.append(load_handler.load_handler)
    if save_handler.save_handler not in bpy.app.handlers.save_post:
        bpy.app.handlers.save_post.append(save_handler.save_handler)


def unregister():
    """Detach the add-on's load/save handlers from Blender, idempotently."""
    if load_handler.load_handler in bpy.app.handlers.load_post:
        bpy.app.handlers.load_post.remove(load_handler.load_handler)
    # FIX: original tested `not in` here (copy-paste bug), so the save handler
    # was never removed on unregister. Mirror the load_handler branch above.
    if save_handler.save_handler in bpy.app.handlers.save_post:
        bpy.app.handlers.save_post.remove(save_handler.save_handler)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful