How to use the init_scenario method in Molecule

Best Python code snippet using molecule_python

test_strategies.py

Source:test_strategies.py Github

copy

Full Screen

...198 if use_gpu:199 device = "cuda"200 else:201 device = "cpu"202 def init_scenario(self, multi_task=False):203 model = self.get_model(fast_test=True, multi_task=multi_task)204 optimizer = SGD(model.parameters(), lr=1e-3)205 criterion = CrossEntropyLoss()206 benchmark = self.load_benchmark(use_task_labels=multi_task)207 return model, optimizer, criterion, benchmark208 def test_naive(self):209 # SIT scenario210 model, optimizer, criterion, benchmark = self.init_scenario(211 multi_task=False)212 strategy = Naive(213 model,214 optimizer,215 criterion,216 train_mb_size=64,217 device=self.device,218 eval_mb_size=50,219 train_epochs=2,220 )221 self.run_strategy(benchmark, strategy)222 # MT scenario223 model, optimizer, criterion, benchmark = self.init_scenario(224 multi_task=True)225 strategy = Naive(226 model,227 optimizer,228 criterion,229 train_mb_size=64,230 device=self.device,231 eval_mb_size=50,232 train_epochs=2,233 )234 self.run_strategy(benchmark, strategy)235 def test_joint(self):236 class JointSTestPlugin(SupervisedPlugin):237 def __init__(self, benchmark):238 super().__init__()239 self.benchmark = benchmark240 def after_train_dataset_adaptation(241 self, strategy: "SupervisedTemplate", **kwargs242 ):243 """244 Check that the dataset used for training contains the245 correct number of samples.246 """247 cum_len = sum(248 [len(exp.dataset) for exp in self.benchmark.train_stream]249 )250 assert len(strategy.adapted_dataset) == cum_len251 # SIT scenario252 model, optimizer, criterion, benchmark = self.init_scenario(253 multi_task=False)254 strategy = JointTraining(255 model,256 optimizer,257 criterion,258 train_mb_size=64,259 device=self.device,260 eval_mb_size=50,261 train_epochs=2,262 plugins=[JointSTestPlugin(benchmark)],263 )264 strategy.evaluator.loggers = [TextLogger(sys.stdout)]265 strategy.train(benchmark.train_stream)266 # MT scenario267 model, optimizer, criterion, benchmark = self.init_scenario(268 multi_task=True)269 strategy = JointTraining(270 
model,271 optimizer,272 criterion,273 train_mb_size=64,274 device=self.device,275 eval_mb_size=50,276 train_epochs=2,277 plugins=[JointSTestPlugin(benchmark)],278 )279 strategy.evaluator.loggers = [TextLogger(sys.stdout)]280 strategy.train(benchmark.train_stream)281 # Raise error when retraining282 self.assertRaises(283 AlreadyTrainedError,284 lambda: strategy.train(benchmark.train_stream),285 )286 def test_cwrstar(self):287 # SIT scenario288 model, optimizer, criterion, benchmark = self.init_scenario(289 multi_task=False)290 last_fc_name, _ = get_last_fc_layer(model)291 strategy = CWRStar(292 model,293 optimizer,294 criterion,295 last_fc_name,296 train_mb_size=64,297 device=self.device,298 )299 self.run_strategy(benchmark, strategy)300 dict_past_j = {}301 for cls in range(benchmark.n_classes):302 dict_past_j[cls] = 0303 # Check past_j SIT304 for exp in benchmark.train_stream:305 for cls in set(exp.dataset.targets):306 dict_past_j[cls] += exp.dataset.targets.count(cls)307 for cls in model.past_j.keys():308 assert model.past_j[cls] == dict_past_j[cls]309 for cls in model.past_j.keys():310 model.past_j[cls] = 0311 # MT scenario312 model, optimizer, criterion, benchmark = self.init_scenario(313 multi_task=True)314 strategy = CWRStar(315 model,316 optimizer,317 criterion,318 last_fc_name,319 train_mb_size=64,320 device=self.device,321 )322 # self.run_strategy(benchmark, strategy)323 # Check past_j MT324 dict_past_j = {}325 for cls in range(benchmark.n_classes):326 dict_past_j[cls] = 0327 for exp in benchmark.train_stream:328 for cls in set(exp.dataset.targets):329 dict_past_j[cls] += exp.dataset.targets.count(cls)330 for cls in model.past_j.keys():331 assert model.past_j[cls] == dict_past_j[cls]332 def test_replay(self):333 # SIT scenario334 model, optimizer, criterion, benchmark = self.init_scenario(335 multi_task=False)336 strategy = Replay(337 model,338 optimizer,339 criterion,340 mem_size=10,341 train_mb_size=64,342 device=self.device,343 eval_mb_size=50,344 
train_epochs=2,345 )346 self.run_strategy(benchmark, strategy)347 # MT scenario348 model, optimizer, criterion, benchmark = self.init_scenario(349 multi_task=True)350 strategy = Replay(351 model,352 optimizer,353 criterion,354 mem_size=10,355 train_mb_size=64,356 device=self.device,357 eval_mb_size=50,358 train_epochs=2,359 )360 self.run_strategy(benchmark, strategy)361 def test_gdumb(self):362 # SIT scenario363 model, optimizer, criterion, benchmark = self.init_scenario(364 multi_task=False)365 strategy = GDumb(366 model,367 optimizer,368 criterion,369 mem_size=200,370 train_mb_size=64,371 device=self.device,372 eval_mb_size=50,373 train_epochs=2,374 )375 self.run_strategy(benchmark, strategy)376 # MT scenario377 model, optimizer, criterion, benchmark = self.init_scenario(378 multi_task=True)379 strategy = GDumb(380 model,381 optimizer,382 criterion,383 mem_size=200,384 train_mb_size=64,385 device=self.device,386 eval_mb_size=50,387 train_epochs=2,388 )389 self.run_strategy(benchmark, strategy)390 def test_cumulative(self):391 # SIT scenario392 model, optimizer, criterion, benchmark = self.init_scenario(393 multi_task=False)394 strategy = Cumulative(395 model,396 optimizer,397 criterion,398 train_mb_size=64,399 device=self.device,400 eval_mb_size=50,401 train_epochs=2,402 )403 self.run_strategy(benchmark, strategy)404 # MT scenario405 model, optimizer, criterion, benchmark = self.init_scenario(406 multi_task=True)407 strategy = Cumulative(408 model,409 optimizer,410 criterion,411 train_mb_size=64,412 device=self.device,413 eval_mb_size=50,414 train_epochs=2,415 )416 self.run_strategy(benchmark, strategy)417 def test_slda(self):418 model, _, criterion, benchmark = self.init_scenario(multi_task=False)419 strategy = StreamingLDA(420 model,421 criterion,422 input_size=10,423 output_layer_name="features",424 num_classes=10,425 eval_mb_size=7,426 train_epochs=1,427 device=self.device,428 train_mb_size=7,429 )430 self.run_strategy(benchmark, strategy)431 def 
test_warning_slda_lwf(self):432 model, _, criterion, benchmark = self.init_scenario(multi_task=False)433 with self.assertWarns(Warning) as cm:434 StreamingLDA(435 model,436 criterion,437 input_size=10,438 output_layer_name="features",439 num_classes=10,440 plugins=[LwFPlugin(), ReplayPlugin()],441 )442 def test_lwf(self):443 # SIT scenario444 model, optimizer, criterion, benchmark = self.init_scenario(445 multi_task=False)446 strategy = LwF(447 model,448 optimizer,449 criterion,450 alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],451 temperature=2,452 device=self.device,453 train_mb_size=10,454 eval_mb_size=50,455 train_epochs=2,456 )457 self.run_strategy(benchmark, strategy)458 # MT scenario459 model, optimizer, criterion, benchmark = self.init_scenario(460 multi_task=True)461 strategy = LwF(462 model,463 optimizer,464 criterion,465 alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],466 temperature=2,467 device=self.device,468 train_mb_size=10,469 eval_mb_size=50,470 train_epochs=2,471 )472 self.run_strategy(benchmark, strategy)473 def test_agem(self):474 # SIT scenario475 model, optimizer, criterion, benchmark = self.init_scenario(476 multi_task=False)477 strategy = AGEM(478 model,479 optimizer,480 criterion,481 patterns_per_exp=25,482 sample_size=25,483 train_mb_size=10,484 eval_mb_size=50,485 train_epochs=2,486 )487 self.run_strategy(benchmark, strategy)488 # MT scenario489 model, optimizer, criterion, benchmark = self.init_scenario(490 multi_task=True)491 strategy = AGEM(492 model,493 optimizer,494 criterion,495 patterns_per_exp=25,496 sample_size=25,497 train_mb_size=10,498 eval_mb_size=50,499 train_epochs=2,500 )501 self.run_strategy(benchmark, strategy)502 def test_gem(self):503 # SIT scenario504 model, optimizer, criterion, benchmark = self.init_scenario(505 multi_task=False)506 strategy = GEM(507 model,508 optimizer,509 criterion,510 patterns_per_exp=256,511 train_mb_size=10,512 eval_mb_size=50,513 train_epochs=2,514 )515 
self.run_strategy(benchmark, strategy)516 # MT scenario517 model, optimizer, criterion, benchmark = self.init_scenario(518 multi_task=True)519 strategy = GEM(520 model,521 optimizer,522 criterion,523 patterns_per_exp=256,524 train_mb_size=10,525 eval_mb_size=50,526 train_epochs=2,527 )528 benchmark = self.load_benchmark(use_task_labels=True)529 self.run_strategy(benchmark, strategy)530 def test_ewc(self):531 # SIT scenario532 model, optimizer, criterion, benchmark = self.init_scenario(533 multi_task=False)534 strategy = EWC(535 model,536 optimizer,537 criterion,538 ewc_lambda=0.4,539 mode="separate",540 train_mb_size=10,541 eval_mb_size=50,542 train_epochs=2,543 )544 self.run_strategy(benchmark, strategy)545 # MT scenario546 model, optimizer, criterion, benchmark = self.init_scenario(547 multi_task=True)548 strategy = EWC(549 model,550 optimizer,551 criterion,552 ewc_lambda=0.4,553 mode="separate",554 train_mb_size=10,555 eval_mb_size=50,556 train_epochs=2,557 )558 self.run_strategy(benchmark, strategy)559 def test_ewc_online(self):560 # SIT scenario561 model, optimizer, criterion, benchmark = self.init_scenario(562 multi_task=False)563 strategy = EWC(564 model,565 optimizer,566 criterion,567 ewc_lambda=0.4,568 mode="online",569 decay_factor=0.1,570 train_mb_size=10,571 eval_mb_size=50,572 train_epochs=2,573 )574 self.run_strategy(benchmark, strategy)575 # # MT scenario576 # model, optimizer, criterion, benchmark = self.init_scenario(577 # multi_task=True)578 # strategy = EWC(579 # model,580 # optimizer,581 # criterion,582 # ewc_lambda=0.4,583 # mode="online",584 # decay_factor=0.1,585 # train_mb_size=10,586 # eval_mb_size=50,587 # train_epochs=2,588 # )589 # self.run_strategy(benchmark, strategy)590 def test_rwalk(self):591 # SIT scenario592 model, optimizer, criterion, benchmark = self.init_scenario(593 multi_task=False)594 strategy = Naive(595 model,596 optimizer,597 criterion,598 train_mb_size=10,599 eval_mb_size=50,600 train_epochs=2,601 plugins=[602 
RWalkPlugin(603 ewc_lambda=0.1,604 ewc_alpha=0.9,605 delta_t=10,606 ),607 ],608 )609 self.run_strategy(benchmark, strategy)610 # # MT scenario611 # model, optimizer, criterion, benchmark = self.init_scenario(612 # multi_task=True)613 # strategy = Naive(614 # model,615 # optimizer,616 # criterion,617 # train_mb_size=10,618 # eval_mb_size=50,619 # train_epochs=2,620 # plugins=[621 # RWalkPlugin(622 # ewc_lambda=0.1,623 # ewc_alpha=0.9,624 # delta_t=10,625 # ),626 # ],627 # )628 # self.run_strategy(benchmark, strategy)629 def test_synaptic_intelligence(self):630 # SIT scenario631 model, optimizer, criterion, benchmark = self.init_scenario(632 multi_task=False)633 strategy = SynapticIntelligence(634 model,635 optimizer,636 criterion,637 si_lambda=0.0001,638 train_epochs=1,639 train_mb_size=10,640 eval_mb_size=10,641 )642 self.run_strategy(benchmark, strategy)643 # MT scenario644 # model, optimizer, criterion, benchmark = self.init_scenario(645 # multi_task=True)646 # strategy = SynapticIntelligence(647 # model,648 # optimizer,649 # criterion,650 # si_lambda=0.0001,651 # train_epochs=1,652 # train_mb_size=10,653 # eval_mb_size=10,654 # )655 # self.run_strategy(benchmark, strategy)656 def test_cope(self):657 # Fast benchmark (hardcoded)658 n_classes = 10659 emb_size = n_classes # Embedding size660 # SIT scenario661 model, optimizer, criterion, benchmark = self.init_scenario(662 multi_task=False)663 strategy = CoPE(664 model,665 optimizer,666 criterion,667 mem_size=10,668 n_classes=n_classes,669 p_size=emb_size,670 train_mb_size=10,671 device=self.device,672 eval_mb_size=50,673 train_epochs=2,674 )675 self.run_strategy(benchmark, strategy)676 # MT scenario677 # model, optimizer, criterion, benchmark = self.init_scenario(678 # multi_task=True)679 # strategy = CoPE(680 # model,681 # optimizer,682 # criterion,683 # mem_size=10,684 # n_classes=n_classes,685 # p_size=emb_size,686 # train_mb_size=10,687 # device=self.device,688 # eval_mb_size=50,689 # train_epochs=2,690 # )691 
# self.run_strategy(benchmark, strategy)692 def test_pnn(self):693 # only multi-task scenarios.694 # eval on future tasks is not allowed.695 model = PNN(num_layers=3, in_features=6, hidden_features_per_column=10)696 optimizer = torch.optim.SGD(model.parameters(), lr=0.1)697 strategy = PNNStrategy(698 model,699 optimizer,700 train_mb_size=10,701 device=self.device,702 eval_mb_size=50,703 train_epochs=2,704 )705 # train and test loop706 benchmark = self.load_benchmark(use_task_labels=True)707 for train_task in benchmark.train_stream:708 strategy.train(train_task)709 strategy.eval(benchmark.test_stream)710 def test_icarl(self):711 model, optimizer, criterion, benchmark = self.init_scenario(712 multi_task=False)713 strategy = ICaRL(714 model.features,715 model.classifier,716 optimizer,717 20,718 buffer_transform=None,719 criterion=criterion,720 fixed_memory=True,721 train_mb_size=10,722 train_epochs=2,723 eval_mb_size=50,724 device=self.device,725 )726 self.run_strategy(benchmark, strategy)727 def test_lfl(self):728 # SIT scenario729 model, optimizer, criterion, benchmark = self.init_scenario(730 multi_task=False)731 strategy = LFL(732 model,733 optimizer,734 criterion,735 lambda_e=0.0001,736 train_mb_size=10,737 device=self.device,738 eval_mb_size=50,739 train_epochs=2,740 )741 self.run_strategy(benchmark, strategy)742 # MT scenario743 # model, optimizer, criterion, benchmark = self.init_scenario(744 # multi_task=True)745 # strategy = LFL(746 # model,747 # optimizer,748 # criterion,749 # lambda_e=0.0001,750 # train_mb_size=10,751 # device=self.device,752 # eval_mb_size=50,753 # train_epochs=2,754 # )755 # self.run_strategy(benchmark, strategy)756 def test_mas(self):757 # SIT scenario758 model, optimizer, criterion, benchmark = self.init_scenario(759 multi_task=False)760 strategy = MAS(761 model,762 optimizer,763 criterion,764 lambda_reg=1.0,765 alpha=0.5,766 train_mb_size=10,767 device=self.device,768 eval_mb_size=50,769 train_epochs=2,770 )771 
self.run_strategy(benchmark, strategy)772 # MT scenario773 # model, optimizer, criterion, benchmark = self.init_scenario(774 # multi_task=True)775 # strategy = MAS(776 # model,777 # optimizer,778 # criterion,779 # lambda_reg=1.0,780 # alpha=0.5,781 # train_mb_size=10,782 # device=self.device,783 # eval_mb_size=50,784 # train_epochs=2,785 # )786 # self.run_strategy(benchmark, strategy)787 def load_benchmark(self, use_task_labels=False):...

Full Screen

Full Screen

test_node.py

Source:test_node.py Github

copy

Full Screen

...3from typing import Tuple, List, Dict, Optional, Set4from .node import Node, INVALID_SHARE5from .crypto import normalize, add, multiply, G1, H1, G2, H26from . import crypto7def init_scenario(8 n: int = 5, t: int = 2, use_random_indices: bool = True9) -> Tuple[int, int, List[Node]]:10 nodes = [Node() for _ in range(n)]11 indices = list(range(1, n + 1))12 if use_random_indices:13 indices = random.sample(range(1, 10000), n)14 public_keys = {idx: node.public_key for idx, node in zip(indices, nodes)}15 for idx, node in zip(indices, nodes):16 node.setup(n, t, idx, public_keys)17 return n, t, nodes18def compute_and_distribute_shares(19 nodes: List[Node],20 invalid_shares_from_to: Optional[Set[Tuple[Node, Node]]] = None,21 invalid_commitments_from: Optional[Set[Node]] = None,22 do_not_distribute_from: Optional[Set[Node]] = None,23):24 invalid_shares_from_to = invalid_shares_from_to or set()25 invalid_commitments_from = invalid_commitments_from or set()26 do_not_distribute_from = do_not_distribute_from or set()27 all_encrypted_shares = {}28 all_commitments = {}29 for node in nodes:30 all_encrypted_shares[node.idx], all_commitments[node.idx] = node.compute_shares()31 for issuer in nodes:32 if issuer in invalid_commitments_from:33 all_commitments[issuer.idx][0] = multiply(all_commitments[issuer.idx][0], 2)34 for receiver in nodes:35 if (issuer, receiver) in invalid_shares_from_to:36 assert issuer.idx != receiver.idx, "error in testcase"37 all_encrypted_shares[issuer.idx][receiver.idx] += 138 for issuer in nodes:39 if issuer in do_not_distribute_from:40 continue41 for receiver in nodes:42 if issuer.idx == receiver.idx:43 continue44 ok = receiver.load_shares(45 issuer.idx, all_encrypted_shares[issuer.idx], all_commitments[issuer.idx]46 )47 if (issuer, receiver) in invalid_shares_from_to or issuer in invalid_commitments_from:48 assert not ok, "invalid share should be detected"49 else:50 assert ok, "verification should pass, since the share is valid"51def test_shared_keys():52 
n, t, nodes = init_scenario()53 n1, n2, *_ = nodes54 assert normalize(n1.shared_keys[n2.idx]) == normalize(n2.shared_keys[n1.idx])55def test_share_distribution_all_fine_case():56 n, t, nodes = init_scenario()57 compute_and_distribute_shares(nodes)58def test_share_distribution_node_invalid_shares_should_be_detected():59 n, t, nodes = init_scenario()60 n1, n2, n3, *_ = nodes61 compute_and_distribute_shares(nodes, invalid_shares_from_to={(n1, n2), (n1, n3), (n3, n1)})62def test_share_distribution_node_invalid_commitments_should_be_detected():63 n, t, nodes = init_scenario()64 n1, n2, *_ = nodes65 compute_and_distribute_shares(nodes, invalid_commitments_from={n1, n2})66def compute_and_distribute_disputes(nodes):67 all_disputes = {node.idx: node.compute_disputes() for node in nodes}68 for node in nodes:69 for disputer_idx, disputes in all_disputes.items():70 for issuer_idx, dispute in disputes.items():71 assert node.load_dispute(issuer_idx, disputer_idx, *dispute)72def test_valid_dispute_accepted():73 n, t, nodes = init_scenario()74 n1, n2, *_ = nodes75 compute_and_distribute_shares(nodes, invalid_shares_from_to={(n1, n2)})76 disputes = n2.compute_disputes()77 assert len(disputes) == 178 for node in nodes:79 assert node is n2 or len(node.compute_disputes()) == 080 dispute = disputes[n1.idx]81 for node in nodes:82 assert node.load_dispute(n1.idx, n2.idx, *dispute)83def test_invalid_dispute_rejected__invalid_key():84 n, t, nodes = init_scenario()85 n1, n2, *_ = nodes86 compute_and_distribute_shares(nodes, invalid_shares_from_to={(n1, n2)})87 dispute = n2.compute_disputes()[n1.idx]88 shared_key, proof = dispute89 shared_key = multiply(shared_key, 4711)90 for node in nodes:91 assert not node.load_dispute(n1.idx, n2.idx, shared_key, proof)92def test_invalid_dispute_rejected__invalid_key_proof():93 n, t, nodes = init_scenario()94 n1, n2, *_ = nodes95 compute_and_distribute_shares(nodes, invalid_shares_from_to={(n1, n2)})96 dispute = n2.compute_disputes()[n1.idx]97 shared_key, 
proof = dispute98 proof = proof[0], proof[1] + 471199 for node in nodes:100 assert not node.load_dispute(n1.idx, n2.idx, shared_key, proof)101def test_invalid_dispute_rejected__share_valid():102 n, t, nodes = init_scenario()103 n1, n2, *_ = nodes104 compute_and_distribute_shares(nodes)105 # wrongly mark share as invalid so that the node actually creates a dispute106 n2.decrypted_shares[n1.idx] = INVALID_SHARE107 dispute = n2.compute_disputes()[n1.idx]108 for node in nodes:109 assert not node.load_dispute(n1.idx, n2.idx, *dispute)110def test_qualified_nodes__all():111 n, t, nodes = init_scenario()112 compute_and_distribute_shares(nodes)113 compute_and_distribute_disputes(nodes)114 for node in nodes:115 assert node.compute_qualified_nodes() == [node.idx for node in nodes]116def test_qualified_nodes__exclude_disputed():117 n, t, nodes = init_scenario()118 n1, n2, *_ = nodes119 compute_and_distribute_shares(nodes, invalid_shares_from_to={(n1, n2)})120 compute_and_distribute_disputes(nodes)121 qualified_nodes = [node.idx for node in nodes if node is not n1]122 for node in nodes:123 assert node.compute_qualified_nodes() == qualified_nodes124def test_qualified_nodes__exclude_undistributed():125 n, t, nodes = init_scenario()126 n1, *_ = nodes127 compute_and_distribute_shares(nodes, do_not_distribute_from={n1})128 compute_and_distribute_disputes(nodes)129 qualified_nodes = [node.idx for node in nodes if node is not n1]130 for node in nodes:131 if node is not n1:132 assert node.compute_qualified_nodes() == qualified_nodes133def compute_and_distribute_key_shares(nodes):134 for node in nodes:135 node.compute_qualified_nodes()136 all_key_shares = {node.idx: node.compute_key_share() for node in nodes}137 for receiving_node in nodes:138 for issuer_idx, key_shares in all_key_shares.items():139 assert receiving_node.load_key_share(issuer_idx, *key_shares)140def test_key_shares_verification__all_correct():141 n, t, nodes = init_scenario()142 compute_and_distribute_shares(nodes)143 
compute_and_distribute_disputes(nodes)144 compute_and_distribute_key_shares(nodes)145def test_key_shares_verification__invalid():146 n, t, nodes = init_scenario()147 compute_and_distribute_shares(nodes)148 compute_and_distribute_disputes(nodes)149 for node in nodes:150 node.compute_qualified_nodes()151 node.compute_key_share()152 n1, n2, *_ = nodes153 h1 = multiply(H1, n1.secret)154 h1_proof = crypto.dleq(H1, h1, G1, n1.commitments[n1.idx][0], n1.secret)155 h2 = multiply(H2, n1.secret)156 assert n2.load_key_share(n1.idx, h1, h1_proof, h2)157 h1 = multiply(H1, n1.secret)158 h1_proof = crypto.dleq(159 multiply(H1, 2), h1, multiply(G1, 2), n1.commitments[n1.idx][0], n1.secret160 )161 h2 = multiply(H2, n1.secret)162 assert not n2.load_key_share(n1.idx, h1, h1_proof, h2)163 h1 = multiply(H1, n1.secret + 1)164 h1_proof = crypto.dleq(H1, h1, G1, n1.commitments[n1.idx][0], n1.secret)165 h2 = multiply(H2, n1.secret + 1)166 assert not n2.load_key_share(n1.idx, h1, h1_proof, h2)167 h1 = multiply(H1, n1.secret)168 h1_proof = crypto.dleq(H1, h1, G1, n1.commitments[n1.idx][0], n1.secret)169 h2 = multiply(H2, n1.secret + 1)170 assert not n2.load_key_share(n1.idx, h1, h1_proof, h2)171def test_key_shares_recovery():172 n, t, nodes = init_scenario()173 n1, n2, *_ = nodes174 compute_and_distribute_shares(nodes)175 compute_and_distribute_disputes(nodes)176 compute_and_distribute_key_shares(nodes)177 recs_for_n1 = {178 node.idx: node.initiate_key_share_recovery(n1.idx) for node in nodes if node is not n1179 }180 for verifier in nodes:181 if verifier is n1:182 continue183 for recoverer_idx, rec in recs_for_n1.items():184 assert verifier.load_recovered_key_share(n1.idx, recoverer_idx, *rec)185 for verifier in nodes:186 if verifier is n1:187 continue188 x = verifier.key_shares189 assert verifier.recover_key_share(n1.idx)190 assert x == verifier.key_shares191def test_master_key_derivation():192 n, t, nodes = init_scenario()193 n1, *_ = nodes194 compute_and_distribute_shares(nodes)195 
compute_and_distribute_disputes(nodes)196 compute_and_distribute_key_shares(nodes)197 for node in nodes:198 node.derive_master_public_key()199 mk = normalize(n1.master_public_key)200 for node in nodes:201 assert mk == normalize(node.master_public_key)202def test_group_key_derivation():203 n, t, nodes = init_scenario()204 n1, *_ = nodes205 compute_and_distribute_shares(nodes)206 compute_and_distribute_disputes(nodes)207 compute_and_distribute_key_shares(nodes)208 for node in nodes:209 node.derive_group_keys()210 for node in nodes:211 assert n1.verify_group_public_key(212 node.idx,213 node.group_public_key,214 node.group_public_key_in_G1,215 node.group_public_key_correctness_proof,...

Full Screen

Full Screen

benchmark_utils.py

Source:benchmark_utils.py Github

copy

Full Screen

...26 print("Run number {}/{} ({}/{} succesfull): dimension = {}, nclients = {}, dropout = {}".format(27 counter, total, success[True], success[True] + success[False], dimension, nclients, dropout))28 29 scenario = Scenario(dimension, inputsize, keysize, ceil(threshold*nclients), nclients, dropout)30 clients, server = init_scenario(scenario)31 valid = benchmark(clients, server, scenario, REPS)32 success[valid] += 133 nclients = 600 34 for dimension in dimensions:35 for dropout in dropouts:36 counter += 137 if dont_run: 38 print("run {}: dimension = {}, nclients = {}, dropout = {}".format(counter, dimension, nclients, dropout))39 continue40 if runs and counter not in runs:41 continue42 43 print("Run number {}/{} ({}/{} succesfull): dimension = {}, nclients = {}, dropout = {}".format(44 counter, total, success[True], success[True] + success[False], dimension, nclients, dropout))45 46 47 scenario = Scenario(dimension, inputsize, keysize, ceil(threshold*nclients), nclients, dropout)48 clients, server = init_scenario(scenario)49 valid = benchmark(clients, server, scenario, REPS) 50 success[valid] += 1...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run molecule automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing for FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful