How to use the _get_args method in Molotov

Best Python code snippet using molotov_python

test_tra.py

Source:test_tra.py Github

copy

Full Screen

...17 def setUpClass(cls):18 if cls is _AbstractTRATest:19 raise unittest.SkipTest('Skip abstract parent class')20 super(_AbstractTRATest, cls).setUpClass()21 def _get_args(self):22 # Add arguments for the Torch Ranker Agent to test23 # Override in child classes24 return dict(25 task='integration_tests:candidate',26 optimizer='adamax',27 learningrate=7e-3,28 batchsize=16,29 embedding_size=32,30 num_epochs=4,31 )32 def _get_threshold(self):33 # Accuracy threshold34 return 0.835 # test train inline cands36 @testing_utils.retry(ntries=3)37 def test_train_inline(self):38 args = self._get_args()39 args['candidates'] = 'inline'40 valid, test = testing_utils.train_model(args)41 threshold = self._get_threshold()42 self.assertGreaterEqual(valid['hits@1'], threshold)43 # test train batch cands44 @testing_utils.retry(ntries=3)45 def test_train_batch(self):46 args = self._get_args()47 args['candidates'] = 'batch'48 valid, test = testing_utils.train_model(args)49 threshold = self._get_threshold()50 self.assertGreaterEqual(valid['hits@1'], threshold)51 # test train fixed52 @testing_utils.retry(ntries=3)53 def test_train_fixed(self):54 args = self._get_args()55 args['candidates'] = 'fixed'56 args['encode_candidate_vecs'] = False57 valid, test = testing_utils.train_model(args)58 threshold = self._get_threshold()59 self.assertGreaterEqual(valid['hits@1'], threshold)60 # test train batch all cands61 @testing_utils.retry(ntries=3)62 def test_train_batch_all(self):63 args = self._get_args()64 args['candidates'] = 'batch-all-cands'65 valid, test = testing_utils.train_model(args)66 threshold = self._get_threshold()67 self.assertGreaterEqual(valid['hits@1'], threshold)68 # test eval inline ecands69 @testing_utils.retry(ntries=3)70 def test_eval_inline(self):71 args = self._get_args()72 args['eval_candidates'] = 'inline'73 valid, test = testing_utils.train_model(args)74 threshold = self._get_threshold()75 self.assertGreaterEqual(valid['hits@1'], threshold)76 # test eval batch ecands77 
@testing_utils.retry(ntries=3)78 def test_eval_batch(self):79 args = self._get_args()80 args['eval_candidates'] = 'batch'81 valid, test = testing_utils.train_model(args)82 threshold = self._get_threshold()83 self.assertGreaterEqual(valid['hits@1'], threshold)84 # test eval fixed ecands85 @testing_utils.retry(ntries=3)86 def test_eval_fixed(self):87 args = self._get_args()88 args['eval_candidates'] = 'fixed'89 args['encode_candidate_vecs'] = True90 args['ignore_bad_candidates'] = True91 valid, test = testing_utils.train_model(args)92 # none of the train candidates appear in evaluation, so should have93 # zero accuracy: this tests whether the fixed candidates were built94 # properly (i.e., only using candidates from the train set)95 self.assertEqual(valid['hits@1'], 0)96 # now try again with a fixed candidate file that includes all possible97 # candidates98 teacher = CandidateTeacher({'datatype': 'train'})99 all_cands = teacher.train + teacher.val + teacher.test100 all_cands_str = '\n'.join([' '.join(x) for x in all_cands])101 with testing_utils.tempdir() as tmpdir:102 tmp_cands_file = os.path.join(tmpdir, 'all_cands.text')103 with open(tmp_cands_file, 'w') as f:104 f.write(all_cands_str)105 args['fixed_candidates_path'] = tmp_cands_file106 args['encode_candidate_vecs'] = False # don't encode before training107 args['ignore_bad_candidates'] = False108 args['num_epochs'] = 4109 valid, test = testing_utils.train_model(args)110 self.assertGreaterEqual(valid['hits@100'], 0.1)111 # test eval vocab ecands112 @testing_utils.retry(ntries=3)113 def test_eval_vocab(self):114 args = self._get_args()115 args['eval_candidates'] = 'vocab'116 args['encode_candidate_vecs'] = True117 valid, test = testing_utils.train_model(args)118 # accuracy should be zero, none of the vocab candidates should be the119 # correct label120 self.assertEqual(valid['hits@100'], 0)121class TestTransformerRanker(_AbstractTRATest):122 def _get_args(self):123 args = super()._get_args()124 new_args = dict(125 
model='transformer/ranker',126 n_layers=1,127 n_heads=4,128 ffn_size=64,129 gradient_clip=0.5,130 )131 for k, v in new_args.items():132 args[k] = v133 return args134class TestMemNN(_AbstractTRATest):135 def _get_args(self):136 args = super()._get_args()137 args['model'] = 'memnn'138 return args139 def _get_threshold(self):140 # this is a slightly worse model, so we expect it to perform worse141 return 0.5142class TestPolyRanker(_AbstractTRATest):143 def _get_args(self):144 args = super()._get_args()145 new_args = dict(146 model='transformer/polyencoder',147 n_layers=1,148 n_heads=4,149 ffn_size=64,150 gradient_clip=0.5,151 )152 for k, v in new_args.items():153 args[k] = v154 return args155 def _get_threshold(self):156 return 0.6157 def test_eval_fixed_label_not_in_cands(self):158 # test where cands during eval do not contain test label159 args = self._get_args()160 args[161 'model'162 ] = 'parlai.agents.transformer.polyencoder:IRFriendlyPolyencoderAgent'163 args['eval_candidates'] = 'fixed'164 teacher = CandidateTeacher({'datatype': 'train'})165 all_cands = teacher.train + teacher.val + teacher.test166 train_val_cands = teacher.train + teacher.val167 all_cands_str = '\n'.join([' '.join(x) for x in all_cands])168 train_val_cands_str = '\n'.join([' '.join(x) for x in train_val_cands])169 with testing_utils.tempdir() as tmpdir:170 tmp_cands_file = os.path.join(tmpdir, 'all_cands.text')171 with open(tmp_cands_file, 'w') as f:172 f.write(all_cands_str)173 tmp_train_val_cands_file = os.path.join(tmpdir, 'train_val_cands.text')...

Full Screen

Full Screen

test_run.py

Source:test_run.py Github

copy

Full Screen

...4import numpy as np5import os6import pandas as pd7import pytest8def _get_args(infile: str, k: list, outdir: str):9 args = RUN_DEFAULTS.copy()10 args['outdir'] = outdir11 args['k'] = k12 args["infile"] = infile13 return args14def test_init(tmpdir):15 # incorrect parameters16 with pytest.raises(AttributeError):17 SumoRun()18 fname = os.path.join(tmpdir, "data.npz")19 outdir = os.path.join(tmpdir, "outdir")20 samples = 1021 sample_labels = ['sample_{}'.format(i) for i in range(samples)]22 args = _get_args(fname, [2], outdir)23 # no input file24 with pytest.raises(FileNotFoundError):25 SumoRun(**args)26 save_arrays_to_npz({'0': np.random.random((samples, samples)),27 '1': np.random.random((samples, samples)),28 'samples': np.array(sample_labels)}, fname)29 # supervised sumo no labels file30 labels_fname = os.path.join(tmpdir, "labels.tsv")31 labels = np.array([sample_labels[0:2] + sample_labels[7:9], [1, 1, 2, 2]]).T32 labels_df = pd.DataFrame(data=labels, columns=['sample', 'label'])33 args = _get_args(fname, [2], outdir)34 args['labels'] = labels_fname35 with pytest.raises(FileNotFoundError):36 SumoRun(**args)37 labels_df.to_csv(labels_fname, sep="\t")38 args['sparsity'] = [10]39 args['n'] = 10 # makes test run quicker40 SumoRun(**args)41 # incorrect number of repetitions42 args['n'] = -143 with pytest.raises(ValueError):44 SumoRun(**args)45 # incorrect number of threads46 args = _get_args(fname, [2], outdir)47 args['t'] = -148 with pytest.raises(ValueError):49 SumoRun(**args)50 # incorrect outdir51 args = _get_args(fname, [2], fname)52 with pytest.raises(NotADirectoryError):53 SumoRun(**args)54 # incorrect k55 args = _get_args(fname, [2, 3, 4], outdir)56 with pytest.raises(ValueError):57 SumoRun(**args)58 args = _get_args(fname, [2], outdir)59 SumoRun(**args)60 args = _get_args(fname, [2, 5], outdir)61 SumoRun(**args)62 # incorrect k range63 args = _get_args(fname, [5, 2], outdir)64 with pytest.raises(ValueError):65 SumoRun(**args)66 # incorrect subsample 
argument67 args = _get_args(fname, [2], outdir)68 args['subsample'] = -0.169 with pytest.raises(ValueError):70 SumoRun(**args)71 args['subsample'] = 0.972 with pytest.raises(ValueError):73 SumoRun(**args)74def test_run(tmpdir):75 fname = os.path.join(tmpdir, "data.npz")76 outdir = os.path.join(tmpdir, "outdir")77 samples = 1078 a0 = np.random.random((samples, samples))79 a0 = (a0 * a0.T) / 280 a1 = np.random.random((samples, samples))81 a1 = (a1 * a1.T) / 282 sample_labels = ['sample_{}'.format(i) for i in range(samples)]83 args = _get_args(fname, [2], outdir)84 # no sample names85 save_arrays_to_npz({'0': a0, '1': a1}, fname)86 with pytest.raises(ValueError):87 sr = SumoRun(**args)88 sr.run()89 # incorrect sample names90 save_arrays_to_npz({'0': a0, '1': a1, 'samples': np.array(sample_labels[1:])}, fname)91 with pytest.raises(ValueError):92 sr = SumoRun(**args)93 sr.run()94 # incorrect adjacency matrices95 save_arrays_to_npz({'samples': np.array(sample_labels)}, fname)96 with pytest.raises(ValueError):97 sr = SumoRun(**args)98 sr.run()99 # incorrect value of h_init100 save_arrays_to_npz({'0': a0, '1': a1, 'samples': np.array(sample_labels)}, fname)101 args = _get_args(fname, [2], outdir)102 args['h_init'] = -1103 with pytest.raises(ValueError):104 sr = SumoRun(**args)105 sr.run()106 args['h_init'] = 3107 with pytest.raises(ValueError):108 sr = SumoRun(**args)109 sr.run()110 # supervised sumo111 labels_fname = os.path.join(tmpdir, "labels.tsv")112 labels = np.array([sample_labels[0:2] + sample_labels[7:9], [1, 1, 2, 2]]).T113 labels_df = pd.DataFrame(data=labels, columns=['sample', 'label'])114 labels_df.to_csv(labels_fname, sep="\t")115 args = _get_args(fname, [2], outdir)116 args['labels'] = labels_fname117 args['sparsity'] = [10]118 args['n'] = 10 # makes test run quicker119 sr = SumoRun(**args)120 sr.run()121 # unsupervised sumo122 args = _get_args(fname, [2], outdir)123 args['sparsity'] = [10]124 args['n'] = 10 # makes test run quicker125 sr = 
SumoRun(**args)126 sr.run()127 assert all([os.path.exists(os.path.join(outdir, x)) for x in ['k2', 'plots',128 os.path.join('plots', 'consensus_k2.png'),...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Molotov automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful