How to use the verify_output method in grail

The snippets below show how a verify_output helper is implemented and used in practice, starting with grail_python's own test suite and followed by examples from other open-source projects.

check_model_test.py

Source: check_model_test.py (GitHub)


...
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
        output = run_result.stdout.decode('utf8')
        return run_result.returncode, output

    def verify_output(self, output: str, *args) -> bool:
        try:
            for s in args:
                self.assertNotEqual(output.find(s), -1, f"output does not contain '{s}'; got:\n{output}")
        except AssertionError:
            print(output)
            raise

    def test_model_not_found(self):
        retcode, output = self.run_check_model('does_not_exist.py')
        self.verify_output(output, "MODEL FILE")
        self.assertNotEqual(retcode, 0)

    def test_data_not_found(self):
        retcode, output = self.run_check_model('simple_gauss_model.py', data_file='does_not_exist')
        self.verify_output(output, "DATA FILE")
        self.assertNotEqual(retcode, 0)

    def test_postprocess_broken(self):
        retcode, output = self.run_check_model('postprocess_broken.py')
        self.verify_output(output, "POSTPROCESSING", "KeyError")
        self.assertNotEqual(retcode, 0)

    def test_postprocess_old_style_broken(self):
        retcode, output = self.run_check_model('postprocess_broken.py')
        self.verify_output(output, "POSTPROCESSING", "KeyError")
        self.assertNotEqual(retcode, 0)

    def test_postprocess_old_style_wrong_returns(self):
        retcode, output = self.run_check_model('postprocess_old_style_wrong_returns.py')
        self.verify_output(output, "POSTPROCESSING", "must return")
        self.assertNotEqual(retcode, 0)

    def test_postprocess_wrong_returns(self):
        retcode, output = self.run_check_model('postprocess_wrong_returns.py')
        self.verify_output(output, "POSTPROCESSING", "must return")
        self.assertNotEqual(retcode, 0)

    def test_postprocess_wrong_signature(self):
        retcode, output = self.run_check_model('postprocess_wrong_signature.py')
        self.verify_output(output, "POSTPROCESSING", "as argument")
        self.assertNotEqual(retcode, 0)

    def test_preprocess_broken(self):
        retcode, output = self.run_check_model('preprocess_broken.py')
        self.verify_output(output, "PREPROCESSING", "KeyError")
        self.assertNotEqual(retcode, 0)

    def test_preprocess_wrong_returns(self):
        retcode, output = self.run_check_model('preprocess_wrong_returns.py')
        self.verify_output(output, "PREPROCESSING", "must return")
        self.assertNotEqual(retcode, 0)

    def test_preprocess_wrong_signature(self):
        retcode, output = self.run_check_model('preprocess_wrong_signature.py')
        self.verify_output(output, "PREPROCESSING", "as argument")
        self.assertNotEqual(retcode, 0)

    def test_preprocess_returns_array(self):
        retcode, output = self.run_check_model('preprocess_returns_array.py')
        self.verify_output(output, "PREPROCESSING", "must return")
        self.assertNotEqual(retcode, 0)

    def test_broken_model(self):
        retcode, output = self.run_check_model('simple_gauss_model_broken.py')
        self.verify_output(output, "MODEL", "NameError")
        self.assertNotEqual(retcode, 0)

    def test_no_none_model(self):
        retcode, output = self.run_check_model('simple_gauss_model_no_none.py')
        self.verify_output(output, "MODEL", "None for synthesising data")
        self.assertNotEqual(retcode, 0)

    def test_no_num_obs_total_model(self):
        retcode, output = self.run_check_model('simple_gauss_model_no_num_obs_total.py')
        self.verify_output(output, "MODEL", "num_obs_total")
        self.assertNotEqual(retcode, 0)

    def test_syntax_error(self):
        retcode, output = self.run_check_model('syntax_error.py')
        self.verify_output(output, "PARSE", "SyntaxError")
        self.assertNotEqual(retcode, 0)

    def test_new_model_old_postprocess(self):
        retcode, output = self.run_check_model('new_model_old_postprocess.py')
        self.verify_output(output, "POSTPROCESSING", "postprocessing function with a single argument", "'x'")
        self.assertNotEqual(retcode, 0)

    def test_simple_working_model(self):
        retcode, output = self.run_check_model('simple_gauss_model.py')
        self.verify_output(output, "okay")
        self.assertEqual(retcode, 0)

    def test_model_not_a_function(self):
        retcode, output = self.run_check_model('model_not_a_function.py')
        self.verify_output(output, "MODEL", "PARSE", "must be a function")
        self.assertNotEqual(retcode, 0)

    def test_model_factory(self):
        retcode, output = self.run_check_model('model_factory.py')
        self.verify_output(output, "okay")
        self.assertEqual(retcode, 0)

    def test_model_factory_with_guide(self):
        retcode, output = self.run_check_model('model_factory_with_guide.py')
        self.verify_output(output, "okay")
        self.assertEqual(retcode, 0)

    def test_model_factory_with_autoguide(self):
        retcode, output = self.run_check_model('model_factory_with_autoguide.py')
        self.verify_output(output, "okay")
        self.assertEqual(retcode, 0)

    def test_model_factory_broken(self):
        retcode, output = self.run_check_model('model_factory_broken.py')
        self.verify_output(output, "FACTORY", "AttributeError", "unspecified_arg")
        self.assertNotEqual(retcode, 0)

    def test_model_factory_wrong_signature(self):
        retcode, output = self.run_check_model('model_factory_wrong_signature.py')
        self.verify_output(output, "FACTORY", "as argument")
        self.assertNotEqual(retcode, 0)

    def test_model_factory_wrong_returns(self):
        retcode, output = self.run_check_model('model_factory_wrong_returns_none.py')
        self.verify_output(output, "FACTORY", "either a model function or a tuple")
...
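The pattern above is straightforward to reuse outside grail: run a command with subprocess, capture stdout and stderr into one stream, and assert that expected marker strings appear in the captured text. Here is a minimal standalone sketch of that pattern; the command and marker strings are illustrative, not part of grail:

import subprocess
import sys
import unittest


class OutputMarkerTest(unittest.TestCase):
    """Minimal sketch of the substring-assertion verify_output pattern."""

    def run_command(self, *argv):
        # Capture stdout and stderr together, as check_model_test.py does.
        result = subprocess.run(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return result.returncode, result.stdout.decode('utf8')

    def verify_output(self, output, *markers):
        # Assert that every expected marker occurs in the output; on failure,
        # print the full output so the test log shows what actually happened.
        try:
            for s in markers:
                self.assertIn(s, output, f"output does not contain '{s}'")
        except AssertionError:
            print(output)
            raise

    def test_marker_present(self):
        # Illustrative command: prints a known marker and exits 0.
        retcode, output = self.run_command(sys.executable, '-c', "print('model check: okay')")
        self.verify_output(output, 'okay')
        self.assertEqual(retcode, 0)


if __name__ == '__main__':
    unittest.main()

Note that assertIn performs the same check as assertNotEqual(output.find(s), -1), with a clearer failure message.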


test_wlm_helper_functions.py

Source: test_wlm_helper_functions.py (GitHub)


...
# defined, the tests in this file are skipped.
def test_get_hosts(alloc_specs):
    if not alloc_specs:
        pytest.skip("alloc_specs not defined")

    def verify_output(output):
        assert isinstance(output, list)
        assert all(isinstance(host, str) for host in output)
        if "host_list" in alloc_specs:
            assert output == alloc_specs["host_list"]

    if pytest.test_launcher == "slurm":
        if "SLURM_JOBID" in os.environ:
            verify_output(wlm.get_hosts())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_hosts()
    elif pytest.test_launcher == "pbs":
        if "PBS_JOBID" in os.environ:
            verify_output(wlm.get_hosts())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_hosts()
    else:
        with pytest.raises(SSUnsupportedError):
            wlm.get_hosts(launcher=pytest.test_launcher)

def test_get_queue(alloc_specs):
    if not alloc_specs:
        pytest.skip("alloc_specs not defined")

    def verify_output(output):
        assert isinstance(output, str)
        if "queue" in alloc_specs:
            assert output == alloc_specs["queue"]

    if pytest.test_launcher == "slurm":
        if "SLURM_JOBID" in os.environ:
            verify_output(wlm.get_queue())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_queue()
    elif pytest.test_launcher == "pbs":
        if "PBS_JOBID" in os.environ:
            verify_output(wlm.get_queue())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_queue()
    else:
        with pytest.raises(SSUnsupportedError):
            wlm.get_queue(launcher=pytest.test_launcher)

def test_get_tasks(alloc_specs):
    if not alloc_specs:
        pytest.skip("alloc_specs not defined")

    def verify_output(output):
        assert isinstance(output, int)
        if "num_tasks" in alloc_specs:
            assert output == alloc_specs["num_tasks"]

    if pytest.test_launcher == "slurm":
        if "SLURM_JOBID" in os.environ:
            verify_output(wlm.get_tasks())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_tasks(launcher=pytest.test_launcher)
    elif pytest.test_launcher == "pbs":
        if "PBS_JOBID" in os.environ and which("qstat"):
            verify_output(wlm.get_tasks())
        elif "PBS_JOBID" in os.environ:
            with pytest.raises(LauncherError):
                wlm.get_tasks()
        else:
            with pytest.raises(SmartSimError):
                wlm.get_tasks()
    else:
        with pytest.raises(SSUnsupportedError):
            wlm.get_tasks(launcher=pytest.test_launcher)

def test_get_tasks_per_node(alloc_specs):
    if not alloc_specs:
        pytest.skip("alloc_specs not defined")

    def verify_output(output):
        assert isinstance(output, dict)
        assert all(
            isinstance(node, str) and isinstance(ntasks, int)
            for node, ntasks in output.items()
        )
        if "tasks_per_node" in alloc_specs:
            assert output == alloc_specs["tasks_per_node"]

    if pytest.test_launcher == "slurm":
        if "SLURM_JOBID" in os.environ:
            verify_output(wlm.get_tasks_per_node())
        else:
            with pytest.raises(SmartSimError):
                wlm.get_tasks_per_node()
    elif pytest.test_launcher == "pbs":
        if "PBS_JOBID" in os.environ and which("qstat"):
            verify_output(wlm.get_tasks_per_node())
        elif "PBS_JOBID" in os.environ:
            with pytest.raises(LauncherError):
                wlm.get_tasks_per_node()
        else:
            with pytest.raises(SmartSimError):
                wlm.get_tasks_per_node()
    else:
        with pytest.raises(SSUnsupportedError):
...
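In this suite, verify_output is a nested closure rather than a method: each test defines its own validator that checks the shape of the workload-manager result and compares against known allocation values only when the fixture provides them. Below is a runnable sketch of that structure, using a stand-in for wlm.get_hosts() since the real call needs a live Slurm or PBS allocation; the stand-in and its return values are illustrative:

def get_hosts():
    # Stand-in for wlm.get_hosts(); the real call queries the workload
    # manager (Slurm or PBS) for the current allocation's host list.
    return ["node001", "node002"]


def test_get_hosts_shape():
    alloc_specs = {"host_list": ["node001", "node002"]}

    def verify_output(output):
        # Shape checks always run; the exact-value check runs only when
        # the allocation spec supplies an expected host list.
        assert isinstance(output, list)
        assert all(isinstance(host, str) for host in output)
        if "host_list" in alloc_specs:
            assert output == alloc_specs["host_list"]

    verify_output(get_hosts())

Defining the validator inside each test keeps every verify_output specific to one return type (list, str, int, dict) without sharing state between tests.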


test.py

Source: test.py (GitHub)


...
        return_value = expected_result in output
    else:
        return_value = False
    return return_value

def verify_output(result, expected_result) -> bool:
    if result.returncode != 0:
        print(result.stderr.decode("utf-8"))
        print(f"Error: Error Code : {result.returncode}")
        return False
    output = result.stdout.decode("utf-8")
    print("Result:")
    print(output)
    if re.match(expected_result, repr(output)):
        print(output)
        return True
    print(rf"Expected Result: {expected_result}")
    print(rf"Actual Result: {repr(output)}")
    return False

def test_citus():
    assert verify_output(run_with_output('pg_ctl -D citus -o "-p 9700" -l citus/citus_logfile start'),
                         r"^'waiting for server to start.... done\\nserver started\\n'$")
    assert verify_output(run_with_output('psql -p 9700 -c "CREATE EXTENSION citus;"'), r"^'CREATE EXTENSION\\n'$")
    assert verify_output(run_with_output('psql -p 9700 -c "select version();"'),
                         rf".*PostgreSQL {POSTGRES_VERSION}.* on x86_64-pc-linux-gnu, compiled by gcc \(.*")
    # Since version info for ol and el 7 contains undefined, undefined was needed to add as expected param for pc
    assert verify_output(run_with_output('psql -p 9700 -c "select citus_version();"'),
...
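This third variant returns a bool instead of asserting, and matches a regular expression against repr(output) so that newlines in the command output appear as a literal \n in the pattern. A self-contained sketch of that approach; the command is illustrative and stands in for the pg_ctl/psql calls above:

import re
import subprocess
import sys


def verify_output(result, expected_pattern) -> bool:
    # Fail fast on a non-zero exit code; otherwise match the pattern
    # against repr(output) and print both sides on a mismatch.
    if result.returncode != 0:
        print(result.stderr.decode("utf-8"))
        print(f"Error: Error Code : {result.returncode}")
        return False
    output = result.stdout.decode("utf-8")
    if re.match(expected_pattern, repr(output)):
        return True
    print(f"Expected Result: {expected_pattern}")
    print(f"Actual Result: {repr(output)}")
    return False


def test_banner():
    # Illustrative command: emits a fixed banner with no trailing newline.
    result = subprocess.run(
        [sys.executable, "-c", "print('server started', end='')"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    assert verify_output(result, r"^'server started'$")

Matching against repr(output) rather than output itself lets the patterns anchor on the quotes repr adds and spell newlines as \n, which is why the expected patterns in test_citus look like r"^'CREATE EXTENSION\\n'$".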


