How to use the test_should_fail method in pyshould

Best Python code snippets using pyshould
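A note on naming before the snippets: in the code below, test_should_fail is an ordinary boolean flag each harness uses to mark a test that is expected to fail; it is not part of pyshould's public API. If you want equivalent "this expectation should fail" behavior with pyshould's own matchers, the usual pattern is to assert that a deliberately unmet expectation raises. A minimal sketch, assuming pyshould's operator-based should DSL raises AssertionError on an unmet expectation and that should.eq is available as a matcher alias:

import pytest
from pyshould import should

def test_expectation_passes():
    (1 + 1) | should.eq(2)  # met expectation: no exception is raised

def test_should_fail():
    # Deliberately unmet expectation: this test passes only if it raises.
    with pytest.raises(AssertionError):
        (1 + 1) | should.eq(3)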

test.py

Source: test.py (GitHub)


import os
import sys
import subprocess
from subprocess import PIPE
from collections import namedtuple


def LOCAL(*path):
    return os.path.join(os.path.dirname(__file__), *path)


TEST_DIR = LOCAL("maltests")

STEPS = {
    "step0": "step0_repl",
    "step1": "step1_read_print",
    "step2": "step2_eval",
    "step3": "step3_env",
    "step4": "step4_if_fn_do",
}


def get_step(step: str) -> str:
    step_name = STEPS.get(step)
    if step_name is None:
        raise Exception("Step {!r} not found!".format(step))
    return step_name


Test = namedtuple("Test", ["name", "cases", "type", "should_fail"])
TestCase = namedtuple("TestCase", ["input_lines", "expected_output"])


class TestType:
    Mandatory = "Mandatory"
    Deferrable = "Deferrable"
    Optional = "Optional"


def build_rust(step_name):
    cmd = ["cargo", "build", "--bin", step_name]
    subprocess.run(cmd, check=True)


def rust_cmd(step_name):
    EXEPATH = LOCAL("target", "debug", step_name)
    return [EXEPATH]


TestFailure = namedtuple("TestFailure", ["test", "case_numbers"])


def run_tests(tests, run_cmd):
    passed = []
    failed = {
        TestType.Mandatory: [],
        TestType.Deferrable: [],
        TestType.Optional: [],
    }
    print("Running {} tests...".format(len(tests)))
    print("")
    for i, test in enumerate(tests):
        failtext = " <Should fail> " if test.should_fail else ""
        print(" {} ({}){} ".format(test.name, test.type, failtext).center(80, "="))
        print("")
        failed_cases = []

        # Run the exe with all the input lines
        cmd = run_cmd.copy()
        for case in test.cases:
            cmd.extend(case.input_lines)

        res = subprocess.run(cmd, stderr=PIPE, stdout=PIPE, universal_newlines=True)
        if res.returncode != 0:
            if not test.should_fail:
                # Even if the test 'errors' it's fine if the output is correct.
                # See step3_env.mal ';; Check that error aborts def!'
                output = res.stdout.rstrip().splitlines()
            else:
                print("PASSED! (by raising an error as expected)")
                print(res.stderr)
                passed.append(test)
                continue
        else:
            output = res.stdout.rstrip().splitlines()  # strip to remove trailing newline

        if len(output) != len(test.cases):
            failed[test.type].append(TestFailure(test, []))
            print("TEST FAILED!")
            print("ERROR: Got {} lines of output, expected {}!".format(len(output), len(test.cases)))
            print_tests([test])
            print("RECEIVED OUTPUT:")
            for line in output:
                print("-> {}".format(line))
            continue

        # Match each line with the expected output
        maxw = len(str(len(test.cases)))
        tag_template = "{{:<{}}}) ".format(maxw)
        output_line = 0
        for case_no, case in enumerate(test.cases, 1):
            tag = tag_template.format(case_no)
            inputstr = " <newline> ".join(case.input_lines)

            if output_line >= len(output):
                print("{}ERROR! : {}".format(tag, inputstr))
                print(res.stderr)
                failed_cases.append(case_no)
                break

            case_output = output[output_line]
            if case_output == case.expected_output:
                print("{}PASSED! : {} -> {}".format(tag, inputstr, case_output))
            else:
                print("{}FAILED! : {}".format(tag, inputstr))
                print("    Input:    {!r}\n".format(inputstr))
                print("    Expected: {!r}\n".format(case.expected_output))
                print("    Got:      {!r}\n".format(case_output))
                failed_cases.append(case_no)

            output_line += 1

        if failed_cases:
            failed[test.type].append(TestFailure(test, failed_cases))
        else:
            passed.append(test)

        print("")

    return (passed, failed)


def print_results(passed, failed):

    def print_failure(specifier, failed_tests):
        t = "test" if len(failed_tests) == 1 else "tests"
        print("{} {} {} failed".format(len(failed_tests), specifier, t))
        for failure in failed_tests:
            case_text = ", ".join(str(cn) for cn in failure.case_numbers)
            print("  - '{}' [ {} ]".format(failure.test.name, case_text))
        print("")

    verdict = "SUCCESS" if not failed[TestType.Mandatory] else "FAILURE"
    if all(not tests for tests in failed.values()):
        verdict = "PERFECT"

    print("")
    print(" Test Results ".center(80, "="))
    t = "test" if len(passed) == 1 else "tests"
    print("{} {} passed\n".format(len(passed), t))
    print_failure("mandatory", failed[TestType.Mandatory])
    print_failure("deferrable", failed[TestType.Deferrable])
    print_failure("optional", failed[TestType.Optional])
    print("")
    print("Verdict: {}!".format(verdict))
    print("")


def load_tests(step_name):
    filepath = os.path.join(TEST_DIR, step_name + ".mal")
    if not os.path.exists(filepath):
        raise FileNotFoundError("Could not find test file: {!r}".format(step_name + ".mal"))
    with open(filepath, "r") as f:
        text = f.read()

    tests = []
    test_type = TestType.Mandatory
    test_should_fail = False

    test_name = "<Unnamed Test>"
    cases = []
    case_input_lines = []

    def start_new_test(lineno):
        if cases:
            if case_input_lines:
                raise Exception("Line {}: New test start, but no output for previous test case".format(lineno))
            test = Test(test_name, cases.copy(), test_type, test_should_fail)
            tests.append(test)
            cases.clear()

    for i, line in enumerate(text.splitlines()):
        line = line.strip()
        if line.startswith(";;"):
            # ';;' starts a new named test; should_fail resets for each test
            start_new_test(i + 1)
            test_name = line[2:].strip()
            test_should_fail = False
            continue

        elif line == "" or line.isspace():
            continue

        elif line.startswith(";>>>"):
            ll = line.lower()
            if "optional" in ll:
                test_type = TestType.Optional
            elif "deferrable" in ll:
                test_type = TestType.Deferrable
            else:
                raise Exception("Line {}: Unknown parse directive: {!r}".format(i + 1, line))

        elif line.startswith(";=>"):
            # ';=>' gives the expected output for the collected input lines
            if case_input_lines:
                output = line[3:].strip()
                cases.append(TestCase(case_input_lines.copy(), output))
                case_input_lines.clear()
            else:
                raise Exception("Line {}: Found output line with no input".format(i + 1))

        elif line.startswith("; expected"):
            # '; expected' marks a case that should make the interpreter error
            if not case_input_lines:
                raise Exception("Line {}: Found output line with no input".format(i + 1))
            test_should_fail = True
            cases.append(TestCase(case_input_lines.copy(), ""))
            case_input_lines.clear()

        elif line.startswith(";"):
            if "not found" in line:
                # '... not found' comments also mark an expected failure
                if not case_input_lines:
                    raise Exception("Line {}: Found output line with no input".format(i + 1))
                test_should_fail = True
                cases.append(TestCase(case_input_lines.copy(), ""))
                case_input_lines.clear()
            else:
                print("WARN: Line {}: Found nonspecial line starting with ';'".format(i + 1))
                print("  {}".format(line))

        else:
            # A plain line is input; consecutive input lines form one case
            case_input_lines.append(line)

    if case_input_lines:
        raise Exception("END: No output found for last input line")

    start_new_test(i + 1)

    return tests


def print_tests(tests):
    print("Tests:")
    for test in tests:
        failtext = " <Should fail> " if test.should_fail else ""
        print("TEST: {} ({}){}".format(test.name, test.type, failtext))
        print("")
        maxw = len(str(len(test.cases)))
        tag = "{{:<{}}}".format(maxw)
        for i, case in enumerate(test.cases, 1):
            itag = tag.format(i)
            ptag = tag.format(" ")
            for j, input_line in enumerate(case.input_lines):
                prefix = itag if j == 0 else ptag  # number only the first input line of a case
                print("{}| user> {}".format(prefix, input_line))
            output = "<Error>" if test.should_fail else "-> " + case.expected_output
            print("{}| {}".format(itag, output))
            print("")


def main(args=sys.argv[1:]):
    if not args:
        return print("Usage: python3 test.py <step>")
    step_name = get_step(args[0])
    tests = load_tests(step_name)

    build_rust(step_name)
    cmd = rust_cmd(step_name)
    (passed, failed) = run_tests(tests, cmd)
    print_results(passed, failed)


if __name__ == '__main__':
    ...  # listing truncated in the source
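The harness above marks a test as should_fail while parsing the .mal file (on '; expected' and '... not found' comment lines) and then, in run_tests, treats a nonzero exit code from the interpreter as a pass for that test. The core of that check can be distilled into a few lines; this is a hypothetical standalone sketch, not part of the harness:

import subprocess

def run_expecting_failure(cmd):
    """Run cmd and treat a nonzero exit code as the expected outcome."""
    res = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=True)
    if res.returncode != 0:
        print("PASSED! (by raising an error as expected)")
        return True
    print("FAILED! Command succeeded but was expected to error out")
    return False

# Example (hypothetical binary path): an undefined symbol should abort the step.
# run_expecting_failure(["./target/debug/step3_env", "(abc 1 2 3)"])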


test_seq_scheduler.py

Source: test_seq_scheduler.py (GitHub)


from time import sleep

import pytest

import autogluon.core as ag
from autogluon.core import args, Real
from autogluon.core.scheduler.seq_scheduler import LocalSequentialScheduler

cls = LocalSequentialScheduler


def test_get_average_trial_time_():
    running_time = cls.get_average_trial_time_(0, avg_trial_run_time=None, trial_start_time=100, time_end=102)
    assert running_time == 2
    running_time = cls.get_average_trial_time_(1, avg_trial_run_time=running_time, trial_start_time=110, time_end=114)
    assert running_time == 3.0
    running_time = cls.get_average_trial_time_(2, avg_trial_run_time=running_time, trial_start_time=120, time_end=126)
    assert running_time == 4.0


def test_has_enough_time_for_trial__enough_time__no_avg_time():
    # Enough time - no average time
    assert cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=None)


def test_has_enough_time_for_trial__enough_time__avg_time_allows_trials():
    # Enough time - average time allows more trials
    assert cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=1)


def test_has_enough_time_for_trial__enough_time__avg_time_not_allows_trials():
    # Enough time - average time does not allow more trials
    assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=5)


def test_has_enough_time_for_trial__time_exceeded_no_avg_time():
    # Time exceeded - no average time
    assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=116, avg_trial_run_time=None)


def test_has_enough_time_for_trial__avg_time():
    # Time exceeded - with (zero) average time
    assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=116, avg_trial_run_time=0)


def test_has_enough_time_for_trial__enough_time__avg_time_not_allows_trials_by_fill_factor():
    # Enough time - average time does not allow more trials once the fill factor is applied
    assert not cls.has_enough_time_for_trial_(time_out=10, time_start=100, trial_start_time=105, trial_end_time=106, avg_trial_run_time=1, fill_factor=5)


def test_LocalSequentialScheduler_no_criteria():
    @args(lr=Real(1e-2, 1e-1, log=True))
    def _train_fn_():
        pass

    with pytest.raises(AssertionError, match="Need stopping criterion: Either num_trials or time_out"):
        LocalSequentialScheduler(train_fn=_train_fn_, reward_attr='reward_attr', resource={})


def test_search_space():
    @ag.args(
        a=ag.space.Real(1e-3, 1e-2, log=True),
        b=ag.space.Real(1e-3, 1e-2),
        c=ag.space.Int(1, 10),
        d=ag.space.Categorical('a', 'b', 'c', 'd'),
        e=ag.space.Bool(),
        f=ag.space.List(
            ag.space.Int(1, 2),
            ag.space.Categorical(4, 5),
        ),
        g=ag.space.Dict(
            a=ag.Real(0, 10),
            obj=ag.space.Categorical('auto', 'gluon'),
        ),
        h=ag.space.Categorical('test', ag.space.Categorical('auto', 'gluon')),
        i=ag.space.Categorical('mxnet', 'pytorch'),
    )
    def train_fn(args, reporter):
        a, b, c, d, e, f, g, h, i = args.a, args.b, args.c, args.d, args.e, args.f, args.g, args.h, args.i

        class MyObj:
            def __init__(self, name):
                self.name = name

        def myfunc(framework):
            return framework

        assert 1e-3 <= a <= 1e-2
        assert 1e-3 <= b <= 1e-2
        assert 1 <= c <= 10
        assert d in ['a', 'b', 'c', 'd']
        assert e in [True, False]
        assert f[0] in [1, 2]
        assert f[1] in [4, 5]
        assert 0 <= g['a'] <= 10
        assert MyObj(g.obj).name in ['auto', 'gluon']
        assert h in ['test', 'auto', 'gluon']
        assert myfunc(i) in ['mxnet', 'pytorch']
        reporter(epoch=1, accuracy=0)

    scheduler = LocalSequentialScheduler(
        train_fn,
        resource={'num_cpus': 'all', 'num_gpus': 0},
        num_trials=10,
        reward_attr='accuracy',
        time_attr='epoch',
        checkpoint=None
    )
    scheduler.run()


def test_scheduler_can_handle_failing_jobs():
    trials_outcomes = []
    best_result = [-1]

    @ag.args(a=ag.space.Real(0, 1))
    def train_fn(args, reporter):
        test_should_fail = args.a > 0.7
        trials_outcomes.append(test_should_fail)
        if test_should_fail:
            raise Exception('Failed Trial')
        elif args.a > best_result[0]:
            best_result[0] = args.a
        sleep(0.2)
        reporter(epoch=1, accuracy=args.a)

    scheduler = LocalSequentialScheduler(
        train_fn,
        resource={'num_cpus': 'all', 'num_gpus': 0},
        time_out=3,
        reward_attr='accuracy',
        time_attr='epoch',
        checkpoint=None
    )
    scheduler.run()

    actual_runs = []
    for trial in scheduler.training_history.values():
        is_failed = False
        for i in trial:
            if 'traceback' in i:
                is_failed = True
                break
        actual_runs.append(is_failed)

    assert trials_outcomes == actual_runs
    assert scheduler.get_best_reward() == best_result[0]
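In test_scheduler_can_handle_failing_jobs, test_should_fail is computed per trial (any sampled a > 0.7 raises), the expected outcomes are recorded as the trials run, and the scheduler's training_history is then checked for a 'traceback' entry on exactly those trials. The same record-then-compare pattern, reduced to plain Python as a hypothetical sketch independent of autogluon:

def run_trials(samples, trial_fn):
    """Record which trials are expected to fail, then verify they really did."""
    expected, actual = [], []
    for a in samples:
        expected.append(a > 0.7)  # same failure threshold as the test above
        try:
            trial_fn(a)
            actual.append(False)
        except Exception:
            actual.append(True)
    assert expected == actual

def flaky_trial(a):
    if a > 0.7:
        raise Exception('Failed Trial')

run_trials([0.1, 0.9, 0.5], flaky_trial)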


report.py

Source: report.py (GitHub)


from Lib.basic.LogHTML import LogHTML
from Lib.basic.Test import fail_test
from Lib.basic.Browser import Browser
from Lib.basic.TestDecorator import test, beforeEachTest, afterEachTest
from Lib.loginEko import Login, Filter, MainDashboard, Report, JsonData


@beforeEachTest()
def before():
    global browser
    browser = Browser()
    browser.go_to(JsonData.get("loginUrl"))
    Login.login_GUI(browser, "e2e_tester", "h3lp1ngh4nd")
    Filter.wait_filter_loaded(browser)


@afterEachTest()
def after():
    global browser
    browser.close_browser()


@test(dsc="Assert reports exist")
def checkReportsExist():
    MainDashboard.open_reports(browser)
    Report.wait_report_menu_loaded(browser)
    test_should_fail = False
    for r in JsonData.get("reports"):
        try:
            LogHTML.info("Try to find report {}".format(r))
            Report.input_text(browser, r["fieldName"])
            Report.open_report_with_scroll(browser, r)
            LogHTML.info("Report {} found".format(r))
        except Exception:
            test_should_fail = True
            LogHTML.info("Report {} not found".format(r))
    if test_should_fail:
        ...  # listing truncated in the source
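The listing is truncated, but the shape of the pattern is clear: a missing report only sets test_should_fail and logs, so one failure does not stop the loop, and the test is failed once at the end (presumably via the imported fail_test). The same soft-assertion idea in plain pytest, as a hypothetical sketch (find_report and the report names are stand-ins for the real page objects):

import pytest

def find_report(name):
    # Stand-in for the real UI lookup (Report.input_text / open_report_with_scroll)
    return name != "missing"

def test_all_reports_exist():
    missing = [r for r in ("sales", "missing", "traffic") if not find_report(r)]
    if missing:
        pytest.fail("Reports not found: {}".format(", ".join(missing)))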


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pyshould automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

