How to use the finished_test method in Slash

Best Python code snippets using slash
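In both snippets below, finished_test names a completed test pulled off a thread-safe queue: worker threads push each test onto the queue as it finishes, and a consumer loop blocks on get() until a sentinel arrives. Here is a minimal, self-contained sketch of that pattern; the names finished and consume_finished_tests are made up for illustration and are not part of either snippet.

import queue
import threading

finished = queue.SimpleQueue()

def consume_finished_tests(finished_queue):
    while True:
        finished_test = finished_queue.get()  # blocks until a test completes
        if finished_test is None:             # sentinel: all workers are done
            break
        print(f"finished: {finished_test}")

consumer = threading.Thread(target=consume_finished_tests, args=(finished,))
consumer.start()
finished.put_nowait("some test")  # a worker would push real test objects here
finished.put_nowait(None)         # signal the consumer to stop
consumer.join()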

integration_tests.py

Source: integration_tests.py (GitHub)


import hashlib
from optparse import OptionParser
from xml.dom.minidom import Node, parse
import os
import queue
import subprocess
import sys
import threading
import time

Black = "\x1b[30m"
Red = "\x1b[31m"
Green = "\x1b[32m"
Yellow = "\x1b[33m"
Blue = "\x1b[34m"
Magenta = "\x1b[35m"
Cyan = "\x1b[36m"
White = "\x1b[37m"
BrightBlack = "\x1b[90m"
BrightRed = "\x1b[91m"
BrightGreen = "\x1b[92m"
BrightYellow = "\x1b[93m"
BrightBlue = "\x1b[94m"
BrightMagenta = "\x1b[95m"
BrightCyan = "\x1b[96m"
BrightWhite = "\x1b[97m"
Reset = "\x1b[0m"

class Execute(object):
    def __init__(self):
        self.program = None
        self.arguments = list()

    def __str__(self):
        return f"Execute({self.program}, {self.arguments})"

class Setup(object):
    def __init__(self):
        self.description = None
        self.expected_output = None
        self.expected_return_code = None
        self.execute = Execute()

    def __str__(self):
        return f"Setup(\"{self.description}\", \"{self.expected_output}\", {self.execute})"

    def get_hash(self):
        # the hash of the setup identifies a test across runs
        return hashlib.sha256(str(self).encode("utf-8")).hexdigest()

class Statistics(object):
    def __init__(self):
        self.runtime = None
        self.success = None

    def __str__(self):
        return f"Statistics({self.success}, {self.runtime})"

class Run(object):
    def __init__(self):
        self.return_code = None
        self.output = None
        self.statistics = Statistics()
        self.number = None
        self.error_output = None

class Test(object):
    def __init__(self):
        self.setup = Setup()
        self.last_run_statistics = None
        self.this_run = Run()

    def is_expect_nothing(self):
        return self.setup.expected_output == None and self.setup.expected_return_code == None

    def __str__(self):
        return f"Test({self.setup}, {self.last_run_statistics})"

class TestSuiteStatistics(object):
    def __init__(self):
        self.number_of_tests = 0
        self.number_of_failed_tests = 0
        self.number_of_succeeded_tests = 0
        self.number_of_expect_nothing_tests = 0

def get_text(nodes):
    texts = list()
    for node in nodes:
        if node.nodeType == node.TEXT_NODE:
            texts.append(node.data)
    return "".join(texts)

def get_space_appended(strings):
    return " ".join(strings)

def get_s_as_appropriate(number):
    if number == 1:
        return ""
    else:
        return "s"

def get_was_were_as_appropriate(number):
    if number == 1:
        return "was"
    else:
        return "were"

def get_bool_value(text):
    if text == "True":
        return True
    else:
        return False

def build_test_dictionary(test_list):
    result = dict()
    for test in test_list:
        test_hash = test.setup.get_hash()
        if test_hash in result:
            print(f"{BrightRed}ERROR{Reset}: Duplicated tests:")
            print(f"Test 1: {BrightYellow}{result[test_hash].setup}{Reset}")
            print(f"Test 2: {BrightYellow}{test.setup}{Reset}")
            exit(1)
        else:
            result[test_hash] = test
    return result

def read_last_run_statistics():
    result = dict()
    try:
        with open("test_statistics.csv", "r") as file:
            for line in file:
                line_parts = line[:-1].split(",")
                result[line_parts[0]] = (get_bool_value(line_parts[1]), float(line_parts[2]))
    except FileNotFoundError:
        pass
    return result

def test_runner(test_queue, finished_queue, barrier):
    while True:
        test = None
        try:
            test = test_queue.get_nowait()
            # if the "test" object is a barrier token (any string), we are meant to wait at the barrier
            if type(test) == str:
                barrier.wait()
            else:
                start_time = time.perf_counter()
                try:
                    result = subprocess.run([test.setup.execute.program] + test.setup.execute.arguments,
                                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    test.this_run.return_code = result.returncode
                    test.this_run.output = result.stdout.decode("utf-8")
                    test.this_run.error_output = result.stderr.decode("utf-8")
                except FileNotFoundError as exception:
                    test.this_run.error_output = str(exception)
                end_time = time.perf_counter()
                test.this_run.statistics.runtime = end_time - start_time
                finished_queue.put_nowait(test)
        except queue.Empty:
            break

def gather_tests_in_queue(test_list, number_of_threads):
    result = queue.SimpleQueue()
    last_test = None
    for test in test_list:
        if last_test != None:
            if last_test.last_run_statistics == None and test.last_run_statistics != None:
                # insert enough "barrier" tokens, so that every thread can get one
                for index in range(number_of_threads):
                    result.put_nowait("barrier after new tests")
            elif last_test.last_run_statistics != None and last_test.last_run_statistics.success == False and test.last_run_statistics != None and test.last_run_statistics.success == True:
                # insert enough "barrier" tokens, so that every thread can get one
                for index in range(number_of_threads):
                    result.put_nowait("barrier after failed tests")
        result.put_nowait(test)
        last_test = test
    return result

def test_scheduler(test_list, finished_queue):
    # we will spawn twice as many threads as we have CPUs
    number_of_threads = os.cpu_count() * 2
    test_queue = gather_tests_in_queue(test_list, number_of_threads)
    barrier = threading.Barrier(number_of_threads)
    threads = list()
    for thread_index in range(number_of_threads):
        thread = threading.Thread(target = test_runner, args = (test_queue, finished_queue, barrier))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    # this signals the end of processing
    finished_queue.put_nowait(None)

def load_tests_from_test_suite(test_suite_file_path):
    result = list()
    # now parse the test suite file
    test_suite_document = parse(test_suite_file_path)
    tests_element = test_suite_document.documentElement
    for node in tests_element.childNodes:
        if node.nodeType == Node.ELEMENT_NODE and node.tagName == "test":
            test_element = node
            test = Test()
            for node in test_element.childNodes:
                if node.nodeType == Node.ELEMENT_NODE:
                    if node.tagName == "description":
                        test.setup.description = get_text(node.childNodes)
                    elif node.tagName == "execute":
                        execute_element = node
                        for node in execute_element.childNodes:
                            if node.nodeType == Node.ELEMENT_NODE:
                                if node.tagName == "program":
                                    test.setup.execute.program = get_text(node.childNodes)
                                elif node.tagName == "argument":
                                    test.setup.execute.arguments.append(get_text(node.childNodes))
                    elif node.tagName == "expected-output":
                        test.setup.expected_output = get_text(node.childNodes)
                    elif node.tagName == "expected-return-code":
                        test.setup.expected_return_code = int(get_text(node.childNodes))
            result.append(test)
    return result

def add_last_run_statistics(test_list):
    # read statistics of last run and add data to tests
    last_run_statistics = read_last_run_statistics()
    for test in test_list:
        test_hash = test.setup.get_hash()
        if test_hash in last_run_statistics:
            test.last_run_statistics = Statistics()
            test.last_run_statistics.success = last_run_statistics[test_hash][0]
            test.last_run_statistics.runtime = last_run_statistics[test_hash][1]

def sort_test_list(test_list):
    def key_function(test):
        # False < True
        if test.last_run_statistics == None:
            # assures that new tests are always run first
            return (False, None, None)
        else:
            # assures that failed tests and successful short tests are run first
            return (True, (test.last_run_statistics.success), (test.last_run_statistics.runtime))

    # sorting the test list by (new, success, runtime)
    test_list.sort(key = key_function)

def print_finished_tests(finished_tests_queue, test_suite_statistics):
    result = list()
    magnitude_of_number_of_tests = len(str(test_suite_statistics.number_of_tests))
    number_of_test = 0
    while True:
        finished_test = finished_tests_queue.get()
        if finished_test == None:
            break
        else:
            number_of_test += 1
            finished_test.this_run.number = number_of_test
            print(f"{BrightWhite}[{BrightBlue}{str(number_of_test).zfill(magnitude_of_number_of_tests)}{BrightWhite} / {BrightBlue}{test_suite_statistics.number_of_tests}{BrightWhite}]{Reset}", end = "")
            if finished_test.last_run_statistics == None:
                print(f" [{BrightMagenta}NEW{Reset}]", end = "")
            elif finished_test.last_run_statistics.success == False:
                print(f" [{BrightMagenta}FAILED PREVIOUSLY{Reset}]", end = "")
            if finished_test.setup.description != None:
                print(f" {BrightYellow}{finished_test.setup.description}{Reset}")
            else:
                print()
            print(f" Running \"{finished_test.setup.execute.program} {get_space_appended(finished_test.setup.execute.arguments)}\"")
            if finished_test.is_expect_nothing() == True:
                print(f" !! {BrightYellow}We are expecting nothing from this run!{White}")
                test_suite_statistics.number_of_expect_nothing_tests += 1
            if finished_test.this_run.return_code == None:
                test_suite_statistics.number_of_failed_tests += 1
                print(f" => {BrightRed}Failed{Reset} (could not be run)")
                finished_test.this_run.statistics.success = False
            elif finished_test.setup.expected_return_code == None or finished_test.this_run.return_code == finished_test.setup.expected_return_code:
                if finished_test.setup.expected_output != None:
                    if finished_test.this_run.output == finished_test.setup.expected_output:
                        test_suite_statistics.number_of_succeeded_tests += 1
                        print(f" => {BrightGreen}Succeeded{Reset} (with output \"{Green}{finished_test.this_run.output}{Reset}\")")
                        finished_test.this_run.statistics.success = True
                    else:
                        test_suite_statistics.number_of_failed_tests += 1
                        print(f" => {BrightRed}Failed{Reset} (with output \"{Red}{finished_test.this_run.output}{Reset}\" instead of \"{Green}{finished_test.setup.expected_output}{Reset}\")")
                        finished_test.this_run.statistics.success = False
                else:
                    test_suite_statistics.number_of_succeeded_tests += 1
                    print(f" => {BrightGreen}Succeeded{Reset}")
                    finished_test.this_run.statistics.success = True
            else:
                test_suite_statistics.number_of_failed_tests += 1
                print(f" => {BrightRed}Failed{Reset} (error code was {Red}{finished_test.this_run.return_code}{Reset} instead of {Green}{finished_test.setup.expected_return_code}{Reset})")
                finished_test.this_run.statistics.success = False
            if len(finished_test.this_run.error_output) > 0:
                print(f"{BrightRed}Error output{Reset}:")
                print(f"{Red}{finished_test.this_run.error_output}{Reset}")
            print()
            result.append(finished_test)
    return result

def print_summary(test_suite_statistics, finished_tests_list):
    if test_suite_statistics.number_of_failed_tests == 0:
        print(f"All {Yellow}{test_suite_statistics.number_of_succeeded_tests}{Reset} test{get_s_as_appropriate(test_suite_statistics.number_of_succeeded_tests)} {BrightGreen}succeeded{Reset}.")
    else:
        print(f"Out of {Yellow}{test_suite_statistics.number_of_tests}{Reset} test{get_s_as_appropriate(test_suite_statistics.number_of_tests)}, {Yellow}{test_suite_statistics.number_of_succeeded_tests}{Reset} test{get_s_as_appropriate(test_suite_statistics.number_of_succeeded_tests)} {BrightGreen}succeeded{Reset} and {Yellow}{test_suite_statistics.number_of_failed_tests}{Reset} test{get_s_as_appropriate(test_suite_statistics.number_of_failed_tests)} {BrightRed}failed{Reset}.")
        print(" Failed tests: ", end = "")
        first = True
        for finished_test in finished_tests_list:
            if finished_test.this_run.statistics.success == False:
                if first == False:
                    print(f", ", end = "")
                else:
                    first = False
                print(f"{BrightRed}{finished_test.this_run.number}{Reset}", end = "")
                reasons = []
                if finished_test.this_run.return_code == None:
                    reasons.append("could not be run")
                else:
                    if finished_test.this_run.return_code != 0:
                        reasons.append("wrong result code")
                    if len(finished_test.this_run.error_output) > 0:
                        reasons.append("error output")
                    if finished_test.this_run.output != finished_test.setup.expected_output:
                        reasons.append("wrong output")
                if len(reasons) > 0:
                    print(f" {BrightWhite}({Red}", end = "")
                    for index, reason in enumerate(reasons):
                        if len(reasons) > 1:
                            if index + 1 == len(reasons):
                                print(" and ", end = "")
                            elif index > 0:
                                print(", ", end = "")
                        print(reason, end = "")
                    print(f"{BrightWhite}){Reset}", end = "")
        print(Reset)
    if test_suite_statistics.number_of_expect_nothing_tests > 0:
        print(f"{BrightYellow}There {get_was_were_as_appropriate(test_suite_statistics.number_of_expect_nothing_tests)} {test_suite_statistics.number_of_expect_nothing_tests} test{get_s_as_appropriate(test_suite_statistics.number_of_expect_nothing_tests)} without any expectations:{Reset}", end = "")
        for index, finished_test in enumerate(finished_tests_list):
            if finished_test.is_expect_nothing() == True:
                if index > 0:
                    print(", ", end = "")
                print(f"{Yellow}{finished_test.this_run.number}{Reset}", end = "")
        print()

def execute_test_suite(test_suite_file_path):
    # prepare prioritized list of tests
    test_list = load_tests_from_test_suite(test_suite_file_path)
    # add the statistics from the last run
    add_last_run_statistics(test_list)
    # sort by statistics from last run
    sort_test_list(test_list)
    # this queue is used for returning tests that have been run from the runner threads
    finished_tests_queue = queue.SimpleQueue()
    scheduler_thread = threading.Thread(target = test_scheduler, args = (test_list, finished_tests_queue))
    scheduler_thread.start()
    # while the scheduler is waiting for the runners to finish, the main thread prints out information about finished tests as they come in
    test_suite_statistics = TestSuiteStatistics()
    test_suite_statistics.number_of_tests = len(test_list)
    finished_tests = print_finished_tests(finished_tests_queue, test_suite_statistics)
    # clean up scheduler thread
    scheduler_thread.join()
    # print summary
    print_summary(test_suite_statistics, finished_tests)
    # write statistics file
    with open("test_statistics.csv", "w") as file:
        for test in finished_tests:
            file.write(f"{test.setup.get_hash()},{test.this_run.statistics.success},{test.this_run.statistics.runtime}\n")
    # exit, possibly with error code
    if test_suite_statistics.number_of_failed_tests > 0:
        return False
    else:
        return True

if __name__ == "__main__":
    return_code = 1
    # initialize command line option parser
    parser = OptionParser()
    parser.add_option("-i", "--in", dest="test_suite", help="The test suite file.")
    # read command line options and validate
    (options, args) = parser.parse_args()
    if options.test_suite == None:
        print("Set a test suite with '--in'.")
    else:
        if execute_test_suite(options.test_suite) == True:
            return_code = 0
...


testsuite.py

Source: testsuite.py (GitHub)


# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.

"""Test suites and related things."""

__metaclass__ = type

__all__ = [
    'ConcurrentTestSuite',
    'iterate_tests',
    ]

try:
    from Queue import Queue
except ImportError:
    from queue import Queue
import threading
import unittest

import testtools

def iterate_tests(test_suite_or_case):
    """Iterate through all of the test cases in 'test_suite_or_case'."""
    try:
        suite = iter(test_suite_or_case)
    except TypeError:
        yield test_suite_or_case
    else:
        for test in suite:
            for subtest in iterate_tests(test):
                yield subtest

class ConcurrentTestSuite(unittest.TestSuite):
    """A TestSuite whose run() calls out to a concurrency strategy."""

    def __init__(self, suite, make_tests):
        """Create a ConcurrentTestSuite to execute suite.

        :param suite: A suite to run concurrently.
        :param make_tests: A helper function to split the tests in the
            ConcurrentTestSuite into some number of concurrently executing
            sub-suites. make_tests must take a suite, and return an iterable
            of TestCase-like object, each of which must have a run(result)
            method.
        """
        super(ConcurrentTestSuite, self).__init__([suite])
        self.make_tests = make_tests

    def run(self, result):
        """Run the tests concurrently.

        This calls out to the provided make_tests helper, and then serialises
        the results so that result only sees activity from one TestCase at
        a time.

        ConcurrentTestSuite provides no special mechanism to stop the tests
        returned by make_tests, it is up to the make_tests to honour the
        shouldStop attribute on the result object they are run with, which will
        be set if an exception is raised in the thread which
        ConcurrentTestSuite.run is called in.
        """
        tests = self.make_tests(self)
        try:
            threads = {}
            queue = Queue()
            result_semaphore = threading.Semaphore(1)
            for test in tests:
                process_result = testtools.ThreadsafeForwardingResult(result,
                    result_semaphore)
                reader_thread = threading.Thread(
                    target=self._run_test, args=(test, process_result, queue))
                threads[test] = reader_thread, process_result
                reader_thread.start()
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            for thread, process_result in threads.values():
                process_result.stop()
            raise

    def _run_test(self, test, process_result, queue):
        try:
            test.run(process_result)
        finally:
...
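For context, here is a minimal sketch of how this suite might be driven. The make_tests docstring above only requires an iterable of TestCase-like objects with a run(result) method, so the SubSuite wrapper and split_into helper below are hypothetical names of my own, and the "tests" discovery path is an assumption; it also assumes testtools is installed and exports ConcurrentTestSuite and iterate_tests as shown in the snippet.

import unittest

from testtools import ConcurrentTestSuite, iterate_tests

class SubSuite:
    # A minimal "TestCase-like object" per the make_tests contract: it only
    # needs run(result). It is also hashable (default object identity hash),
    # which matters because ConcurrentTestSuite.run() keys a dict on the
    # objects that make_tests returns.
    def __init__(self, tests):
        self._suite = unittest.TestSuite(tests)

    def run(self, result):
        self._suite.run(result)

def split_into(count):
    # Build a make_tests helper that deals the flattened test list into
    # `count` round-robin partitions, one per worker thread.
    def make_tests(suite):
        tests = list(iterate_tests(suite))
        return [SubSuite(tests[i::count]) for i in range(count)]
    return make_tests

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.discover("tests")  # assumed test directory
    result = unittest.TestResult()
    ConcurrentTestSuite(suite, split_into(4)).run(result)
    print(f"{result.testsRun} run, {len(result.failures)} failures, {len(result.errors)} errors")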


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

