How to use the test_info method in Airtest

Best Python code snippet using Airtest

report.py

Source: report.py (GitHub)


...
        """
        Called immediately after 'test' has run.
        """
        unittest.TestResult.stopTest(self, test)
        test_info = self._find_test_info(test)
        test_info.end_time = time.time()
        time_taken = test_info.end_time - test_info.start_time
        self.logger.info("%s ran in %0.2f seconds.", test.basename(), time_taken)
        # Asynchronously closes the buildlogger test handler to avoid having too many threads open
        # on 32-bit systems.
        logging.flush.close_later(test.logger)
        # Restore the original logger for the test.
        test.logger = self.__original_loggers.pop(test.id())

    def addError(self, test, err):
        """
        Called when a non-failureException was raised during the
        execution of 'test'.
        """
        unittest.TestResult.addError(self, test, err)
        self.num_errored += 1
        test_info = self._find_test_info(test)
        test_info.status = "error"
        test_info.return_code = test.return_code

    def setError(self, test):
        """
        Used to change the outcome of an existing test to an error.
        """
        test_info = self._find_test_info(test)
        if test_info.end_time is None:
            raise ValueError("stopTest was not called on %s" % (test.basename()))
        test_info.status = "error"
        test_info.return_code = 2
        # Recompute number of success, failures, and errors.
        self.num_succeeded = len(self.get_successful())
        self.num_failed = len(self.get_failed())
        self.num_errored = len(self.get_errored())

    def addFailure(self, test, err):
        """
        Called when a failureException was raised during the execution
        of 'test'.
        """
        unittest.TestResult.addFailure(self, test, err)
        self.num_failed += 1
        test_info = self._find_test_info(test)
        test_info.status = "fail"
        test_info.return_code = test.return_code

    def setFailure(self, test, return_code=1):
        """
        Used to change the outcome of an existing test to a failure.
        """
        test_info = self._find_test_info(test)
        if test_info.end_time is None:
            raise ValueError("stopTest was not called on %s" % (test.basename()))
        test_info.status = "fail"
        test_info.return_code = return_code
        # Recompute number of success, failures, and errors.
        self.num_succeeded = len(self.get_successful())
        self.num_failed = len(self.get_failed())
        self.num_errored = len(self.get_errored())

    def addSuccess(self, test):
        """
        Called when 'test' executed successfully.
        """
        unittest.TestResult.addSuccess(self, test)
        self.num_succeeded += 1
        test_info = self._find_test_info(test)
        test_info.status = "pass"
        test_info.return_code = test.return_code

    def wasSuccessful(self):
        """
        Returns true if all tests executed successfully.
        """
        return self.num_failed == self.num_errored == 0

    def get_successful(self):
        """
        Returns the status and timing information of the tests that
        executed successfully.
        """
        return [test_info for test_info in self.test_infos if test_info.status == "pass"]

    def get_failed(self):
        """
        Returns the status and timing information of the tests that
        raised a failureException during their execution.
        """
        return [test_info for test_info in self.test_infos if test_info.status == "fail"]

    def get_errored(self):
        """
        Returns the status and timing information of the tests that
        raised a non-failureException during their execution.
        """
        return [test_info for test_info in self.test_infos if test_info.status == "error"]

    def as_dict(self):
        """
        Return the test result information as a dictionary.
        Used to create the report.json file.
        """
        results = []
        for test_info in self.test_infos:
            # Don't distinguish between failures and errors.
            status = "pass" if test_info.status == "pass" else "fail"
            result = {
                "test_file": test_info.test_id,
                "status": status,
                "exit_code": test_info.return_code,
                "start": test_info.start_time,
                "end": test_info.end_time,
                "elapsed": test_info.end_time - test_info.start_time,
            }
            if test_info.url_endpoint is not None:
                result["url"] = test_info.url_endpoint
                result["url_raw"] = test_info.url_endpoint + "?raw=1"
            results.append(result)
        return {
            "results": results,
            "failures": self.num_failed + self.num_errored,
        }

    def reset(self):
        """
        Resets the test report back to its initial state.
        """
        self.test_infos = []
        self.num_dynamic = 0
        self.num_succeeded = 0
        self.num_failed = 0
        self.num_errored = 0
        self.__original_loggers = {}

    def _find_test_info(self, test):
        """
        Returns the status and timing information associated with
        'test'.
        """
        test_id = test.id()
        # Search the list backwards to efficiently find the status and timing information of a test
        # that was recently started.
        for test_info in reversed(self.test_infos):
            if test_info.test_id == test_id:
                return test_info
        raise ValueError("Details for %s not found in the report" % (test.basename()))


class _TestInfo(object):
    """
    Holder for the test status and timing information.
...
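
The core of this snippet is the pattern around test_info: each test gets a record object holding its id, status, return code, and start/end times, and every unittest hook (addSuccess, addFailure, stopTest) fetches that record via _find_test_info before mutating it. Below is a minimal, runnable sketch of the same idea; the TestInfo and SimpleReport names are illustrative stand-ins, not part of the snippet above.

import time


class TestInfo(object):
    """Holder for one test's status and timing information."""

    def __init__(self, test_id):
        self.test_id = test_id
        self.status = None           # "pass", "fail", or "error"
        self.return_code = None
        self.start_time = time.time()
        self.end_time = None


class SimpleReport(object):
    """Tracks one TestInfo record per started test."""

    def __init__(self):
        self.test_infos = []

    def start_test(self, test_id):
        self.test_infos.append(TestInfo(test_id))

    def stop_test(self, test_id, status, return_code=0):
        info = self._find_test_info(test_id)
        info.end_time = time.time()
        info.status = status
        info.return_code = return_code

    def _find_test_info(self, test_id):
        # Search backwards so the most recently started run of a test wins,
        # mirroring the reversed() lookup in the snippet above.
        for info in reversed(self.test_infos):
            if info.test_id == test_id:
                return info
        raise ValueError("Details for %s not found in the report" % test_id)


report = SimpleReport()
report.start_test("jstests/core/example.js")
report.stop_test("jstests/core/example.js", "pass")
info = report._find_test_info("jstests/core/example.js")
print("%s: %s in %0.2f seconds" % (info.test_id, info.status,
                                   info.end_time - info.start_time))

The backwards search matters when the same test id is started more than once: the newest record is found first, so late callbacks attach their status to the right run.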


test_parser_unittest.py

Source: test_parser_unittest.py (GitHub)


# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
#    copyright notice, this list of conditions and the following
#    disclaimer.
# 2. Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials
#    provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.

import os
import unittest

from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.w3c.test_parser import TestParser

options = {'all': False, 'no_overwrite': False}


class TestParserTest(unittest.TestCase):

    def test_analyze_test_reftest_one_match(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
</head>
"""
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html)
        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')

    def test_analyze_test_reftest_multiple_matches(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="match" href="orange-box-ref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()
        try:
            test_path = '/some/madeup/path/'
            parser = TestParser(options, test_path + 'somefile.html')
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()
        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')

    def test_analyze_test_reftest_match_and_mismatch(self):
        test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="orange-box-notref.xht" />
</head>
"""
        oc = OutputCapture()
        oc.capture_output()
        try:
            test_path = '/some/madeup/path/'
            parser = TestParser(options, test_path + 'somefile.html')
            test_info = parser.analyze_test(test_contents=test_html)
        finally:
            _, _, logs = oc.restore_output()
        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
        self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')

    def test_analyze_test_reftest_with_ref_support_Files(self):
        """ Tests analyze_test() using a reftest that refers to a reference file outside of the tests directory, where the reference file has paths to other support files """
        test_html = """<html>
<head>
<link rel="match" href="../reference/green-box-ref.xht" />
</head>
"""
        ref_html = """<head>
<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
<style type="text/css">
    background-image: url("../../support/some-image.png")
</style>
</head>
<body>
<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
</body>
</html>
"""
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
        self.assertNotEqual(test_info, None, 'did not find a test')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
        self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
        self.assertTrue('reference_support_info' in test_info.keys(), 'there should be reference_support_info for this test')
        self.assertEquals(len(test_info['reference_support_info']['files']), 3, 'there should be 3 support files in this reference')
        self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')

    def test_analyze_jstest(self):
        """ Tests analyze_test() using a jstest """
        test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html)
        self.assertNotEqual(test_info, None, 'test_info is None')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')

    def test_analyze_pixel_test_all_true(self):
        """ Tests analyze_test() using a test that is neither a reftest nor a jstest, with all=True """
        test_html = """<html>
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR" />
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
        # Set options to 'all' so this gets found
        options['all'] = True
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html)
        self.assertNotEqual(test_info, None, 'test_info is None')
        self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
        self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
        self.assertFalse('jstest' in test_info.keys(), 'test should not be a jstest')

    def test_analyze_pixel_test_all_false(self):
        """ Tests analyze_test() using a test that is neither a reftest nor a jstest, with all=False """
        test_html = """<html>
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR" />
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
        # Set all to false so this gets skipped
        options['all'] = False
        test_path = '/some/madeup/path/'
        parser = TestParser(options, test_path + 'somefile.html')
        test_info = parser.analyze_test(test_contents=test_html)
        self.assertEqual(test_info, None, 'test should have been skipped')

    def test_analyze_non_html_file(self):
        """ Tests analyze_test() with a file that has no html """
        # FIXME: use a mock filesystem
        parser = TestParser(options, os.path.join(os.path.dirname(__file__), 'test_parser.py'))
        test_info = parser.analyze_test()
...
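
In these tests, analyze_test() returns test_info as a plain dict (or None when a test is skipped), and the type of test is signalled purely by which keys are present: 'test', 'reference', 'jstest', and 'reference_support_info'. A hedged sketch of consuming such a dict follows; the classify() helper is hypothetical and relies only on the keys the assertions above check for.

def classify(test_info):
    """Summarize what kind of test analyze_test() found."""
    if test_info is None:
        return 'skipped'  # e.g. a pixel test when options['all'] is False
    if 'jstest' in test_info:
        return 'jstest'
    if 'reference' in test_info:
        # Reftests may also carry support files needed by the reference page.
        support = test_info.get('reference_support_info', {})
        return 'reftest with %d support files' % len(support.get('files', []))
    return 'pixel test'


print(classify(None))
print(classify({'test': 'somefile.html', 'jstest': True}))
print(classify({'test': 'somefile.html',
                'reference': '/some/madeup/path/green-box-ref.xht',
                'reference_support_info': {'files': ['ref-stylesheet.css',
                                                     'some-image.png',
                                                     'black96x96.png']}}))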


OmniPingTestClass.py

Source: OmniPingTestClass.py (GitHub)


from datetime import datetime
import time
import socket
import re
import asyncio

import http3
import cherrypy
from unsync import unsync
import pprint


def sucPer(total_trys, successes):
    '''
    This is just a function to work out percentage success
    and cope with the zero division
    '''
    try:
        totp = "{0:.2f} %".format(successes / total_trys * 100)
    except ZeroDivisionError:
        totp = "0.00 %"
    return totp


class OmniPingTester():
    '''
    Task to manage asynchronous test calls
    '''
    # Dictionary to give meaningful responses when testing HTTP(S)
    stat_dict = {}
    stat_dict[200] = 'Good (200)'
    stat_dict[201] = 'Good (201)'
    stat_dict[202] = 'Good (202)'
    stat_dict[300] = 'Redir (300)'
    stat_dict[301] = 'Redir (301)'
    stat_dict[302] = 'Redir (302)'
    stat_dict[400] = 'Bad (400)'
    stat_dict[401] = 'Not Auth (401)'
    stat_dict[403] = 'Forbidden (403)'
    stat_dict[404] = 'Not Found (404)'

    def __init__(self, interval=2):
        '''
        Timeout and interval values are calculated on instantiation
        based on the desired interval. The interval the CherryPy
        background process uses is calculated by referencing these.
        '''
        self.timeout = 2.0
        self.interval = interval
        if self.interval <= 4.0:
            self.timeout = self.interval / 2
        self.loop = False

    def run_once(self, input_report):
        '''
        This orchestrates the testing based on the passed-in report.
        It creates a new asyncio event loop each time, as this
        suffered fewer errors.
        '''
        if not input_report['started']:
            input_report['started'] = datetime.now()
        input_report['count'] += 1
        test_results = []
        try:
            for test in input_report['tests']:
                if not test:
                    result = self.dummy_fail()
                elif test['test'] in ['ICMP', 'PING']:
                    result = self.ping_tester(test)
                elif test['test'] in ['HTTP', 'HTTPS']:
                    result = self.http_tester(test)
                test_results.append(result.result())
            input_report['tests'] = test_results
            input_report['time'] = datetime.now()
            input_report['duration'] = input_report['time'] - input_report['started']
            return input_report
        except OSError as e:
            cherrypy.log(f'[EE] {e}')

    @unsync
    async def dummy_fail(self):
        '''
        This is here to simulate a timed-out test in order to
        allow for consistent intervals between good (quick) tests and
        failed (timed-out) tests
        '''
        await asyncio.sleep(self.timeout - 0.12)
        return False

    @unsync
    async def ping_tester(self, test_info):
        '''
        Method to test using PING
        uses asyncio's subprocesses
        '''
        try:
            test_info['rtt'] = '--'
            test_info['good'] = False
            test_info['last_stat'] = test_info['status']
            test_info['status'] = 'Incomplete'
            proc = await asyncio.create_subprocess_shell(
                f'ping -c 1 -W {self.timeout} -n {test_info["host"]}',
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE)
            stdout, _ = await proc.communicate()
            test_info['total'] += 1
            if proc.returncode == 0:
                test_info['total_successes'] += 1
                test_info['last_good'] = datetime.now().strftime('%a %H:%M:%S')
                test_info['good'] = True
                test_info['status'] = 'Good'
                rtt_line_re = r'[64]+\sbytes\sfrom\s[0-9\.:a-f]+:\sicmp_seq=1\sttl=[0-9]+\s' \
                              r'time=([0-9\.]+)\sms'
                if stdout:
                    for line in stdout.decode().split('\n'):
                        match = re.match(rtt_line_re, line)
                        if match:
                            test_info['rtt'] = f'{match.group(1)} ms'
                            break
            else:
                test_info['status'] = 'Time Out'
                test_info['last_bad'] = datetime.now().strftime('%a %H:%M:%S')
                test_info['good'] = False
                unreach_line_re = r'^From\s[0-9\.:a-f]+\sicmp_seq=1\s([0-9a-zA-Z\ ]+)$'
                if stdout:
                    for line in stdout.decode().split('\n'):
                        match = re.match(unreach_line_re, line)
                        if match:
                            test_info['status'] = 'Unreachable'
                            break
                test_info['last_bad_status'] = test_info['status']
        except asyncio.CancelledError:
            print('Cancelled !!')
        test_info['success_percent'] = sucPer(
            test_info['total'],
            test_info['total_successes']
        )
        return test_info

    @unsync
    async def http_tester(self, test_info):
        '''
        Method to test using HTTP or HTTPS via the http3 library,
        which has async capabilities
        '''
        test_info['last_stat'] = test_info['status']
        test_info['rtt'] = '--'
        test_info['good'] = False
        test_info['status'] = 'Incomplete'
        try:
            client = http3.AsyncClient()
            start = time.time()
            if test_info['test'] == 'HTTP':
                url = f'http://{test_info["host"]}'
                resp = await client.get(url, timeout=self.timeout)
            if test_info['test'] == 'HTTPS':
                url = f'https://{test_info["host"]}'
                resp = await client.get(url, timeout=self.timeout, verify=False)
            test_info['good'] = True
            test_info['status'] = self.stat_dict.get(resp.status_code, 'Unknown')
        except http3.exceptions.RedirectLoop:
            test_info['good'] = False
            test_info['status'] = 'Redirect Loop'
        except http3.exceptions.ConnectTimeout:
            test_info['good'] = False
            test_info['status'] = 'Time Out'
        except http3.exceptions.ReadTimeout:
            test_info['good'] = False
            test_info['status'] = 'Time Out'
        except socket.gaierror:
            test_info['good'] = False
            test_info['status'] = 'Bad Address'
        except OSError:
            test_info['good'] = False
            test_info['status'] = 'Unreachable'
        except asyncio.CancelledError:
            print('Cancelled !!')
        test_info['total'] += 1
        if not test_info['good']:
            test_info['last_bad'] = datetime.now().strftime('%a %H:%M:%S')
            test_info['last_bad_status'] = test_info['status']
        else:
            test_info['last_good'] = datetime.now().strftime('%a %H:%M:%S')
            test_info['total_successes'] += 1
            test_info['rtt'] = '{:.2f} ms'.format((time.time() - start) * 1000)
        test_info['success_percent'] = sucPer(
            test_info['total'],
            test_info['total_successes']
        )
...
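
Both ping_tester() and http_tester() take test_info as a mutable dict and update the same set of keys in place: status fields, counters, timestamps, and the derived success percentage. The sketch below shows the shape such a dict plausibly has before its first run; the initial values are an assumption inferred from the keys the methods read and write, not taken from the source file.

from datetime import datetime


def sucPer(total_trys, successes):
    # Same percentage helper as in the snippet above.
    try:
        return "{0:.2f} %".format(successes / total_trys * 100)
    except ZeroDivisionError:
        return "0.00 %"


# Assumed initial record for one monitored host.
test_info = {
    'test': 'ICMP',              # or 'PING', 'HTTP', 'HTTPS'
    'host': '192.0.2.1',         # any host or address to probe
    'status': 'Incomplete',
    'last_stat': None,
    'good': False,
    'rtt': '--',
    'total': 0,
    'total_successes': 0,
    'last_good': None,
    'last_bad': None,
    'last_bad_status': None,
    'success_percent': '0.00 %',
}

# After a successful probe, a tester bumps the counters, stamps the time,
# and recomputes the ratio, exactly as the methods above do.
test_info['total'] += 1
test_info['total_successes'] += 1
test_info['last_good'] = datetime.now().strftime('%a %H:%M:%S')
test_info['success_percent'] = sucPer(test_info['total'],
                                      test_info['total_successes'])
print(test_info['success_percent'])  # -> 100.00 %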


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Airtest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
