How to use the after_each_hook method in Playwright Python

Best Python code snippets using playwright-python
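
In the snippets below, after_each_hook is not a built-in Playwright API: it is simply the name given to a pytest autouse fixture, and anything placed after the fixture's yield statement runs once each test finishes, pass or fail. Here is a minimal sketch of the pattern; the fixture name and the cleanup step are illustrative, and the page fixture is assumed to come from the pytest-playwright plugin:

import pytest
from playwright.sync_api import Page

@pytest.fixture(autouse=True)
def after_each_hook(page: Page):
    # Anything before the yield runs before each test (setup).
    yield
    # Anything after the yield runs after each test, even if the test failed.
    page.context.clear_cookies()  # illustrative cleanup step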

test_runner.py

Source: test_runner.py (GitHub)


1"""This module contains the method for running a single test file."""2import sys3import os4import time5import traceback6from golem.core import session7from golem.core import utils8from golem.core.test import Test9from golem.core.project import Project10from golem.test_runner.test_runner_utils import import_page_into_test11from golem.test_runner import test_logger12from golem.test_runner.conf import ResultsEnum13from golem import actions, execution14from golem.report import test_report15class Data(dict):16 """dot notation access to dictionary attributes"""17 def __getattr__(*args):18 val = dict.get(*args)19 return Data(val) if type(val) is dict else val20 __setattr__ = dict.__setitem__21 __delattr__ = dict.__delitem__22class Secrets(dict):23 """dot notation access to dictionary attributes"""24 def __getattr__(*args):25 val = dict.get(*args)26 return Secrets(val) if type(val) is dict else val27 __setattr__ = dict.__setitem__28 __delattr__ = dict.__delitem__29def run_test(testdir, project, test_name, test_data, secrets, browser, env_name,30 settings, exec_report_dir, set_name, test_functions=None,31 execution_has_failed_tests=None, tags=None, from_suite=False):32 """Run a single test"""33 session.testdir = testdir34 runner = TestRunner(testdir, project, test_name, test_data, secrets, browser, env_name,35 settings, exec_report_dir, set_name, test_functions, execution_has_failed_tests,36 tags, from_suite)37 runner.prepare()38class TestRunner:39 __test__ = False # ignore this class from Pytest40 def __init__(self, testdir, project, test_name, test_data, secrets, browser, env_name,41 settings, exec_report_dir, set_name, test_functions_to_run=None,42 execution_has_failed_tests=None, tags=None, from_suite=False):43 self.testdir = testdir44 self.project = Project(project)45 self.test = Test(project, test_name)46 self.test_data = test_data47 self.secrets = secrets48 self.browser = browser49 self.env_name = env_name50 self.settings = settings51 self.exec_report_dir = exec_report_dir52 self.set_name = set_name53 # When test_functions_to_run is empty or None, all the test functions54 # defined in the test file will be run55 self.test_functions_to_run = test_functions_to_run or []56 self.test_hooks = {57 'before_test': [],58 'before_each': [],59 'after_each': [],60 'after_test': []61 }62 self.execution_has_failed_tests = execution_has_failed_tests63 self.execution_tags = tags or []64 self.from_suite = from_suite65 self.global_skip = False66 self.skip_tests = False67 self.result = None68 self.reportdir = None69 self.test_module = None70 self.test_functions = {}71 self.test_timestamp = utils.get_timestamp()72 self.logger = None73 def prepare(self):74 # Create report directory for the test file75 self.reportdir = test_report.create_test_file_report_dir(76 self.exec_report_dir, self.test.name, self.set_name)77 # Initialize logger for the test file78 self.logger = test_logger.get_logger(self.reportdir,79 self.settings['cli_log_level'],80 self.settings['log_all_events'])81 # set execution module values82 self._set_execution_module_values()83 self._print_test_info()84 # add the 'project' directory to python path85 # to enable relative imports from the test86 # TODO87 sys.path.append(os.path.join(self.testdir, 'projects', self.project.path))88 self.import_test()89 def import_test(self):90 test_module, error = utils.import_module(self.test.path)91 if error:92 actions._add_error(message=error.splitlines()[-1], description=error)93 self.result = ResultsEnum.CODE_ERROR94 self.finalize(import_modules_failed=True)95 
else:96 self.test_module = test_module97 # Gather test hooks defined in the test module98 # TODO setup is deprecated, if before_test is not present, and99 # setup is, use setup instead100 if hasattr(self.test_module, 'before_test'):101 self.test_hooks['before_test'].append(getattr(self.test_module, 'before_test'))102 elif hasattr(self.test_module, 'setup'):103 self.test_hooks['before_test'].append(getattr(self.test_module, 'setup'))104 if hasattr(self.test_module, 'before_each'):105 self.test_hooks['before_each'].append(getattr(self.test_module, 'before_each'))106 if hasattr(self.test_module, 'after_each'):107 self.test_hooks['after_each'].append(getattr(self.test_module, 'after_each'))108 # TODO teardown is deprecated, if after_test is not present, and109 # teardown is, use teardown instead110 if hasattr(self.test_module, 'after_test'):111 self.test_hooks['after_test'].append(getattr(self.test_module, 'after_test'))112 elif hasattr(self.test_module, 'teardown'):113 self.test_hooks['after_test'].append(getattr(self.test_module, 'teardown'))114 # If test_functions_to_run is empty every test function defined in the115 # file will be run116 if not self.test_functions_to_run:117 self.test_functions_to_run = self.test.test_function_list118 if not len(self.test_functions_to_run):119 msg = f'No tests were found for file: {self.test.name}'120 execution.logger.info(msg)121 self.finalize()122 return123 else:124 for test_function in self.test_functions_to_run:125 self.test_functions[test_function] = self._test_function_result_dict(test_function)126 self.import_modules()127 def import_modules(self):128 # import logger129 setattr(self.test_module, 'logger', execution.logger)130 # import actions module131 if self.settings['implicit_actions_import']:132 for action in utils.module_local_public_functions(actions):133 setattr(self.test_module, action, getattr(actions, action))134 # store test description135 if hasattr(self.test_module, 'description'):136 execution.description = self.test_module.description137 # import pages138 try:139 if hasattr(self.test_module, 'pages') and self.settings['implicit_page_import']:140 base_path = self.project.page_directory_path141 for page in self.test_module.pages:142 self.test_module = import_page_into_test(base_path, self.test_module,143 page.split('.'))144 except Exception as e:145 message = f'{e.__class__.__name__}: {e}'146 trcbk = traceback.format_exc()147 actions._add_error(message=message, description=trcbk)148 self.result = ResultsEnum.CODE_ERROR149 # check for skip flag150 # test is skipped only when run from a suite151 skip = getattr(self.test_module, 'skip', False)152 if skip and self.from_suite:153 self.global_skip = skip154 if self.result == ResultsEnum.CODE_ERROR:155 self.finalize(import_modules_failed=True)156 else:157 self.run_setup()158 def run_setup(self):159 if self.global_skip:160 self.run_test_functions()161 return162 for before_test_hook in self.test_hooks['before_test']:163 # TODO setup is deprecated164 # setup is still run as 'setup' and an info log is shown165 hook_name = 'before_test'166 if before_test_hook.__name__ == 'setup':167 hook_name = 'setup'168 execution.logger.info('setup hook function is deprecated, use before_test')169 # reset execution values specific to this test170 self._reset_execution_module_values_for_test_function(None, hook_name)171 result = self.generic_run_function(before_test_hook)172 if result != ResultsEnum.SUCCESS:173 self.generate_report_for_hook_function(hook_name, result)174 return 
self.run_teardown(setup_failed=True)175 self.run_test_functions()176 def run_test_functions(self):177 for test_function in self.test_functions:178 self.run_test_function(test_function)179 self.run_teardown()180 def run_test_function(self, test_name):181 result = self.test_functions[test_name]182 self._reset_execution_module_values_for_test_function(None, test_name)183 if self.global_skip or self.skip_tests:184 result['result'] = ResultsEnum.SKIPPED185 execution.logger.info(f'Test skipped: {test_name}')186 self._finalize_test_function(test_name)187 return188 # Create folder for the test function report189 test_reportdir = test_report.create_test_function_report_dir(self.reportdir, test_name)190 result['test_reportdir'] = test_reportdir191 # Run before_each hooks192 for before_each_hook in self.test_hooks['before_each']:193 # reset execution values specific to this test194 self._reset_execution_module_values_for_test_function(None, 'before_each')195 before_each_result = self.generic_run_function(before_each_hook)196 if before_each_result != ResultsEnum.SUCCESS:197 self.skip_tests = True198 self.generate_report_for_hook_function('before_each', before_each_result)199 # reset execution values specific to this test200 self._reset_execution_module_values_for_test_function(test_reportdir, test_name)201 if self.skip_tests:202 result['result'] = ResultsEnum.SKIPPED203 execution.logger.info(f'Test skipped: {test_name}')204 self._finalize_test_function(test_name)205 return206 execution.logger.info(f'Test started: {test_name}')207 result['start_time'] = time.time()208 try:209 f = getattr(self.test_module, test_name)210 f(execution.data)211 # take screenshot_on_end212 if self.settings['screenshot_on_end'] and execution.browser:213 actions.take_screenshot('Test end')214 except AssertionError as e:215 self._add_error(message='Failure', exception=e)216 result['result'] = ResultsEnum.FAILURE217 except Exception as e:218 result['result'] = ResultsEnum.CODE_ERROR219 self._add_error(message='Error', exception=e)220 if result['result'] not in [ResultsEnum.CODE_ERROR, ResultsEnum.FAILURE]:221 if execution.errors:222 result['result'] = ResultsEnum.ERROR223 if result['result'] in [None, ResultsEnum.PENDING]:224 result['result'] = ResultsEnum.SUCCESS225 result['end_time'] = time.time()226 result['test_elapsed_time'] = round(result['end_time'] - result['start_time'], 2)227 execution.logger.info(f"Test Result: {result['result'].upper()}")228 self._finalize_test_function(test_name)229 # Run after_each hooks230 for after_each_hook in self.test_hooks['after_each']:231 # reset execution values specific to this test232 self._reset_execution_module_values_for_test_function(None, 'after_each')233 after_each_result = self.generic_run_function(after_each_hook)234 if after_each_result != ResultsEnum.SUCCESS:235 self.skip_tests = True236 self.generate_report_for_hook_function('after_each', after_each_result)237 def _finalize_test_function(self, test_name):238 result = self.test_functions[test_name]239 result['description'] = execution.description240 result['steps'] = execution.steps241 result['errors'] = execution.errors242 result['test_timestamp'] = self.test_timestamp243 result['browser'] = execution.browser_definition['name']244 result['browser_capabilities'] = execution.browser_definition['capabilities']245 # Report a test has failed in the test execution,246 # this will later determine the exit status247 _error_codes = [ResultsEnum.CODE_ERROR, ResultsEnum.ERROR, ResultsEnum.FAILURE]248 if self.execution_has_failed_tests is 
not None and result['result'] in _error_codes:249 self.execution_has_failed_tests.value = True250 test_report.generate_report(self.test.name, result, execution.data, self.reportdir)251 self._reset_execution_module_values_for_test_function()252 def run_teardown(self, setup_failed=False):253 teardown_failed = False254 if self.global_skip:255 self.finalize()256 return257 for after_test_hook in self.test_hooks['after_test']:258 # TODO teardown is deprecated259 # teardown is still run as 'teardown' and an info log is shown260 hook_name = 'after_test'261 if after_test_hook.__name__ == 'teardown':262 hook_name = 'teardown'263 execution.logger.info('teardown hook function is deprecated, use after_test')264 # reset execution values specific to this test265 self._reset_execution_module_values_for_test_function(None, hook_name)266 result = self.generic_run_function(after_test_hook)267 if result != ResultsEnum.SUCCESS:268 self.generate_report_for_hook_function(hook_name, result)269 # if there is no teardown or teardown failed or it did not close the driver,270 # let's try to close the driver manually271 if execution.browser:272 try:273 for browser, driver in execution.browsers.items():274 driver.quit()275 except:276 # if this fails, we have lost control over the webdriver window277 # and we are not going to be able to close it278 execution.logger.error('there was an error closing the driver',279 exc_info=True)280 finally:281 execution.browser = None282 self.finalize(setup_failed=setup_failed, teardown_failed=teardown_failed)283 def finalize(self, import_modules_failed=False, setup_failed=False, teardown_failed=False):284 # TODO this should be called at the point it failed285 # instead of here. Use a common method to generate286 # report for test functions and not test functions, like setup, teardown.287 # Reset the execution module after each so the steps and errors288 # collected belong to each function/non function phase289 if import_modules_failed:290 result = self._test_function_result_dict('setup')291 result['result'] = self.result292 result['test_timestamp'] = self.test_timestamp293 result['errors'] = execution.errors294 test_report.generate_report(self.test.name, result, execution.data,295 self.reportdir)296 test_logger.reset_logger(execution.logger)297 def generic_run_function(self, function):298 result = None299 try:300 function(execution.data)301 except AssertionError as e:302 self._add_error(message='Failure', exception=e)303 result = ResultsEnum.FAILURE304 except Exception as e:305 self._add_error(message='Error', exception=e)306 result = ResultsEnum.CODE_ERROR307 if result is None:308 if execution.errors:309 result = ResultsEnum.ERROR310 else:311 result = ResultsEnum.SUCCESS312 return result313 def generate_report_for_hook_function(self, hook_name, result):314 result_dict = self._test_function_result_dict(hook_name)315 result_dict['result'] = result316 result_dict['description'] = execution.description317 result_dict['test_timestamp'] = self.test_timestamp318 result_dict['errors'] = execution.errors319 result_dict['steps'] = execution.steps320 result_dict['browser'] = execution.browser_definition['name']321 test_report.generate_report(self.test.name, result_dict, execution.data, self.reportdir)322 def _set_execution_module_values(self):323 execution.test_file = self.test.name324 execution.browser = None325 execution.browser_definition = self.browser326 execution.browsers = {}327 execution.data = Data(self.test_data)328 execution.secrets = Secrets(self.secrets)329 execution.description = 
None330 execution.settings = self.settings331 execution.test_dirname = self.test.dirname332 execution.test_path = self.test.path333 execution.project_name = self.project.name334 execution.project_path = self.project.path335 execution.testdir = self.testdir336 execution.execution_reportdir = self.exec_report_dir337 execution.testfile_reportdir = self.reportdir338 execution.logger = self.logger339 execution.tags = self.execution_tags340 execution.environment = self.env_name341 execution.test_name = None342 execution.steps = []343 execution.errors = []344 execution.test_reportdir = None345 execution.timers = {}346 @staticmethod347 def _reset_execution_module_values_for_test_function(test_reportdir=None, test_name=None):348 execution.test_name = test_name349 execution.steps = []350 execution.errors = []351 execution.test_reportdir = test_reportdir352 execution.timers = {}353 def _print_test_info(self):354 execution.logger.info(f'Test execution started: {self.test.name}')355 execution.logger.info(f"Browser: {self.browser['name']}")356 if 'env' in self.test_data:357 if 'name' in self.test_data['env']:358 execution.logger.info(f"Environment: {self.test_data['env']['name']}")359 if self.test_data:360 data_string = ''361 for key, value in self.test_data.items():362 if key == 'env':363 if 'url' in value:364 data_string += f"\n url: {value['url']}"365 else:366 data_string += f'\n {key}: {value}'367 execution.logger.info(f'Using data:{data_string}')368 def _add_error(self, message, exception):369 """Add an error to the test from an exception.370 * Add a new step with `message`, don't log it371 * Add an error using:372 - message -> 'exception.__class__.__name__: exception'373 e.g.: 'AssertionError: expected title to be 'foo'374 - description -> traceback.format_exc()375 * Append the error to the last step376 * Log the error377 * Take a screenshot if screenshot_on_error == True and378 there is an open browser379 """380 actions._add_step(message, log_step=False)381 error_message = f'{exception.__class__.__name__}: {exception}'382 trcbk = traceback.format_exc().rstrip()383 actions._add_error(message=error_message, description=trcbk)384 actions._append_error(message=error_message, description=trcbk)385 self._take_screeenshot_on_error()386 def _take_screeenshot_on_error(self):387 """Take a screenshot only if there is a browser available"""388 try:389 if self.settings['screenshot_on_error'] and execution.browser:390 actions._screenshot_on_error()391 except:392 # if the driver is not available capture screenshot is not possible393 pass394 def _test_function_result_dict(self, test_name):395 return {396 'name': test_name,397 'set_name': self.set_name,398 'start_time': None,399 'end_time': None,400 'test_reportdir': None,401 'result': ResultsEnum.PENDING,402 'errors': [],403 'description': '',404 'steps': [],405 'test_elapsed_time': None,406 'test_timestamp': None,407 'browser': '',408 'browser_capabilities': ''...
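
This runner belongs to the Golem test framework (note the golem.* imports): it discovers module-level functions named before_test, before_each, after_each, and after_test in the test file via hasattr checks and calls each one with execution.data; if an after_each hook fails, the remaining test functions are skipped. A hypothetical Golem test file that this runner would pick up might look like the sketch below, where the action and data names are illustrative (Golem injects its actions into the test module when implicit_actions_import is enabled, as import_modules shows):

# Hypothetical Golem test file: the runner discovers these module-level
# hook functions by name and calls each of them with execution.data.

def before_each(data):
    # runs before every test function in this file
    navigate(data.env.url)

def after_each(data):
    # runs after every test function; a failure here skips the remaining tests
    delete_all_cookies()

def test_login(data):
    send_keys(('id', 'username'), data.username)
    click(('id', 'login-button'))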


test_download.py

Source: test_download.py (GitHub)


...
def assert_file_content(path, content):
    with open(path, "r") as fd:
        assert fd.read() == content


@pytest.fixture(autouse=True)
def after_each_hook(server):
    def handle_download(request):
        request.setHeader("Content-Type", "application/octet-stream")
        request.setHeader("Content-Disposition", "attachment")
        request.write(b"Hello world")
        request.finish()

    def handle_download_with_file_name(request):
        request.setHeader("Content-Type", "application/octet-stream")
        request.setHeader("Content-Disposition", "attachment; filename=file.txt")
        request.write(b"Hello world")
        request.finish()

    server.set_route("/download", handle_download)
    server.set_route("/downloadWithFilename", handle_download_with_file_name)
    yield


async def test_should_report_downloads_with_accept_downloads_false(page: Page, server):
...
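
The fixture above registers the two download routes on the test server before each test and then yields, so every test body can rely on them. A test exercising the /downloadWithFilename route could look like the following sketch; using server.PREFIX as the server's base URL mirrors the suite's conventions, and the browser context is assumed to accept downloads:

async def test_download_suggested_filename(page: Page, server):
    # The route was registered by the after_each_hook fixture above.
    await page.set_content(
        f'<a href="{server.PREFIX}/downloadWithFilename">download</a>'
    )
    async with page.expect_download() as download_info:
        await page.click("a")
    download = await download_info.value
    assert download.suggested_filename == "file.txt"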


conftest.py

Source: conftest.py (GitHub)


...
    test_server.start()
    yield
    test_server.stop()


@pytest.fixture(autouse=True)
def after_each_hook():
    yield
    test_server.reset()


@pytest.fixture(scope="session")
def browser_name(pytestconfig):
    return pytestconfig.getoption("browser")


@pytest.fixture(scope="session")
def is_webkit(browser_name):
    return browser_name == "webkit"


@pytest.fixture(scope="session")
def is_firefox(browser_name):
    return browser_name == "firefox"


@pytest.fixture(scope="session")
def is_chromium(browser_name):
    return browser_name == "chromium"
...
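
Because the fixture is autouse, every test in the session picks up the reset behavior without requesting it: the code before the yield runs before the test, and test_server.reset() runs after it, whether the test passed or failed. A hypothetical test in this suite therefore needs no extra wiring:

async def test_example(page: Page, server):
    # after_each_hook is never referenced here; autouse applies it, and
    # test_server.reset() runs automatically once this test finishes.
    await page.goto(server.PREFIX + "/")
    assert page.url.startswith(server.PREFIX)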


Playwright tutorial

LambdaTest’s Playwright tutorial gives you a broad overview of the Playwright automation framework, its unique features, and its use cases, with examples to deepen your understanding of Playwright testing. The tutorial offers A-to-Z guidance, from installing the Playwright framework to best practices and advanced concepts.

Chapters:

  1. What is Playwright: Playwright is comparatively new but has quickly gained popularity. Get to know some of the history of Playwright, along with interesting facts connected with it.
  2. How To Install Playwright: Learn which basic configuration and dependencies are required to install Playwright and run a test. Get step-by-step directions for installing the Playwright automation framework.
  3. Playwright Futuristic Features: Launched in 2020, Playwright quickly gained huge popularity thanks to compelling features such as the Playwright Test Generator and Inspector, the Playwright Reporter, and the Playwright auto-waiting mechanism. Read up on those features to master Playwright testing.
  4. What is Component Testing: Component testing in Playwright is a unique feature that lets a tester exercise a single component of a web application without integrating it with other elements. Learn how to perform component testing with the Playwright automation framework.
  5. Inputs And Buttons In Playwright: Every website has input boxes and buttons; learn about testing inputs and buttons across different scenarios, with examples.
  6. Functions and Selectors in Playwright: Learn how to launch the Chromium browser with Playwright. Also, gain a better understanding of some important functions such as “BrowserContext,” which allows you to run multiple browser sessions, and “newPage,” which interacts with a page (see the sketch after this list).
  7. Handling Alerts and Dropdowns in Playwright: Playwright can interact with different types of alerts and pop-ups, such as simple, confirmation, and prompt, and different types of dropdowns, such as single-select and multi-select. Get hands-on with handling alerts and dropdowns in Playwright testing.
  8. Playwright vs Puppeteer: Get to know how the two testing frameworks differ from one another, which browsers they support, and what features they provide.
  9. Run Playwright Tests on LambdaTest: Playwright testing with LambdaTest boosts test performance considerably. You can run multiple Playwright tests in parallel on the LambdaTest test cloud. Get a step-by-step guide to running your Playwright tests on the LambdaTest platform.
  10. Playwright Python Tutorial: The Playwright automation framework supports all major languages, such as Python, JavaScript, TypeScript, and .NET. Python end-to-end testing with Playwright offers particular advantages because of the language's versatility. Get the hang of Playwright Python testing with this chapter.
  11. Playwright End To End Testing Tutorial: Get hands-on with Playwright end-to-end testing and learn to use features such as the Trace Viewer, debugging, networking, component testing, visual testing, and many more.
  12. Playwright Video Tutorial: Watch video tutorials on Playwright testing from experts and get an in-depth, sequential explanation of Playwright automation testing.
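
As referenced in chapter 6, the sketch below shows those building blocks using Playwright's sync API for Python, where the JavaScript newPage has the Python counterpart new_page; the URL is only a placeholder:

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    context = browser.new_context()   # an isolated session: own cookies and storage
    page = context.new_page()         # a page (tab) inside that session
    page.goto("https://example.com")  # placeholder URL
    print(page.title())
    browser.close()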

Run Playwright Python automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
