How to use fail_header_str method in avocado

Best Python code snippet using avocado_python

list.py

Source:list.py Github

copy

Full Screen

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
# Author: Beraldo Leal <bleal@redhat.com>

import os

from avocado.core import exit_codes, loader, parser_common_args
from avocado.core.output import LOG_UI, TERM_SUPPORT
from avocado.core.plugin_interfaces import CLICmd
from avocado.core.resolver import ReferenceResolutionResult
from avocado.core.settings import settings
from avocado.core.suite import TestSuite
from avocado.core.test import Test
from avocado.utils.astring import iter_tabular_output


def _get_test_tags(test):
    """Return a list of all tags of a test as string."""
    params = test[1]
    rendered = []
    for tag, values in params.get('tags', {}).items():
        if values:
            rendered.append("%s(%s)" % (tag, ",".join(values)))
        else:
            rendered.append(tag)
    return ",".join(rendered)


class List(CLICmd):

    """
    Implements the avocado 'list' subcommand
    """

    name = 'list'
    description = 'List available tests'

    def _display(self, suite, matrix):
        """Print the test matrix and, when verbose, summary sections.

        :param suite: the :class:`TestSuite` whose tests were listed
        :param matrix: rows as produced by :meth:`_get_test_matrix` or
                       :meth:`_get_resolution_matrix`
        """
        verbose = suite.config.get('core.verbose')
        header = None
        if verbose:
            header = (TERM_SUPPORT.header_str('Type'),
                      TERM_SUPPORT.header_str('Test'),
                      TERM_SUPPORT.header_str('Tag(s)'))

        for row in iter_tabular_output(matrix, header=header, strip=True):
            LOG_UI.debug(row)

        if not verbose:
            return

        if suite.resolutions:
            resolution_header = (TERM_SUPPORT.header_str('Resolver'),
                                 TERM_SUPPORT.header_str('Reference'),
                                 TERM_SUPPORT.header_str('Info'))
            LOG_UI.info("")

            # Map resolution outcome to a color decorator; anything not in
            # the mapping is rendered with the "warning" style.
            decorators = {
                ReferenceResolutionResult.SUCCESS: TERM_SUPPORT.healthy_str,
                ReferenceResolutionResult.NOTFOUND: TERM_SUPPORT.fail_header_str,
                ReferenceResolutionResult.ERROR: TERM_SUPPORT.fail_header_str
            }
            failed_rows = []
            for resolution in suite.resolutions:
                # Successful resolutions already appear in the main listing.
                if resolution.result == ReferenceResolutionResult.SUCCESS:
                    continue
                decorate = decorators.get(resolution.result,
                                          TERM_SUPPORT.warn_header_str)
                failed_rows.append((decorate(resolution.origin),
                                    resolution.reference,
                                    resolution.info or ''))

            for row in iter_tabular_output(failed_rows,
                                           header=resolution_header,
                                           strip=True):
                LOG_UI.info(row)

        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(suite.stats):
            LOG_UI.info("%s: %s", key, suite.stats[key])

        if suite.tags_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(suite.tags_stats):
                LOG_UI.info("%s: %s", key, suite.tags_stats[key])

    @staticmethod
    def _get_test_matrix(suite):
        """Used for loader.

        Build the display rows for suites discovered via the legacy loader.
        """
        matrix = []
        type_labels = loader.loader.get_type_label_mapping()
        decorators = loader.loader.get_decorator_mapping()
        verbose = suite.config.get('core.verbose')
        for cls, params in suite.tests:
            # A string class name means the test type could not be mapped;
            # fall back to the base Test class for labeling purposes.
            if isinstance(cls, str):
                cls = Test
            label = decorators[cls](type_labels[cls])
            if verbose:
                matrix.append((label,
                               params['name'],
                               _get_test_tags((cls, params))))
            else:
                matrix.append((label, params['name']))
        return matrix

    @staticmethod
    def _get_resolution_matrix(suite):
        """Used for resolver.

        Build the display rows for suites discovered via the resolver.
        """
        matrix = []
        verbose = suite.config.get('core.verbose')
        for item in suite.tests:
            runnable = item.runnable
            label = TERM_SUPPORT.healthy_str(runnable.kind)
            if not verbose:
                matrix.append((label, runnable.uri))
                continue
            rendered = []
            for tag, vals in (runnable.tags or {}).items():
                if vals:
                    rendered.append("%s(%s)" % (tag, ",".join(vals)))
                else:
                    rendered.append(tag)
            matrix.append((label, runnable.uri, ",".join(rendered)))
        return matrix

    @staticmethod
    def save_recipes(suite, directory, matrix_len):
        """Write one JSON recipe file per successful resolution.

        File names are zero-padded sequential indexes, padded to the
        width of the total row count.
        """
        fmt = '%%0%uu.json' % len(str(matrix_len))
        index = 1
        for resolution in suite.resolutions:
            if resolution.result != ReferenceResolutionResult.SUCCESS:
                continue
            for res in resolution.resolutions:
                res.write_json(os.path.join(directory, fmt % index))
                index += 1

    def configure(self, parser):
        """
        Add the subparser for the list action.

        :param parser: The Avocado command line application parser
        :type parser: :class:`avocado.core.parser.ArgumentParser`
        """
        parser = super(List, self).configure(parser)

        help_msg = ('List of test references (aliases or paths). If empty, '
                    'Avocado will list tests on the configured test source, '
                    '(see "avocado config --datadir") Also, if there are '
                    'other test loader plugins active, tests from those '
                    'plugins might also show up (behavior may vary among '
                    'plugins)')
        settings.register_option(section='list',
                                 key='references',
                                 default=[],
                                 nargs='*',
                                 key_type=list,
                                 help_msg=help_msg,
                                 parser=parser,
                                 positional_arg=True)
        loader.add_loader_options(parser, 'list')

        help_msg = ('What is the method used to detect tests? If --resolver '
                    'used, Avocado will use the Next Runner Resolver method. '
                    'If not the legacy one will be used.')
        settings.register_option(section='list',
                                 key='resolver',
                                 key_type=bool,
                                 default=False,
                                 help_msg=help_msg,
                                 parser=parser,
                                 long_arg='--resolver')

        help_msg = ('Writes runnable recipe files to a directory. Valid only '
                    'when using --resolver.')
        settings.register_option(section='list.recipes',
                                 key='write_to_directory',
                                 default=None,
                                 metavar='DIRECTORY',
                                 help_msg=help_msg,
                                 parser=parser,
                                 long_arg='--write-recipes-to-directory')

        parser_common_args.add_tag_filter_args(parser)

    def run(self, config):
        """Entry point: list tests via the resolver or the legacy loader."""
        runner = 'nrunner' if config.get('list.resolver') else 'runner'
        config['run.references'] = config.get('list.references')
        config['run.ignore_missing_references'] = True
        config['run.test_runner'] = runner
        try:
            suite = TestSuite.from_config(config)
            if runner == 'nrunner':
                matrix = self._get_resolution_matrix(suite)
                self._display(suite, matrix)
                directory = config.get('list.recipes.write_to_directory')
                if directory is not None:
                    self.save_recipes(suite, directory, len(matrix))
            else:
                matrix = self._get_test_matrix(suite)
                self._display(suite, matrix)
        except KeyboardInterrupt:
            LOG_UI.error('Command interrupted by user...')

Full Screen

Full Screen

__init__.py

Source:__init__.py Github

copy

Full Screen

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2018
# Authors: Amador Pahim <apahim@redhat.com>

"""
Plugin to run GLib Test Framework tests in Avocado
"""

import os
import re

from avocado.utils import path
from avocado.utils import process
from avocado.core import loader
from avocado.core import output
from avocado.core import test
from avocado.core.plugin_interfaces import CLI


class GLibTest(test.SimpleTest):

    """
    Run a GLib test command as a SIMPLE test.
    """

    def __init__(self, name, params=None, base_logdir=None, job=None,
                 executable=None):
        super(GLibTest, self).__init__(name, params, base_logdir, job,
                                       executable)

    @property
    def filename(self):
        """
        Returns the path of the GLib test suite.
        """
        # The reference is "<binary>:<test name>"; the binary is the part
        # before the first colon.
        return self._filename.split(':')[0]

    def test(self):
        """
        Create the GLib command and execute it.
        """
        selected = self._filename.split(':')[1]
        command = '%s -p=%s' % (self.filename, selected)
        outcome = process.run(command, ignore_status=True)
        if outcome.exit_status != 0:
            self.fail('GLib Test execution returned a '
                      'non-0 exit code (%s)' % outcome)


class NotGLibTest(object):

    """
    Not a GLib Test (for reporting purposes)
    """


class GLibLoader(loader.TestLoader):
    """
    GLib Test loader class
    """
    name = "glib"

    def __init__(self, args, extra_params):
        super(GLibLoader, self).__init__(args, extra_params)

    def discover(self, reference, which_tests=loader.DiscoverMode.DEFAULT):
        """Discover GLib tests by listing them from an executable reference.

        A reference of the form "<binary>:<regex>" restricts discovery to
        test names matching the regex.
        """
        if reference is None:
            return []

        subtests_filter = None
        if ':' in reference:
            reference, raw_filter = reference.split(':', 1)
            subtests_filter = re.compile(raw_filter)

        discovered = []
        if (os.path.isfile(reference) and
                path.PathInspector(reference).has_exec_permission()):
            try:
                # "-l" asks a GLib test binary to list its test cases.
                result = process.run('%s -l' % (reference))
            except Exception as details:
                if which_tests == loader.DiscoverMode.ALL:
                    return [(NotGLibTest,
                             {"name": "%s: %s" % (reference, details)})]
                return []

            for test_item in result.stdout.splitlines():
                test_name = "%s:%s" % (reference, test_item)
                if subtests_filter and not subtests_filter.search(test_name):
                    continue
                discovered.append((GLibTest, {'name': test_name,
                                              'executable': test_name}))

        if which_tests == loader.DiscoverMode.ALL and not discovered:
            return [(NotGLibTest,
                     {"name": "%s: No GLib-like tests found" % reference})]
        return discovered

    @staticmethod
    def get_type_label_mapping():
        return {GLibTest: 'GLIB',
                NotGLibTest: "!GLIB"}

    @staticmethod
    def get_decorator_mapping():
        return {GLibTest: output.TERM_SUPPORT.healthy_str,
                NotGLibTest: output.TERM_SUPPORT.fail_header_str}


class GLibCLI(CLI):

    """
    Run GLib Test Framework tests
    """

    name = 'glib'
    description = "GLib Framework options for 'run' subcommand"

    def configure(self, parser):
        pass

    def run(self, args):
        # NOTE(review): this method's body is truncated in the source being
        # reviewed; restored as the standard loader registration done by
        # avocado CLI loader plugins — verify against the original file.
        loader.loader.register_plugin(GLibLoader)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful