Best Python code snippet using autotest_python
run_tests.py
Source: run_tests.py
...
    }
all_tests = unittest.TestSuite((test_accounts.suite,
                                test_worksheet.suite,
                                test_worksheet_list.suite))

def setup_tests(address='localhost', secure=False,
                environment='*firefox3 /usr/bin/firefox'):
    """
    Sets selected options for SageNB Selenium tests.

    INPUT:

    - ``address`` - a string (default: 'localhost'); address of the
      network interface at which the notebook server listens.  Do not
      leave this empty; see :mod:`sagenb.testing.notebook_test_case`
      for details.

    - ``secure`` - a boolean (default: False); whether to launch a
      secure notebook server.  Note: Browser security warnings will
      yield failed tests.  To work around these in Firefox, close all
      windows, create a new profile (e.g., `firefox -P selenium`),
      browse to a secure notebook server, accept the certificate, and
      quit.  Then launch the Selenium server with, e.g.,

        java -jar selenium-server -firefoxProfileTemplate $HOME/selenium/firefox

      and run the tests.  A minimal profile template directory can
      contain just the files `cert8.db` and `cert_override.txt`.

    - ``environment`` - a string (default: '*firefox3
      /usr/bin/firefox'); the browser environment in which to run the
      tests.  The path is optional.  However, for the Selenium server
      to have complete control over the launched browser, it's best to
      give the full path to the browser *executable* (i.e., not a
      shell script).

      Possible environments include '*chrome', '*firefox',
      '*firefox3', '*googlechrome', '*iexplore', '*opera', '*safari'.

    EXAMPLES::

        sage: import sagenb.testing.run_tests as rt               # not tested
        sage: env = '*firefox3 /usr/lib64/firefox-3.5.6/firefox'  # not tested
        sage: rt.setup_tests('localhost', True, env)              # not tested
        sage: rt.run_any()                                        # not tested
        sage: rt.setup_tests('localhost', True, '*opera')         # not tested
        sage: rt.run_and_report()                                 # not tested
    """
    # TODO: Add a directory option for parallel testing.
    notebook_test_case.NB_OPTIONS['address'] = address
    notebook_test_case.NB_OPTIONS['secure'] = secure
    notebook_test_case.SEL_OPTIONS['environment'] = environment

def run_any(tests=all_tests, make_report=False, **kwargs):
    """
    Creates and runs an ad hoc test suite from a test name, case,
    suite, or a mixed list thereof.  If no matching tests are found,
    no tests are run.

    INPUT:

    - ``tests`` - a string, :class:`unittest.TestCase`,
      :class:`unittest.TestSuite`, or a mixed list thereof.  Strings...
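The body of ``run_any`` is cut off above. As a rough sketch of the pattern its docstring describes (collecting a mixed list of test names, ``unittest.TestCase`` classes, and ``unittest.TestSuite`` objects into one ad hoc suite), something like the following works with the standard library alone; the helper name ``build_suite`` and its resolution rules are assumptions for illustration, not SageNB's actual implementation.

import unittest

def build_suite(tests):
    """Hypothetical helper: fold a mixed list of dotted test names,
    TestCase classes, and TestSuites into a single unittest suite."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    items = tests if isinstance(tests, (list, tuple)) else [tests]
    for item in items:
        if isinstance(item, str):
            # Dotted name, e.g. 'sagenb.testing.tests.test_accounts'
            suite.addTests(loader.loadTestsFromName(item))
        elif isinstance(item, unittest.TestSuite):
            suite.addTests(item)
        elif isinstance(item, type) and issubclass(item, unittest.TestCase):
            suite.addTests(loader.loadTestsFromTestCase(item))
    return suite

# Run the combined suite with a plain text runner, e.g.:
# unittest.TextTestRunner(verbosity=2).run(build_suite(all_tests))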
test_sensor.py
Source: test_sensor.py
...
    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {})
    await hass.async_block_till_done()
    return config, entity_id

async def setup_tests(hass, config, times, values, expected_state):
    """Test derivative sensor state."""
    config, entity_id = await _setup_sensor(hass, config)
    # Testing an energy sensor with non-monotonic intervals and values
    for time, value in zip(times, values):
        now = dt_util.utcnow() + timedelta(seconds=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(entity_id, value, {}, force_update=True)
            await hass.async_block_till_done()
    state = hass.states.get("sensor.power")
    assert state is not None
    assert round(float(state.state), config["sensor"]["round"]) == expected_state
    return state

async def test_dataSet1(hass):
    """Test derivative sensor state."""
    await setup_tests(
        hass,
        {"unit_time": TIME_SECONDS},
        times=[20, 30, 40, 50],
        values=[10, 30, 5, 0],
        expected_state=-0.5,
    )

async def test_dataSet2(hass):
    """Test derivative sensor state."""
    await setup_tests(
        hass,
        {"unit_time": TIME_SECONDS},
        times=[20, 30],
        values=[5, 0],
        expected_state=-0.5,
    )

async def test_dataSet3(hass):
    """Test derivative sensor state."""
    state = await setup_tests(
        hass,
        {"unit_time": TIME_SECONDS},
        times=[20, 30],
        values=[5, 10],
        expected_state=0.5,
    )
    assert state.attributes.get("unit_of_measurement") == f"/{TIME_SECONDS}"

async def test_dataSet4(hass):
    """Test derivative sensor state."""
    await setup_tests(
        hass,
        {"unit_time": TIME_SECONDS},
        times=[20, 30],
        values=[5, 5],
        expected_state=0,
    )

async def test_dataSet5(hass):
    """Test derivative sensor state."""
    await setup_tests(
        hass,
        {"unit_time": TIME_SECONDS},
        times=[20, 30],
        values=[10, -10],
        expected_state=-2,
    )

async def test_dataSet6(hass):
    """Test derivative sensor state."""
    await setup_tests(hass, {}, times=[0, 60], values=[0, 1 / 60], expected_state=1)

async def test_data_moving_average_for_discrete_sensor(hass):
    """Test derivative sensor state."""
    # We simulate the following situation:
    # The temperature rises 1 °C per minute for 30 minutes.
    # There is a data point every 30 seconds; however, the sensor returns
    # the temperature rounded down to an integer value.
    # We use a time window of 10 minutes, so (because the true derivative
    # is 1 °C/min) we can expect an error of less than 10%.
    temperature_values = []
    for temperature in range(30):
        temperature_values += [temperature] * 2  # two values per minute
    time_window = 600
    times = list(range(0, 1800 + 30, 30))
    config, entity_id = await _setup_sensor(...
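The tests above depend on Home Assistant fixtures (``hass``, ``_setup_sensor``) that are not shown. The core technique, stepping a fake clock by patching ``utcnow`` and then checking the slope between successive (time, value) samples, can be demonstrated without Home Assistant. The standalone sketch below mirrors ``test_dataSet2`` (values 5 -> 0 over 10 seconds, giving -0.5 per second); it illustrates the patching pattern only, not the derivative integration's real update logic.

from datetime import datetime, timedelta, timezone
from unittest.mock import patch

def utcnow():
    """Stand-in for homeassistant.util.dt.utcnow (illustration only)."""
    return datetime.now(timezone.utc)

def test_slope_between_samples():
    start = utcnow()
    samples = []
    # Record two samples at t=20 s and t=30 s with a frozen clock.
    for offset, value in zip([20, 30], [5, 0]):
        fake_now = start + timedelta(seconds=offset)
        with patch(f"{__name__}.utcnow", return_value=fake_now):
            samples.append((utcnow(), value))
    (t0, v0), (t1, v1) = samples
    derivative = (v1 - v0) / (t1 - t0).total_seconds()
    assert derivative == -0.5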
runtests.py
Source: runtests.py
#!/usr/bin/env python
import argparse
import json
import os
import sys
from itertools import chain
# from typing import Literal
from test_utils import setup_tests

ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
UTILS_DIR = os.path.join(ROOT_DIR, 'test_utils',)
TEST_REPO_DIR = os.path.join(
    ROOT_DIR,
    'tests'
)
TEST_RESULTS_FILE = os.path.join(ROOT_DIR, 'results', 'test_list.json')
TEST_VERSIONS = ('v21', 'v22', 'v30')
PY_VERSIONS = ('p36', 'p38')
DB_VERSIONS = ('sqlite', 'mongodb')
# TEST_LITERAL = Literal['v21', 'v22', 'v30']
# PY_LITERAL = Literal['p36', 'p38']
# DB_LITERAL = Literal['sqlite', 'mongodb']
#
# class TestResult(TypedDict):
#     passing: List[str]
#     failing: List[str]
#
# class TestName(TypedDict):
#     migrations: TestResult
#
# class DbVersions(TypedDict):
#     sqlite: TestName
#     mongodb: TestName
#
#
# class PyVersions(TypedDict):
#     p36: DbVersions
#     p38: DbVersions
#     repo_tests: List[str]
#
#
# class TestVersions(TypedDict):
#     v21: PyVersions
#     v22: PyVersions
#     v30: PyVersions

PARSER_ARGS = {
    '--start-index': {
        'default': None,
        'type': int,
        'dest': 'start_index'
    },
    '--django-version': {
        'default': 21,
        'type': int,
        'choices': [21, 22, 30]
    },
    '--db-type': {
        'default': 'mongodb',
        'type': str,
        'choices': ['mongodb', 'sqlite']
    },
    '--check-currently-passing': {
        'action': 'store_true',
    },
    '--discover-passing': {
        'action': 'store_true',
    },
    '--discover-tests': {
        'action': 'store_true',
    },
    '--check-specific': {
        'action': 'store_true',
    },
}
check_tests = [
    'bulk_create',
    'migrations',
    'inspectdb',
    'indexes',
    'dbshell',
    'db_utils',
    'db_typecasts',
    'db_functions',
    'datetimes',
    'dates',
    'datatypes',
    'aggregation']

class TestManager:
    def __init__(self):
        parser = argparse.ArgumentParser(parents=[setup_tests.get_parser()], add_help=False)
        for option, arg in PARSER_ARGS.items():
            parser.add_argument(option, **arg)
        parsed = self.parsed = parser.parse_args()
        setup_tests.validate_parsed(parsed, parser)
        django_version = f'v{parsed.django_version}'
        python_version = f'p{sys.version_info.major}{sys.version_info.minor}'
        self.selected_test_dir = os.path.join(
            TEST_REPO_DIR,
            django_version,
            'tests'
        )
        sys.path.insert(0, UTILS_DIR)
        sys.path.insert(0, self.selected_test_dir)
        if parsed.db_type == 'mongodb':
            os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.test_mongodb'
        else:
            os.environ['DJANGO_SETTINGS_MODULE'] = 'settings.test_sqlite'
        setup_tests.RUNTESTS_DIR = self.selected_test_dir
        self.test_list = self.load_test_list()
        self.repo_tests_list = self.test_list[django_version]['repo_tests']
        self.result_list = self.test_list[django_version][python_version][parsed.db_type]

    def check_passing(self):
        passing = self.result_list['passing']
        tests = []
        for test in passing.values():
            tests.extend(test)
        self.parsed.modules = tests
        return self.check_specific()

    def discover_tests(self):
        testlist = setup_tests.get_test_list(self.parsed)
        testlist.sort()
        self.repo_tests_list.extend(testlist)

    @staticmethod
    def to_result_dict(test_result):
        res_dict = {}
        for test, trace in test_result:
            _id = test.id()
            name, _ = _id.split('.', 1)
            try:
                res_dict[name].append(_id)
            except KeyError:
                res_dict[name] = [_id]
        for ids in res_dict.values():
            ids.sort()
        return res_dict

    def discover_passing(self):
        if not self.repo_tests_list:
            self.discover_tests()
        self.parsed.modules = self.parsed.modules or check_tests
        result = setup_tests.test_exec(self.parsed)
        res_dict = self.to_result_dict(chain(result.failures,
                                             result.errors,
                                             result.unexpectedSuccesses))
        self.result_list['failing'].update(res_dict)
        res_dict = self.to_result_dict(result.passed)
        self.result_list['passing'].update(res_dict)

    def check_specific(self):
        result = setup_tests.test_exec(self.parsed)
        if any(chain(result.failures, result.errors, result.unexpectedSuccesses)):
            return -1
        else:
            return 0

    def run(self):
        if self.parsed.discover_tests:
            self.discover_tests()
            self.store_test_list(self.test_list)
        if self.parsed.discover_passing:
            self.discover_passing()
            self.store_test_list(self.test_list)
        if self.parsed.check_currently_passing:
            return self.check_passing()
        if self.parsed.check_specific:
            return self.check_specific()

    @staticmethod
    def store_test_list(test_data):
        with open(TEST_RESULTS_FILE, 'w') as fp:
            json.dump(test_data, fp, indent=3)

    @staticmethod
    def load_test_list():
        try:
            with open(TEST_RESULTS_FILE, 'r') as fp:
                return json.load(fp)
        except FileNotFoundError:
            test_list = {}
            for tv in TEST_VERSIONS:
                test_list[tv] = {}
                test_list[tv]['repo_tests'] = []
                for pv in PY_VERSIONS:
                    test_list[tv][pv] = {}
                    for dbv in DB_VERSIONS:
                        test_list[tv][pv][dbv] = {}
                        test_list[tv][pv][dbv]['passing'] = {}
                        test_list[tv][pv][dbv]['failing'] = {}
            return test_list

if __name__ == '__main__':
    tm = TestManager()...
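For context, ``load_test_list`` above falls back to an empty skeleton keyed by Django version, Python version, and database backend when ``results/test_list.json`` does not exist yet. The standalone snippet below rebuilds that same shape with a comprehension so the layout of the results file is easier to see; it simply restates the structure already present in the code above.

import json

TEST_VERSIONS = ('v21', 'v22', 'v30')
PY_VERSIONS = ('p36', 'p38')
DB_VERSIONS = ('sqlite', 'mongodb')

# Same nesting that TestManager.load_test_list builds on FileNotFoundError:
# Django version -> repo_tests list plus per-Python, per-DB passing/failing buckets.
skeleton = {
    tv: {
        'repo_tests': [],
        **{pv: {dbv: {'passing': {}, 'failing': {}} for dbv in DB_VERSIONS}
           for pv in PY_VERSIONS},
    }
    for tv in TEST_VERSIONS
}

print(json.dumps(skeleton['v21'], indent=3))  # output abridged below
# {
#    "repo_tests": [],
#    "p36": {"sqlite": {"passing": {}, "failing": {}}, "mongodb": {...}},
#    "p38": {"sqlite": {"passing": {}, "failing": {}}, "mongodb": {...}}
# }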
