How to use the all_tests method in Lemoncheesecake

Best Python code snippets using lemoncheesecake
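The snippets on this page are community test runners that build and execute their own all_tests collections; they do not call a lemoncheesecake API directly. For orientation, here is a minimal sketch of a lemoncheesecake suite, assuming a project scaffolded with lcc bootstrap, with this module saved in the project's suites/ directory and executed via lcc run (suite and test names here are illustrative):

import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to

# Illustrative suite: lemoncheesecake discovers every @lcc.test in the
# suite module, so the framework assembles the full test list for you.
@lcc.suite("Arithmetic")
class Arithmetic:
    @lcc.test("Addition works")
    def addition(self):
        check_that("1 + 1", 1 + 1, equal_to(2))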

run_tests.py

Source: run_tests.py (GitHub)


#!/bin/env python3
#
# This script tests various functions of the ODrive firmware and
# the ODrive Python library.
#
# Usage:
# 1. adapt test-rig.yaml for your test rig.
# 2. ./run_tests.py

import yaml
import os
import sys
import threading
import traceback
import argparse
from odrive.tests import *
from odrive.utils import Logger, Event

def for_all_parallel(objects, get_name, callback):
    """
    Executes the specified callback for every object in the objects
    list concurrently. This function waits for all callbacks to
    finish and throws an exception if any of the callbacks throw
    an exception.
    """
    tracebacks = []

    def run_callback(element):
        try:
            callback(element)
        except Exception as ex:
            tracebacks.append((get_name(element), ex))

    # Start a thread for each element in the list
    all_threads = []
    for element in objects:
        thread = threading.Thread(target=run_callback, args=(element,))
        thread.daemon = True
        thread.start()
        all_threads.append(thread)

    # Wait for all threads to complete
    for thread in all_threads:
        thread.join()

    if len(tracebacks) == 1:
        msg = "task {} failed.".format(tracebacks[0][0])
        raise Exception(msg) from tracebacks[0][1]
    elif len(tracebacks) > 1:
        msg = "task {} and {} failed.".format(
            tracebacks[0][0],
            "one other" if len(tracebacks) == 2 else str(len(tracebacks)-1) + " others"
        )
        raise Exception(msg) from tracebacks[0][1]

script_path = os.path.dirname(os.path.realpath(__file__))

parser = argparse.ArgumentParser(description='ODrive automated test tool\n')
parser.add_argument("--skip-boring-tests", action="store_true",
                    help="Skip the boring tests and go right to the high power tests")
parser.add_argument("--ignore", metavar='DEVICE', action='store', nargs='+',
                    help="Ignore one or more ODrives or axes")
parser.add_argument("--test-rig-yaml", type=argparse.FileType('r'),
                    help="test rig YAML file")
# parser.set_defaults(test_rig_yaml=script_path + '/test-rig-parallel.yaml')
parser.set_defaults(ignore=[])
args = parser.parse_args()
test_rig_yaml = yaml.load(args.test_rig_yaml)
# TODO: add --only option

all_tests = []
if not args.skip_boring_tests:
    all_tests.append(TestFlashAndErase())
    all_tests.append(TestSetup())
    all_tests.append(TestMotorCalibration())
    # TODO: test encoder index search
    all_tests.append(TestEncoderOffsetCalibration())
    # TODO: hold down one motor while the other one does an index search (should fail)
    all_tests.append(TestClosedLoopControl())
    all_tests.append(TestStoreAndReboot())
    all_tests.append(TestEncoderOffsetCalibration())  # need to find offset _or_ index after reboot
    all_tests.append(TestClosedLoopControl())
else:
    all_tests.append(TestDiscoverAndGotoIdle())
    all_tests.append(TestEncoderOffsetCalibration(pass_if_ready=True))
all_tests.append(TestAsciiProtocol())
all_tests.append(TestSensorlessControl())
#all_tests.append(TestStepDirInput())
#all_tests.append(TestPWMInput())

if test_rig_yaml['type'] == 'parallel':
    #all_tests.append(TestHighVelocity())
    all_tests.append(TestHighVelocityInViscousFluid(load_current=35, driver_current=45))
    # all_tests.append(TestVelCtrlVsPosCtrl())
    # TODO: test step/dir
    # TODO: test sensorless
    # TODO: test ASCII protocol
    # TODO: test protocol over UART
elif test_rig_yaml['type'] == 'loopback':
    all_tests.append(TestSelfLoadedPosVelDistribution(
        rpm_range=3000, load_current_range=60, driver_current_lim=70))

print(str(args.ignore))
logger = Logger()
os.chdir(script_path + '/../Firmware')

# Build a dictionary of odrive test contexts by name
odrives_by_name = {}
for odrv_idx, odrv_yaml in enumerate(test_rig_yaml['odrives']):
    name = odrv_yaml['name'] if 'name' in odrv_yaml else 'odrive{}'.format(odrv_idx)
    if not name in args.ignore:
        odrives_by_name[name] = ODriveTestContext(name, odrv_yaml)

# Build a dictionary of axis test contexts by name (e.g. odrive0.axis0)
axes_by_name = {}
for odrv_ctx in odrives_by_name.values():
    for axis_idx, axis_ctx in enumerate(odrv_ctx.axes):
        if not axis_ctx.name in args.ignore:
            axes_by_name[axis_ctx.name] = axis_ctx

# Ensure mechanical couplings are valid
couplings = []
if test_rig_yaml['couplings'] is None:
    test_rig_yaml['couplings'] = {}
else:
    for coupling in test_rig_yaml['couplings']:
        c = [axes_by_name[axis_name] for axis_name in coupling if (axis_name in axes_by_name)]
        if len(c) > 1:
            couplings.append(c)

app_shutdown_token = Event()

try:
    for test in all_tests:
        if isinstance(test, ODriveTest):
            def odrv_test_thread(odrv_name):
                odrv_ctx = odrives_by_name[odrv_name]
                logger.notify('* running {} on {}...'.format(type(test).__name__, odrv_name))
                try:
                    test.check_preconditions(odrv_ctx,
                                             logger.indent('  {}: '.format(odrv_name)))
                except:
                    raise PreconditionsNotMet()
                test.run_test(odrv_ctx,
                              logger.indent('  {}: '.format(odrv_name)))
            if test._exclusive:
                for odrv in odrives_by_name:
                    odrv_test_thread(odrv)
            else:
                for_all_parallel(odrives_by_name, lambda x: type(test).__name__ + " on " + x, odrv_test_thread)

        elif isinstance(test, AxisTest):
            def axis_test_thread(axis_name):
                # Get all axes that are mechanically coupled with the axis specified by axis_name
                conflicting_axes = sum([c for c in couplings if (axis_name in [a.name for a in c])], [])
                # Remove duplicates
                conflicting_axes = list(set(conflicting_axes))
                # Acquire lock for all conflicting axes
                conflicting_axes.sort(key=lambda x: x.name)  # prevent deadlocks
                axis_ctx = axes_by_name[axis_name]
                for conflicting_axis in conflicting_axes:
                    conflicting_axis.lock.acquire()
                try:
                    if not app_shutdown_token.is_set():
                        # Run test on this axis
                        logger.notify('* running {} on {}...'.format(type(test).__name__, axis_name))
                        try:
                            test.check_preconditions(axis_ctx,
                                                     logger.indent('  {}: '.format(axis_name)))
                        except:
                            raise PreconditionsNotMet()
                        test.run_test(axis_ctx,
                                      logger.indent('  {}: '.format(axis_name)))
                    else:
                        logger.warn('- skipping {} on {}'.format(type(test).__name__, axis_name))
                except:
                    app_shutdown_token.set()
                    raise
                finally:
                    # Release all conflicting axes
                    for conflicting_axis in conflicting_axes:
                        conflicting_axis.lock.release()
            for_all_parallel(axes_by_name, lambda x: type(test).__name__ + " on " + x, axis_test_thread)

        elif isinstance(test, DualAxisTest):
            def dual_axis_test_thread(coupling):
                coupling_name = "...".join([a.name for a in coupling])
                # Remove duplicates
                coupled_axes = list(set(coupling))
                # Acquire lock for all conflicting axes
                coupled_axes.sort(key=lambda x: x.name)  # prevent deadlocks
                for axis_ctx in coupled_axes:
                    axis_ctx.lock.acquire()
                try:
                    if not app_shutdown_token.is_set():
                        # Run test on this axis
                        logger.notify('* running {} on {}...'.format(type(test).__name__, coupling_name))
                        try:
                            test.check_preconditions(coupled_axes[0], coupled_axes[1],
                                                     logger.indent('  {}: '.format(coupling_name)))
                        except:
                            raise PreconditionsNotMet()
                        test.run_test(coupled_axes[0], coupled_axes[1],
                                      logger.indent('  {}: '.format(coupling_name)))
                    else:
                        logger.warn('- skipping {} on {}...'.format(type(test).__name__, coupling_name))
                except:
                    app_shutdown_token.set()
                    raise
                finally:
                    # Release all conflicting axes
                    for axis_ctx in coupled_axes:
                        axis_ctx.lock.release()
            for_all_parallel(couplings, lambda x: type(test).__name__ + " on " + "..".join([a.name for a in x]), dual_axis_test_thread)

        else:
            logger.warn("ignoring unknown test type {}".format(type(test)))

except:
    logger.error(traceback.format_exc())
    logger.debug('=> Test failed. Please wait while I secure the test rig...')
    try:
        dont_secure_after_failure = False  # TODO: disable
        if not dont_secure_after_failure:
            def odrv_reset_thread(odrv_name):
                odrv_ctx = odrives_by_name[odrv_name]
                #run("make erase PROGRAMMER='" + odrv_ctx.yaml['programmer'] + "'", logger, timeout=30)
                odrv_ctx.handle.axis0.requested_state = AXIS_STATE_IDLE
                odrv_ctx.handle.axis1.requested_state = AXIS_STATE_IDLE
                dump_errors(odrv_ctx.axes[0], logger)
                dump_errors(odrv_ctx.axes[1], logger)
            for_all_parallel(odrives_by_name, lambda x: x['name'], odrv_reset_thread)
    except:
        logger.error('///////////////////////////////////////////')
        logger.error('/// CRITICAL: COULD NOT SECURE TEST RIG ///')
        logger.error('///      CUT THE POWER IMMEDIATELY!     ///')
        logger.error('///////////////////////////////////////////')
    else:
        logger.error('some test failed!')
else:
...
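The for_all_parallel helper above is the core pattern of this script: fan each test out to one thread per device, wait for all of them, and re-raise the first failure. Here is a stripped-down, self-contained sketch of the same pattern; the function and device names are illustrative, not part of the ODrive codebase:

import threading

def run_in_parallel(items, worker):
    # Illustrative helper: run worker(item) in one thread per item,
    # collect failures, and re-raise the first one after all threads join.
    failures = []

    def wrapped(item):
        try:
            worker(item)
        except Exception as ex:
            failures.append((item, ex))

    threads = [threading.Thread(target=wrapped, args=(item,)) for item in items]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # block until every worker has finished
    if failures:
        raise Exception("%d task(s) failed" % len(failures)) from failures[0][1]

# Usage with made-up device names:
run_in_parallel(["odrive0", "odrive1"], lambda name: print("checking", name))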


run_tests_parallel.py

Source: run_tests_parallel.py (GitHub)


from __future__ import absolute_import, division, print_function
import libtbx.test_utils.parallel
from libtbx.utils import Sorry, Usage
import libtbx.phil
import random
import os
import sys

master_phil = libtbx.phil.parse("""
directory = None
  .type = path
  .multiple = True
module = None
  .type = str
  .multiple = True
script = None
  .type = path
  .multiple = True
nproc = 1
  .type = int
shuffle = False
  .type = bool
quiet = False
  .type = bool
verbosity = 1
  .type = int
stderr = False
  .type = bool
run_in_tmp_dir = False
  .type = bool
max_time = 180
  .type = float(value_min=0)
  .help = "Print warning and timing for all tests that take longer"
          "than max_time (in seconds) to run."
slow_tests = False
  .type = bool
  .help = "If True, also run any tests marked as slow, if any"
""")

def run(args,
        return_list_of_tests=None,
        python_keyword_text="",
        max_tests=None,
        start_test=None,
        tests_to_skip=None):
  if (len(args) == 0):
    raise Usage("""libtbx.run_tests_parallel [module=NAME] [directory=path]""")
  user_phil = []
  for arg in args :
    if os.path.isdir(arg):
      user_phil.append(libtbx.phil.parse("directory=%s" % arg))
    else :
      try :
        arg_phil = libtbx.phil.parse(arg)
      except RuntimeError :
        raise Sorry("Unrecognized argument '%s'" % arg)
      else :
        user_phil.append(arg_phil)
  params = master_phil.fetch(sources=user_phil).extract()
  if params.run_in_tmp_dir:
    from libtbx.test_utils import open_tmp_directory
    run_dir = open_tmp_directory()
    print('Running tests in %s' % run_dir)
    os.chdir(run_dir)
  elif return_list_of_tests:
    pass # don't need to check anything
  else:
    cwd = os.getcwd()
    cwd_files = os.listdir(cwd)
    if cwd_files and cwd_files != ["default.profraw"]:
      raise Sorry("Please run this program in an empty directory.")
  if (len(params.directory) == 0) and (len(params.module) == 0):
    raise Sorry("Please specify modules and/or directories to test.")
  all_tests = []
  expected_failure_list = []
  expected_unstable_list = []
  parallel_list = []
  if not return_list_of_tests: # (this fails with return_list_of_tests)
    all_tests.extend(libtbx.test_utils.parallel.make_commands(params.script,
      python_keyword_text=python_keyword_text))
  for dir_name in params.directory :
    if os.path.split(dir_name)[-1].find("cctbx_project")>-1:
      print('DANGER '*10)
      print('Using the directory option in cctbx_project can be very time consuming')
      print('DANGER '*10)
    dir_tests = libtbx.test_utils.parallel.find_tests(dir_name)
    all_tests.extend(libtbx.test_utils.parallel.make_commands(dir_tests,
      python_keyword_text=python_keyword_text))
  for module_name in params.module :
    module_tests = libtbx.test_utils.parallel.get_module_tests(module_name,
      slow_tests = params.slow_tests,
      python_keyword_text=python_keyword_text)
    fail_tests = libtbx.test_utils.parallel.\
      get_module_expected_test_failures(module_name)
    unstable_tests = libtbx.test_utils.\
      parallel.get_module_expected_unstable_tests(module_name)
    parallel_tests = libtbx.test_utils.parallel.\
      get_module_parallel_tests(module_name)
    all_tests.extend(module_tests)
    all_tests.extend(fail_tests)
    all_tests.extend(unstable_tests)
    expected_failure_list.extend(fail_tests)
    expected_unstable_list.extend(unstable_tests)
    parallel_list.extend(parallel_tests)
  # remove any specified tests:
  if tests_to_skip:
    new_tests=[]
    for t in all_tests:
      ok=True
      for tts in tests_to_skip:
        if t.find(tts)>-1:
          ok=False
      if ok:
        new_tests.append(t)
      else:
        print("Skipping the test %s" %(t))
    all_tests=new_tests
  # check that test lists are unique
  seen = set()
  duplicates = set()
  for t in all_tests:
    if t in seen:
      duplicates.add(t)
    else:
      seen.add(t)
  assert len(duplicates) == 0, "Duplicate tests found.\n%s" % list(duplicates)
  if start_test:
    all_tests=all_tests[start_test:]
    print("Starting with test # %s " %(start_test))
  if max_tests:
    all_tests=all_tests[:max_tests]
    print("Running only %s tests" %(max_tests))
  if return_list_of_tests:
    return all_tests
  if (len(all_tests) == 0):
    raise Sorry("No test scripts found in %s." % params.directory)
  if (params.shuffle):
    random.shuffle(all_tests)
  if (params.quiet):
    params.verbosity = 0
  with open("run_tests_parallel_zlog", "w") as log:
    result = libtbx.test_utils.parallel.run_command_list(
      cmd_list=all_tests,
      expected_failure_list=expected_failure_list,
      expected_unstable_list=expected_unstable_list,
      parallel_list=parallel_list,
      nprocs=params.nproc,
      log=log,
      verbosity=params.verbosity,
      max_time=params.max_time)
  print("\nSee run_tests_parallel_zlog for full output.\n")
  if (result.failure > 0):
    print("")
    print("*" * 80)
    print("ERROR: %d TEST FAILURES. PLEASE FIX BEFORE COMMITTING CODE." % \
      result.failure)
    print("*" * 80)
    print("")
  return result.failure

if (__name__ == "__main__"):
  if (run(sys.argv[1:]) > 0):
...


tests.py

Source: tests.py (GitHub)


...
TEST_DIR = 'test_data'

class TestSystemTestCase(TestCase):
    def setUp(self):
        self.ts = TestSystem()
        self.all_tests = get_all_tests(f'{TEST_DIR}')

    def _ce(self, lang, source_code):
        verdict = self.ts.get_verdicts(lang, source_code, [])
        return type(verdict) == tuple

    def test_java_ce(self):
        self.assertTrue(self._ce(LANGS['java'], read_file(f'{TEST_DIR}/sols/ce.java')))

    def test_python_ce(self):
        self.assertTrue(self._ce(LANGS['python'], read_file(f'{TEST_DIR}/sols/ce.py')))

    def test_javascript_ce(self):
        self.assertTrue(self._ce(LANGS['javascript'], read_file(f'{TEST_DIR}/sols/ce.js')))

    def _all(self, lang, source_code, tests, verdict):
        verdicts = self.ts.get_verdicts(lang, source_code, tests)
        for ver in verdicts.values():
            if ver[0] != verdict:
                return False
...
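The setUp above depends on a get_all_tests helper whose definition is cut off by the excerpt. One plausible shape for such a helper, given purely as a hypothetical sketch (it assumes tests are stored as paired <name>.in / <name>.out files, which the excerpt does not confirm):

import os

def get_all_tests(test_dir):
    # Hypothetical stand-in for the elided helper: collect
    # (input, expected-output) file pairs from test_dir.
    tests = []
    for entry in sorted(os.listdir(test_dir)):
        if entry.endswith('.in'):
            out_path = os.path.join(test_dir, entry[:-3] + '.out')
            if os.path.exists(out_path):
                tests.append((os.path.join(test_dir, entry), out_path))
    return tests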


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, which covers everything from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!

