How to use the _log_or_execute method in autotest

Best Python code snippet using autotest_python

test_importer.py

Source: test_importer.py (GitHub)



...
    for test in models.Test.objects.all():
        full_path = os.path.join(autotest_dir, test.path)
        if not os.path.isfile(full_path):
            logging.info("Removing %s", test.path)
            _log_or_execute(repr(test), test.delete)

    # Find profilers that are no longer present
    for profiler in models.Profiler.objects.all():
        full_path = os.path.join(autotest_dir, "client", "profilers",
                                 profiler.name)
        if not os.path.exists(full_path):
            logging.info("Removing %s", profiler.name)
            _log_or_execute(repr(profiler), profiler.delete)


def db_clean_all(autotest_dir):
    """
    Remove all tests from autotest_web - very destructive

    This function invoked when -C supplied on the command line.
    Removes ALL tests from the database.

    :param autotest_dir: prepended to path strings
                         (see global_config.ini, COMMON, autotest_top_path).
    """
    for test in models.Test.objects.all():
        logging.info("Removing %s", test.path)
        _log_or_execute(repr(test), test.delete)

    # Find profilers that are no longer present
    for profiler in models.Profiler.objects.all():
        logging.info("Removing %s", profiler.name)
        _log_or_execute(repr(profiler), profiler.delete)


def update_profilers_in_db(profilers, description='NA',
                           add_noncompliant=False):
    """
    Add only profilers to the database from the filesystem.

    This function invoked when -p supplied on command line.
    Only adds profilers to the database - does not delete any.
    Profilers are formatted slightly differently than tests.

    :param profilers: list of profilers found in the file system.
    :param description: simple text to satisfy docstring.
    :param add_noncompliant: attempt adding test with invalid control files.
    """
    for profiler in profilers:
        name = os.path.basename(profiler)
        if name.endswith('.py'):
            name = name[:-3]
        if not profilers[profiler]:
            if add_noncompliant:
                doc = description
            else:
                logging.warn("Skipping %s, missing docstring", profiler)
                continue
        else:
            doc = profilers[profiler]
        model = models.Profiler.objects.get_or_create(name=name)[0]
        model.description = doc
        _log_or_execute(repr(model), model.save)


def update_tests_in_db(tests, dry_run=False, add_experimental=False,
                       add_noncompliant=False, autotest_dir=None):
    """
    Scans through all tests and add them to the database.

    This function invoked when -t supplied and for update_all.
    When test code is discovered in the file system new tests may be added

    :param tests: list of tests found in the filesystem.
    :param dry_run: not used at this time.
    :param add_experimental: add tests with experimental attribute set.
    :param add_noncompliant: attempt adding test with invalid control files.
    :param autotest_dir: prepended to path strings
                         (see global_config.ini, COMMON, autotest_top_path).
    """
    site_set_attributes_module = utils.import_site_module(
        __file__, 'autotest.utils.site_test_importer_attributes')
    for test in tests:
        # if test path is not inside base test dir, the subsequent
        # test load will fail so instead notify user right away
        if not test.startswith(autotest_dir):
            raise Exception('Test path ' +
                            '%s not in %s, did you forget to use -z option?'
                            % (test, autotest_dir))
        new_test = models.Test.objects.get_or_create(
            path=test.replace(autotest_dir, '').lstrip('/'))[0]
        logging.info("Processing %s", new_test.path)

        # Set the test's attributes
        data = tests[test]
        _set_attributes_clean(new_test, data)

        # Custom Attribute Update
        if site_set_attributes_module:
            site_set_attributes_module._set_attributes_custom(new_test, data)

        # This only takes place if --add-noncompliant is provided on the CLI
        if not new_test.name:
            test_new_test = test.split('/')
            if test_new_test[-1] == 'control':
                new_test.name = test_new_test[-2]
            else:
                control_name = "%s:%s"
                control_name %= (test_new_test[-2],
                                 test_new_test[-1])
                new_test.name = control_name.replace('control.', '')

        # Experimental Check
        if not add_experimental and new_test.experimental:
            continue

        _log_or_execute(repr(new_test), new_test.save)
        add_label_dependencies(new_test)

        # save TestParameter
        for para_name in data.test_parameters:
            test_parameter = models.TestParameter.objects.get_or_create(
                test=new_test, name=para_name)[0]
            test_parameter.save()


def _set_attributes_clean(test, data):
    """
    First pass sets the attributes of the Test object from file system.

    :param test: a test object to be populated for the database.
    :param data: object with test data from the file system.
    """
    test_type = {'client': 1,
                 'server': 2}
    test_time = {'short': 1,
                 'medium': 2,
                 'long': 3}
    test.test_type = test_type[data.test_type.lower()]
    test.test_time = test_time[data.time.lower()]
    string_attributes = ('name', 'author', 'test_class', 'test_category',
                         'test_category', 'sync_count')
    for attribute in string_attributes:
        setattr(test, attribute, getattr(data, attribute))
    test.description = data.doc
    test.dependencies = ", ".join(data.dependencies)
    int_attributes = ('experimental', 'run_verify')
    for attribute in int_attributes:
        setattr(test, attribute, int(getattr(data, attribute)))


def add_label_dependencies(test):
    """
    Add proper many-to-many relationships from DEPENDENCIES field.

    :param test: test object for the database.
    """
    # clear out old relationships
    _log_or_execute(repr(test), test.dependency_labels.clear,
                    subject='clear dependencies from')
    for label_name in test.dependencies.split(','):
        label_name = label_name.strip().lower()
        if not label_name:
            continue
        try:
            label = models.Label.objects.get(name=label_name)
        except models.Label.DoesNotExist:
            log_dependency_not_found(label_name)
            continue
        _log_or_execute(repr(label), test.dependency_labels.add, label,
                        subject='add dependency to %s' % test.name)


def log_dependency_not_found(label_name):
    """
    Exception processing when label not found in database.

    :param label_name: from test dependencies.
    """
    if label_name in DEPENDENCIES_NOT_FOUND:
        return
    logging.info("Dependency %s not found", label_name)
    DEPENDENCIES_NOT_FOUND.add(label_name)


def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
    """
    Find control files in file system and load a list with their info.

    :param parent_dir: directory to search recursively.
    :param control_pattern: name format of control file.
    :param add_noncompliant: ignore control file parse errors.

    :return: dictionary of the form: tests[file_path] = parsed_object
    """
    tests = {}
    profilers = False
    if 'client/profilers' in parent_dir:
        profilers = True
    for dir in [parent_dir]:
        files = recursive_walk(dir, control_pattern)
        for file in files:
            if '__init__.py' in file or '.svn' in file:
                continue
            if not profilers:
                if not add_noncompliant:
                    try:
                        found_test = control_data.parse_control(file,
                                                                raise_warnings=True)
                        tests[file] = found_test
                    except control_data.ControlVariableException, e:
                        logging.warn("Skipping %s\n%s", file, e)
                    except Exception, e:
                        logging.error("Bad %s\n%s", file, e)
                else:
                    found_test = control_data.parse_control(file)
                    tests[file] = found_test
            else:
                tests[file] = compiler.parseFile(file).doc
    return tests


def recursive_walk(path, wildcard):
    """
    Recursively go through a directory.

    This function invoked by get_tests_from_fs().

    :param path: base directory to start search.
    :param wildcard: name format to match.

    :return: A list of files that match wildcard
    """
    files = []
    directories = [path]
    while len(directories) > 0:
        directory = directories.pop()
        for name in os.listdir(directory):
            fullpath = os.path.join(directory, name)
            if os.path.isfile(fullpath):
                # if we are a control file
                if re.search(wildcard, name):
                    files.append(fullpath)
            elif os.path.isdir(fullpath):
                directories.append(fullpath)
    return files


def _log_or_execute(content, func, *args, **kwargs):
    """
    Log a message if dry_run is enabled, or execute the given function.

    Relies on the DRY_RUN global variable.

    :param content: the actual log message.
    :param func: function to execute if dry_run is not enabled.
    :param subject: (Optional) The type of log being written. Defaults to
                    the name of the provided function.
    """
    subject = kwargs.get('subject', func.__name__)
    if DRY_RUN:
        logging.info("Would %s: %s", subject, content)
    else:
        func(*args)


def _create_whitelist_set(whitelist_path):
    ...
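Every destructive database operation above goes through _log_or_execute, which checks the module-level DRY_RUN flag: when the flag is set, the call is only described in the log ("Would delete: ..."); otherwise the supplied function is executed with the given positional arguments. The optional subject keyword only changes the wording of the dry-run message. The sketch below illustrates that behaviour in isolation; it is a minimal stand-alone example, and the FakeTest class and the DRY_RUN toggling are illustrative assumptions, not part of test_importer.py.

import logging

logging.basicConfig(level=logging.INFO)

# Module-level flag, mirroring the global that test_importer.py relies on.
DRY_RUN = True


def _log_or_execute(content, func, *args, **kwargs):
    # Same logic as the snippet above: describe the call in dry-run mode,
    # execute it otherwise.
    subject = kwargs.get('subject', func.__name__)
    if DRY_RUN:
        logging.info("Would %s: %s", subject, content)
    else:
        func(*args)


class FakeTest(object):
    # Hypothetical stand-in for a models.Test row, used only for this demo.
    def delete(self):
        logging.info("FakeTest.delete() actually ran")


test = FakeTest()

# DRY_RUN is True, so this only logs "Would delete: <FakeTest ...>".
_log_or_execute(repr(test), test.delete)

# Passing subject= changes the dry-run wording, as add_label_dependencies() does.
_log_or_execute(repr(test), test.delete, subject='clear dependencies from')

# With the flag cleared, the same call really invokes test.delete().
DRY_RUN = False
_log_or_execute(repr(test), test.delete)

Note that _log_or_execute forwards only *args (not **kwargs) to the wrapped function, so the subject keyword is consumed by the logging path and never reaches func.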



Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

