How to use the _fail_queue_entry method in autotest

Python code snippets using autotest_python, showing where _fail_queue_entry is defined and how it is called.

agent_task.py

Source: agent_task.py (GitHub)


...
    def prolog(self):
        super(SpecialAgentTask, self).prolog()
        self.task.activate()
        self._write_host_keyvals(self.host)

    def _fail_queue_entry(self):
        assert self.queue_entry

        if self.queue_entry.meta_host:
            return  # don't fail metahost entries, they'll be reassigned

        self.queue_entry.update_from_database()
        if self.queue_entry.status != models.HostQueueEntry.Status.QUEUED:
            return  # entry has been aborted

        self._actually_fail_queue_entry()

    # TODO(milleral): http://crbug.com/268607
    # All this used to be a part of _fail_queue_entry. The
    # exact semantics of when one should and should not be failing a queue
    # entry need to be worked out, because provisioning has placed us in a
    # case where we want to fail a queue entry that could be requeued,
    # which makes us fail the two above if statements, and thus
    # _fail_queue_entry() would exit early and have no effect.
    # What's left here with _actually_fail_queue_entry is a hack to be able to
    # bypass the checks and unconditionally execute the code.
    def _actually_fail_queue_entry(self):
        self.queue_entry.set_execution_subdir()
        queued_key, queued_time = self._job_queued_keyval(
                self.queue_entry.job)
        self._write_keyval_after_job(queued_key, queued_time)
        self._write_job_finished()

        # copy results logs into the normal place for job results
        self.monitor.try_copy_results_on_drone(
                source_path=self._working_directory() + '/',
                destination_path=self.queue_entry.execution_path() + '/')

        pidfile_id = self._drone_manager.get_pidfile_id_from(
                self.queue_entry.execution_path(),
                pidfile_name=drone_manager.AUTOSERV_PID_FILE)
        self._drone_manager.register_pidfile(pidfile_id)

        if self.queue_entry.job.parse_failed_repair:
            ...
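
As the snippet shows, _fail_queue_entry only has an effect on a non-metahost entry that is still in the QUEUED state; in every other case it returns without touching the entry. The sketch below is not autotest code: it mirrors only that guard logic so the behaviour can be exercised without a scheduler or database. QUEUED, FakeQueueEntry, and fail_queue_entry are names invented for this example.

    # Minimal, self-contained sketch (assumed names, not autotest APIs).
    QUEUED = 'Queued'

    class FakeQueueEntry(object):
        def __init__(self, meta_host=None, status=QUEUED):
            self.meta_host = meta_host
            self.status = status
            self.failed = False

        def update_from_database(self):
            # The real entry re-reads its row here; the fake has nothing to do.
            pass

    def fail_queue_entry(queue_entry):
        assert queue_entry
        if queue_entry.meta_host:
            return  # metahost entries are never failed; they get reassigned
        queue_entry.update_from_database()
        if queue_entry.status != QUEUED:
            return  # the entry has already been aborted
        queue_entry.failed = True  # stand-in for _actually_fail_queue_entry()

    # A metahost entry is left untouched; a queued, non-metahost entry is failed.
    meta_entry = FakeQueueEntry(meta_host='board:example')
    fail_queue_entry(meta_entry)
    assert not meta_entry.failed

    plain_entry = FakeQueueEntry()
    fail_queue_entry(plain_entry)
    assert plain_entry.failed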

prejob_task.py

Source: prejob_task.py (GitHub)


...
                task=models.SpecialTask.Task.PROVISION,
                queue_entry_id=self.queue_entry.id).count()
            if (previous_provisions >
                scheduler_config.config.max_provision_retries):
                self._actually_fail_queue_entry()
                # This abort will mark the aborted bit on the HQE itself, to
                # signify that we're killing it. Technically it also will do
                # the recursive aborting of all child jobs, but that shouldn't
                # matter here, as only suites have children, and those are
                # hostless and thus don't have provisioning.
                # TODO(milleral) http://crbug.com/188217
                # However, we can't actually do this yet, as if we set the
                # abort bit the FinalReparseTask will set the status of the HQE
                # to ABORTED, which then means that we don't show the status in
                # run_suite. So in the meantime, don't mark the HQE as
                # aborted.
                # queue_entry.abort()
            else:
                # requeue() must come after handling provision retries, since
                # _actually_fail_queue_entry needs an execution subdir.
                # We also don't want to requeue if we hit the provision retry
                # limit, since then we overwrite the PARSING state of the HQE.
                self.queue_entry.requeue()

            # Limit the repair on a host when a prejob task fails, e.g., reset,
            # verify etc. The number of repair jobs is limited to the specific
            # HQE and host.
            previous_repairs = models.SpecialTask.objects.filter(
                    task=models.SpecialTask.Task.REPAIR,
                    queue_entry_id=self.queue_entry.id,
                    host_id=self.queue_entry.host_id).count()
            if previous_repairs >= scheduler_config.config.max_repair_limit:
                self.host.set_status(models.Host.Status.REPAIR_FAILED)
                self._fail_queue_entry()
                return

            queue_entry = models.HostQueueEntry.objects.get(
                    id=self.queue_entry.id)
        else:
            queue_entry = None

        models.SpecialTask.objects.create(
                host=models.Host.objects.get(id=self.host.id),
                task=models.SpecialTask.Task.REPAIR,
                queue_entry=queue_entry,
                requested_by=self.task.requested_by)

    def _should_pending(self):
        """
        Decide if we should call the host queue entry's on_pending method.

        We should if:
        1) There exists an associated host queue entry.
        2) The current special task completed successfully.
        3) There do not exist any more special tasks to be run before the
           host queue entry starts.

        @returns: True if we should call pending, false if not.
        """
        if not self.queue_entry or not self.success:
            return False

        # We know if this is the last one when we create it, so we could add
        # another column to the database to keep track of this information, but
        # I expect the overhead of querying here to be minimal.
        queue_entry = models.HostQueueEntry.objects.get(id=self.queue_entry.id)
        queued = models.SpecialTask.objects.filter(
                host__id=self.host.id, is_active=False,
                is_complete=False, queue_entry=queue_entry)
        queued = queued.exclude(id=self.task.id)
        return queued.count() == 0


class VerifyTask(PreJobTask):
    TASK_TYPE = models.SpecialTask.Task.VERIFY

    def __init__(self, task):
        args = ['-v']
        if task.queue_entry:
            args.extend(self._generate_autoserv_label_args(task))
        super(VerifyTask, self).__init__(task, args)
        self._set_ids(host=self.host, queue_entries=[self.queue_entry])

    def prolog(self):
        super(VerifyTask, self).prolog()
        logging.info("starting verify on %s", self.host.hostname)
        if self.queue_entry:
            self.queue_entry.set_status(models.HostQueueEntry.Status.VERIFYING)
        self.host.set_status(models.Host.Status.VERIFYING)

        # Delete any queued manual reverifies for this host. One verify will do
        # and there's no need to keep records of other requests.
        self.remove_special_tasks(models.SpecialTask.Task.VERIFY,
                                  keep_last_one=True)

    def epilog(self):
        super(VerifyTask, self).epilog()
        if self.success:
            if self._should_pending():
                self.queue_entry.on_pending()
            else:
                self.host.set_status(models.Host.Status.READY)


class CleanupTask(PreJobTask):
    # note this can also run post-job, but when it does, it's running standalone
    # against the host (not related to the job), so it's not considered a
    # PostJobTask

    TASK_TYPE = models.SpecialTask.Task.CLEANUP

    def __init__(self, task, recover_run_monitor=None):
        args = ['--cleanup']
        if task.queue_entry:
            args.extend(self._generate_autoserv_label_args(task))
        super(CleanupTask, self).__init__(task, args)
        self._set_ids(host=self.host, queue_entries=[self.queue_entry])

    def prolog(self):
        super(CleanupTask, self).prolog()
        logging.info("starting cleanup task for host: %s", self.host.hostname)
        self.host.set_status(models.Host.Status.CLEANING)
        if self.queue_entry:
            self.queue_entry.set_status(models.HostQueueEntry.Status.CLEANING)

    def _finish_epilog(self):
        if not self.queue_entry or not self.success:
            return

        do_not_verify_protection = host_protections.Protection.DO_NOT_VERIFY
        should_run_verify = (
                self.queue_entry.job.run_verify
                and self.host.protection != do_not_verify_protection)
        if should_run_verify:
            entry = models.HostQueueEntry.objects.get(id=self.queue_entry.id)
            models.SpecialTask.objects.create(
                    host=models.Host.objects.get(id=self.host.id),
                    queue_entry=entry,
                    task=models.SpecialTask.Task.VERIFY)
        else:
            if self._should_pending():
                self.queue_entry.on_pending()

    def epilog(self):
        super(CleanupTask, self).epilog()

        if self.success:
            self.host.update_field('dirty', 0)
            self.host.set_status(models.Host.Status.READY)

        self._finish_epilog()


class ResetTask(PreJobTask):
    """Task to reset a DUT, including cleanup and verify."""
    # note this can also run post-job, but when it does, it's running standalone
    # against the host (not related to the job), so it's not considered a
    # PostJobTask

    TASK_TYPE = models.SpecialTask.Task.RESET

    def __init__(self, task, recover_run_monitor=None):
        args = ['--reset']
        if task.queue_entry:
            args.extend(self._generate_autoserv_label_args(task))
        super(ResetTask, self).__init__(task, args)
        self._set_ids(host=self.host, queue_entries=[self.queue_entry])

    def prolog(self):
        super(ResetTask, self).prolog()
        logging.info('starting reset task for host: %s',
                     self.host.hostname)
        self.host.set_status(models.Host.Status.RESETTING)
        if self.queue_entry:
            self.queue_entry.set_status(models.HostQueueEntry.Status.RESETTING)

        # Delete any queued cleanups for this host.
        self.remove_special_tasks(models.SpecialTask.Task.CLEANUP,
                                  keep_last_one=False)

        # Delete any queued reverifies for this host.
        self.remove_special_tasks(models.SpecialTask.Task.VERIFY,
                                  keep_last_one=False)

        # Only one reset is needed.
        self.remove_special_tasks(models.SpecialTask.Task.RESET,
                                  keep_last_one=True)

    def epilog(self):
        super(ResetTask, self).epilog()

        if self.success:
            self.host.update_field('dirty', 0)

            if self._should_pending():
                self.queue_entry.on_pending()
            else:
                self.host.set_status(models.Host.Status.READY)


class ProvisionTask(PreJobTask):
    TASK_TYPE = models.SpecialTask.Task.PROVISION

    def __init__(self, task):
        # Provisioning requires that we be associated with a job/queue entry
        assert task.queue_entry, "No HQE associated with provision task!"
        # task.queue_entry is an afe model HostQueueEntry object.
        # self.queue_entry is a scheduler models HostQueueEntry object, but
        # it gets constructed and assigned in __init__, so it's not available
        # yet. Therefore, we're stuck pulling labels off of the afe model
        # so that we can pass the --provision args into the __init__ call.
        labels = {x.name for x in task.queue_entry.job.labels}
        _, provisionable = provision.filter_labels(labels)
        extra_command_args = ['--provision',
                              '--job-labels', ','.join(provisionable)]
        super(ProvisionTask, self).__init__(task, extra_command_args)
        self._set_ids(host=self.host, queue_entries=[self.queue_entry])

    def _command_line(self):
        # If we give queue_entry to _autoserv_command_line, then it will append
        # -c for this invocation if the queue_entry is a client side test. We
        # don't want that, as it messes with provisioning, so we just drop it
        # from the arguments here.
        # Note that we also don't verify job_repo_url as provisioining tasks are
        # required to stage whatever content we need, and the job itself will
        # force autotest to be staged if it isn't already.
        return autoserv_utils._autoserv_command_line(self.host.hostname,
                                                     self._extra_command_args,
                                                     in_lab=True)

    def prolog(self):
        super(ProvisionTask, self).prolog()
        # add check for previous provision task and abort if exist.
        logging.info("starting provision task for host: %s", self.host.hostname)
        self.queue_entry.set_status(
                models.HostQueueEntry.Status.PROVISIONING)
        self.host.set_status(models.Host.Status.PROVISIONING)

    def epilog(self):
        super(ProvisionTask, self).epilog()

        # If we were not successful in provisioning the machine
        # leave the DUT in whatever status was set in the PreJobTask's
        # epilog. If this task was successful the host status will get
        # set appropriately as a fallout of the hqe's on_pending. If
        # we don't call on_pending, it can only be because:
        # 1. This task was not successful:
        #    a. Another repair is queued: this repair job will set the host
        #       status, and it will remain in 'Provisioning' till then.
        #    b. We have hit the max_repair_limit: in which case the host
        #       status is set to 'RepairFailed' in the epilog of PreJobTask.
        # 2. The task was successful, but there are other special tasks:
        #    Those special tasks will set the host status appropriately.
        if self._should_pending():
            self.queue_entry.on_pending()


class RepairTask(agent_task.SpecialAgentTask):
    TASK_TYPE = models.SpecialTask.Task.REPAIR

    def __init__(self, task):
        """\
        queue_entry: queue entry to mark failed if this repair fails.
        """
        protection = host_protections.Protection.get_string(
                task.host.protection)
        # normalize the protection name
        protection = host_protections.Protection.get_attr_name(protection)

        args = ['-R', '--host-protection', protection]
        if task.queue_entry:
            args.extend(self._generate_autoserv_label_args(task))

        super(RepairTask, self).__init__(task, args)

        # *don't* include the queue entry in IDs -- if the queue entry is
        # aborted, we want to leave the repair task running
        self._set_ids(host=self.host)

    def prolog(self):
        super(RepairTask, self).prolog()
        logging.info("repair_task starting")
        self.host.set_status(models.Host.Status.REPAIRING)

    def epilog(self):
        super(RepairTask, self).epilog()
        if self.success:
            self.host.set_status(models.Host.Status.READY)
        else:
            self.host.set_status(models.Host.Status.REPAIR_FAILED)
            if self.queue_entry:
                ...
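
The PreJobTask epilog above decides between three outcomes when a prejob task fails: fail the HQE outright via _actually_fail_queue_entry once the provision retry budget is spent, fail it through _fail_queue_entry (and mark the host Repair Failed) once the repair budget is spent, or requeue it and schedule another repair. The sketch below is an illustration, not autotest code: prejob_failure_actions and its return strings are invented for this example, and max_provision_retries / max_repair_limit stand in for the scheduler_config values used above.

    # Simplified decision sketch of the limit checks in the PreJobTask epilog.
    def prejob_failure_actions(previous_provisions, max_provision_retries,
                               previous_repairs, max_repair_limit):
        actions = []
        if previous_provisions > max_provision_retries:
            # Provision retry budget exhausted: fail the HQE unconditionally via
            # _actually_fail_queue_entry(), bypassing the _fail_queue_entry guards.
            actions.append('actually_fail_queue_entry')
        else:
            # Otherwise requeue; requeueing is skipped past the limit so the
            # PARSING state of the HQE is not overwritten.
            actions.append('requeue')
        if previous_repairs >= max_repair_limit:
            # Repair budget exhausted: mark the host Repair Failed, fail the HQE
            # through _fail_queue_entry(), and queue no further repair.
            actions.append('fail_queue_entry')
        else:
            actions.append('schedule_repair_task')
        return actions

    # Within both budgets the entry is requeued and one more repair is scheduled.
    assert prejob_failure_actions(0, 1, 0, 2) == ['requeue', 'schedule_repair_task']
    # Past both budgets the entry is failed outright and no repair is queued.
    assert prejob_failure_actions(2, 1, 2, 2) == ['actually_fail_queue_entry',
                                                  'fail_queue_entry']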
