How to use instance method in fast-check-monorepo

Best Python code snippet using fast-check-monorepo

manager.py

Source: manager.py (GitHub)


...
    instances.
    """
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        def _load_instance(instance_or_dict):
            if isinstance(instance_or_dict, dict):
                instance = objects.Instance._from_db_object(
                    context, objects.Instance(), instance_or_dict,
                    expected_attrs=metas)
                instance._context = context
                return instance
            return instance_or_dict
        metas = ['metadata', 'system_metadata']
        try:
            kwargs['instance'] = _load_instance(kwargs['instance'])
        except KeyError:
            args = (_load_instance(args[0]),) + args[1:]
        migration = kwargs.get('migration')
        if isinstance(migration, dict):
            migration = objects.Migration._from_db_object(
                context.elevated(), objects.Migration(),
                migration)
            kwargs['migration'] = migration
        return function(self, context, *args, **kwargs)
    return decorated_function
# TODO(danms): Remove me after Icehouse
def aggregate_object_compat(function):
    """Wraps a method that expects a new-world aggregate."""
    @functools.wraps(function)
    def decorated_function(self, context, *args, **kwargs):
        aggregate = kwargs.get('aggregate')
        if isinstance(aggregate, dict):
            aggregate = objects.Aggregate._from_db_object(
                context.elevated(), objects.Aggregate(),
                aggregate)
            kwargs['aggregate'] = aggregate
        return function(self, context, *args, **kwargs)
    return decorated_function
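# --- Illustrative aside (editorial, not part of nova's manager.py) --------
# A minimal, self-contained mirror of the compatibility-decorator pattern
# used by object_compat/aggregate_object_compat above: normalize a dict
# keyword argument into an object before invoking the wrapped method.
# The names _ExampleRecord and _record_compat are hypothetical.
import functools  # already imported by this module; repeated so the sketch stands alone
class _ExampleRecord(object):
    def __init__(self, **fields):
        self.__dict__.update(fields)
def _record_compat(function):
    @functools.wraps(function)
    def decorated(self, context, *args, **kwargs):
        record = kwargs.get('record')
        if isinstance(record, dict):
            # old-style callers pass a plain dict; convert it to an object
            kwargs['record'] = _ExampleRecord(**record)
        return function(self, context, *args, **kwargs)
    return decorated
# ---------------------------------------------------------------------------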
class InstanceEvents(object):
    def __init__(self):
        self._events = {}
    @staticmethod
    def _lock_name(instance):
        return '%s-%s' % (instance.uuid, 'events')
    def prepare_for_instance_event(self, instance, event_name):
        """Prepare to receive an event for an instance.
        This will register an event for the given instance that we will
        wait on later. This should be called before initiating whatever
        action will trigger the event. The resulting eventlet.event.Event
        object should be wait()'d on to ensure completion.
        :param instance: the instance for which the event will be generated
        :param event_name: the name of the event we're expecting
        :returns: an event object that should be wait()'d on
        """
        @utils.synchronized(self._lock_name(instance))
        def _create_or_get_event():
            if instance.uuid not in self._events:
                self._events.setdefault(instance.uuid, {})
            return self._events[instance.uuid].setdefault(
                event_name, eventlet.event.Event())
        LOG.debug('Preparing to wait for external event %(event)s',
                  {'event': event_name}, instance=instance)
        return _create_or_get_event()
    def pop_instance_event(self, instance, event):
        """Remove a pending event from the wait list.
        This will remove a pending event from the wait list so that it
        can be used to signal the waiters to wake up.
        :param instance: the instance for which the event was generated
        :param event: the nova.objects.external_event.InstanceExternalEvent
                      that describes the event
        :returns: the eventlet.event.Event object on which the waiters
                  are blocked
        """
        no_events_sentinel = object()
        no_matching_event_sentinel = object()
        @utils.synchronized(self._lock_name(instance))
        def _pop_event():
            events = self._events.get(instance.uuid)
            if not events:
                return no_events_sentinel
            _event = events.pop(event.key, None)
            if not events:
                del self._events[instance.uuid]
            if _event is None:
                return no_matching_event_sentinel
            return _event
        result = _pop_event()
        if result == no_events_sentinel:
            LOG.debug('No waiting events found dispatching %(event)s',
                      {'event': event.key},
                      instance=instance)
            return None
        elif result == no_matching_event_sentinel:
            LOG.debug('No event matching %(event)s in %(events)s',
                      {'event': event.key,
                       'events': self._events.get(instance.uuid, {}).keys()},
                      instance=instance)
            return None
        else:
            return result
    def clear_events_for_instance(self, instance):
        """Remove all pending events for an instance.
        This will remove all events currently pending for an instance
        and return them (indexed by event name).
        :param instance: the instance for which events should be purged
        :returns: a dictionary of {event_name: eventlet.event.Event}
        """
        @utils.synchronized(self._lock_name(instance))
        def _clear_events():
            # NOTE(danms): Use getitem syntax for the instance until
            # all the callers are using objects
            return self._events.pop(instance['uuid'], {})
        return _clear_events()
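# --- Illustrative aside (editorial, not part of nova's manager.py) --------
# A self-contained sketch of the registry pattern InstanceEvents implements:
# the waiter registers an event by name *before* acting, and another thread
# later pops the entry and signals it. threading.Event stands in for
# eventlet.event.Event; all names below are hypothetical.
import threading
_pending_events = {}
def _prepare_event(uuid, name):
    return _pending_events.setdefault(uuid, {}).setdefault(
        name, threading.Event())
def _pop_and_signal_event(uuid, name):
    event = _pending_events.get(uuid, {}).pop(name, None)
    if event is not None:
        event.set()  # wake up whoever is wait()-ing on it
    return event
# waiter:   _prepare_event('uuid-1', 'network-vif-plugged').wait(timeout=300)
# signaler: _pop_and_signal_event('uuid-1', 'network-vif-plugged')
# ---------------------------------------------------------------------------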
class ComputeVirtAPI(virtapi.VirtAPI):
    def __init__(self, compute):
        super(ComputeVirtAPI, self).__init__()
        self._compute = compute
    def provider_fw_rule_get_all(self, context):
        return self._compute.conductor_api.provider_fw_rule_get_all(context)
    def _default_error_callback(self, event_name, instance):
        raise exception.NovaException(_('Instance event failed'))
    @contextlib.contextmanager
    def wait_for_instance_event(self, instance, event_names, deadline=300,
                                error_callback=None):
        """Plan to wait for some events, run some code, then wait.
        This context manager will first create plans to wait for the
        provided event_names, yield, and then wait for all the scheduled
        events to complete.
        Note that this uses an eventlet.timeout.Timeout to bound the
        operation, so callers should be prepared to catch that
        failure and handle that situation appropriately.
        If the event is not received by the specified timeout deadline,
        eventlet.timeout.Timeout is raised.
        If the event is received but did not have a 'completed'
        status, a NovaException is raised. If an error_callback is
        provided, instead of raising an exception as detailed above
        for the failure case, the callback will be called with the
        event_name and instance, and can return True to continue
        waiting for the rest of the events, False to stop processing,
        or raise an exception which will bubble up to the waiter.
        :param instance: The instance for which an event is expected
        :param event_names: A list of event names. Each element can be a
                            string event name or tuple of strings to
                            indicate (name, tag).
        :param deadline: Maximum number of seconds we should wait for all
                         of the specified events to arrive.
        :param error_callback: A function to be called if an event arrives
        """
        if error_callback is None:
            error_callback = self._default_error_callback
        events = {}
        for event_name in event_names:
            if isinstance(event_name, tuple):
                name, tag = event_name
                event_name = objects.InstanceExternalEvent.make_key(
                    name, tag)
            events[event_name] = (
                self._compute.instance_events.prepare_for_instance_event(
                    instance, event_name))
        yield
        with eventlet.timeout.Timeout(deadline):
            for event_name, event in events.items():
                actual_event = event.wait()
                if actual_event.status == 'completed':
                    continue
                decision = error_callback(event_name, instance)
                if decision is False:
                    break
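# --- Illustrative aside (editorial, not part of nova's manager.py) --------
# How a caller typically uses wait_for_instance_event above: register the
# expected events first, perform the triggering action inside the "with"
# block, and let the exit path wait for completion. The event tag (vif_id)
# and the surrounding handler names are hypothetical.
#
#     events = [('network-vif-plugged', vif_id)]
#     try:
#         with self.virtapi.wait_for_instance_event(instance, events,
#                                                   deadline=300):
#             plug_the_vif()  # the action that makes the event arrive
#     except eventlet.timeout.Timeout:
#         handle_missing_event(instance)
# ---------------------------------------------------------------------------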
class ComputeManager(manager.Manager):
    """Manages the running instances from creation to destruction."""
    target = messaging.Target(version='3.34')
    # How long to wait in seconds before re-issuing a shutdown
    # signal to a instance during power off. The overall
    # time to wait is set by CONF.shutdown_timeout.
    SHUTDOWN_RETRY_INTERVAL = 10
    def __init__(self, compute_driver=None, *args, **kwargs):
        """Load configuration options and connect to the hypervisor."""
        self.virtapi = ComputeVirtAPI(self)
        self.network_api = network.API()
        self.volume_api = volume.API()
        self.image_api = image.API()
        self._last_host_check = 0
        self._last_bw_usage_poll = 0
        self._bw_usage_supported = True
        self._last_bw_usage_cell_update = 0
        self.compute_api = compute.API()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.conductor_api = conductor.API()
        self.compute_task_api = conductor.ComputeTaskAPI()
        self.is_neutron_security_groups = (
            openstack_driver.is_neutron_security_groups())
        self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self._resource_tracker_dict = {}
        self.instance_events = InstanceEvents()
        self._sync_power_pool = eventlet.GreenPool()
        self._syncs_in_progress = {}
        super(ComputeManager, self).__init__(service_name="compute",
                                             *args, **kwargs)
        # NOTE(russellb) Load the driver last. It may call back into the
        # compute manager via the virtapi, so we want it to be fully
        # initialized before that happens.
        self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
        self.use_legacy_block_device_info = \
            self.driver.need_legacy_block_device_info
    def _get_resource_tracker(self, nodename):
        rt = self._resource_tracker_dict.get(nodename)
        if not rt:
            if not self.driver.node_is_available(nodename):
                raise exception.NovaException(
                    _("%s is not a valid node managed by this "
                      "compute host.") % nodename)
            rt = resource_tracker.ResourceTracker(self.host,
                                                  self.driver,
                                                  nodename)
            self._resource_tracker_dict[nodename] = rt
        return rt
    def _update_resource_tracker(self, context, instance):
        """Let the resource tracker know that an instance has changed state."""
        if (instance['host'] == self.host and
                self.driver.node_is_available(instance['node'])):
            rt = self._get_resource_tracker(instance.get('node'))
            rt.update_usage(context, instance)
    def _instance_update(self, context, instance_uuid, **kwargs):
        """Update an instance in the database using kwargs as value."""
        instance_ref = self.conductor_api.instance_update(context,
                                                          instance_uuid,
                                                          **kwargs)
        self._update_resource_tracker(context, instance_ref)
        return instance_ref
    def _set_instance_error_state(self, context, instance):
        instance_uuid = instance['uuid']
        try:
            self._instance_update(context, instance_uuid,
                                  vm_state=vm_states.ERROR)
        except exception.InstanceNotFound:
            LOG.debug('Instance has been destroyed from under us while '
                      'trying to set it to ERROR',
                      instance_uuid=instance_uuid)
    def _set_instance_obj_error_state(self, context, instance):
        try:
            instance.vm_state = vm_states.ERROR
            instance.save()
        except exception.InstanceNotFound:
            LOG.debug('Instance has been destroyed from under us while '
                      'trying to set it to ERROR', instance=instance)
    def _get_instances_on_driver(self, context, filters=None):
        """Return a list of instance records for the instances found
        on the hypervisor which satisfy the specified filters. If filters=None
        return a list of instance records for all the instances found on the
        hypervisor.
        """
        if not filters:
            filters = {}
        try:
            driver_uuids = self.driver.list_instance_uuids()
            filters['uuid'] = driver_uuids
            local_instances = objects.InstanceList.get_by_filters(
                context, filters, use_slave=True)
            return local_instances
        except NotImplementedError:
            pass
        # The driver doesn't support uuids listing, so we'll have
        # to brute force.
        driver_instances = self.driver.list_instances()
        instances = objects.InstanceList.get_by_filters(context, filters,
                                                        use_slave=True)
        name_map = dict((instance.name, instance) for instance in instances)
        local_instances = []
        for driver_instance in driver_instances:
            instance = name_map.get(driver_instance)
            if not instance:
                continue
            local_instances.append(instance)
        return local_instances
    def _destroy_evacuated_instances(self, context):
        """Destroys evacuated instances.
        While nova-compute was down, the instances running on it could be
        evacuated to another host. Check that the instances reported
        by the driver are still associated with this host.
        If they are not, destroy them, with the exception of instances which are in
        the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
        task state or RESIZED vm state.
        """
        our_host = self.host
        filters = {'deleted': False}
        local_instances = self._get_instances_on_driver(context, filters)
        for instance in local_instances:
            if instance.host != our_host:
                if (instance.task_state in [task_states.MIGRATING,
                                            task_states.RESIZE_MIGRATING,
                                            task_states.RESIZE_MIGRATED,
                                            task_states.RESIZE_FINISH]
                        or instance.vm_state in [vm_states.RESIZED]):
                    LOG.debug('Will not delete instance as its host ('
                              '%(instance_host)s) is not equal to our '
                              'host (%(our_host)s) but its task state is '
                              '(%(task_state)s) and vm state is '
                              '(%(vm_state)s)',
                              {'instance_host': instance.host,
                               'our_host': our_host,
                               'task_state': instance.task_state,
                               'vm_state': instance.vm_state},
                              instance=instance)
                    continue
                LOG.info(_('Deleting instance as its host ('
                           '%(instance_host)s) is not equal to our '
                           'host (%(our_host)s).'),
                         {'instance_host': instance.host,
                          'our_host': our_host}, instance=instance)
                try:
                    network_info = self._get_instance_nw_info(context,
                                                              instance)
                    bdi = self._get_instance_block_device_info(context,
                                                               instance)
                    destroy_disks = not (self._is_instance_storage_shared(
                        context, instance))
                except exception.InstanceNotFound:
                    network_info = network_model.NetworkInfo()
                    bdi = {}
                    LOG.info(_('Instance has been marked deleted already, '
                               'removing it from the hypervisor.'),
                             instance=instance)
                    # always destroy disks if the instance was deleted
                    destroy_disks = True
                self.driver.destroy(context, instance,
                                    network_info,
                                    bdi, destroy_disks)
    def _is_instance_storage_shared(self, context, instance):
        shared_storage = True
        data = None
        try:
            data = self.driver.check_instance_shared_storage_local(context,
                                                                   instance)
            if data:
                shared_storage = (self.compute_rpcapi.
                                  check_instance_shared_storage(context,
                                                                instance, data))
        except NotImplementedError:
            LOG.warning(_('Hypervisor driver does not support '
                          'instance shared storage check, '
                          'assuming it\'s not on shared storage'),
                        instance=instance)
            shared_storage = False
        except Exception:
            LOG.exception(_LE('Failed to check if instance shared'),
                          instance=instance)
        finally:
            if data:
                self.driver.check_instance_shared_storage_cleanup(context,
                                                                  data)
        return shared_storage
    def _complete_partial_deletion(self, context, instance):
        """Complete deletion for instances in DELETED status but not marked as
        deleted in the DB
        """
        instance.destroy()
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        quotas = objects.Quotas(context)
        project_id, user_id = quotas_obj.ids_from_instance(context, instance)
        quotas.reserve(context, project_id=project_id, user_id=user_id,
                       instances=-1, cores=-instance.vcpus,
                       ram=-instance.memory_mb)
        self._complete_deletion(context,
                                instance,
                                bdms,
                                quotas,
                                instance.system_metadata)
    def _complete_deletion(self, context, instance, bdms,
                           quotas, system_meta):
        if quotas:
            quotas.commit()
        # ensure block device mappings are not leaked
        for bdm in bdms:
            bdm.destroy()
        self._notify_about_instance_usage(context, instance, "delete.end",
                                          system_metadata=system_meta)
        if CONF.vnc_enabled or CONF.spice.enabled:
            if CONF.cells.enable:
                self.cells_rpcapi.consoleauth_delete_tokens(context,
                                                            instance.uuid)
            else:
                self.consoleauth_rpcapi.delete_tokens_for_instance(context,
                                                                   instance.uuid)
    def _init_instance(self, context, instance):
        '''Initialize this instance during service init.'''
        # Instances that are shut down, or in an error state can not be
        # initialized and are not attempted to be recovered. The exception
        # to this are instances that are in RESIZE_MIGRATING or DELETING,
        # which are dealt with further down.
        if (instance.vm_state == vm_states.SOFT_DELETED or
            (instance.vm_state == vm_states.ERROR and
             instance.task_state not in
             (task_states.RESIZE_MIGRATING, task_states.DELETING))):
            LOG.debug("Instance is in %s state.",
                      instance.vm_state, instance=instance)
            return
        if instance.vm_state == vm_states.DELETED:
            try:
                self._complete_partial_deletion(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
            return
        if (instance.vm_state == vm_states.BUILDING or
                instance.task_state in [task_states.SCHEDULING,
                                        task_states.BLOCK_DEVICE_MAPPING,
                                        task_states.NETWORKING,
                                        task_states.SPAWNING]):
            # NOTE(dave-mcnally) compute stopped before instance was fully
            # spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not so if we get here the
            # instance has already been scheduled to this particular host.
            LOG.debug("Instance failed to spawn correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
                instance.task_state in [task_states.REBUILDING,
                                        task_states.REBUILD_BLOCK_DEVICE_MAPPING,
                                        task_states.REBUILD_SPAWNING]):
            # NOTE(jichenjc) compute stopped before instance was fully
            # spawned so set to ERROR state. This is consistent to BUILD
            LOG.debug("Instance failed to rebuild correctly, "
                      "setting to ERROR state", instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ERROR
            instance.save()
            return
        if (instance.vm_state != vm_states.ERROR and
                instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
                                        task_states.IMAGE_PENDING_UPLOAD,
                                        task_states.IMAGE_UPLOADING,
                                        task_states.IMAGE_SNAPSHOT]):
            LOG.debug("Instance in transitional state %s at start-up "
                      "clearing task state",
                      instance['task_state'], instance=instance)
            try:
                self._post_interrupted_snapshot_cleanup(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to cleanup snapshot.')
                LOG.exception(msg, instance=instance)
            instance.task_state = None
            instance.save()
        if instance.task_state == task_states.DELETING:
            try:
                LOG.info(_('Service started deleting the instance during '
                           'the previous run, but did not finish. Restarting '
                           'the deletion now.'), instance=instance)
                instance.obj_load_attr('metadata')
                instance.obj_load_attr('system_metadata')
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
                # FIXME(comstud): This needs fixed. We should be creating
                # reservations and updating quotas, because quotas
                # wouldn't have been updated for this instance since it is
                # still in DELETING. See bug 1296414.
                #
                # Create a dummy quota object for now.
                quotas = objects.Quotas.from_reservations(
                    context, None, instance=instance)
                self._delete_instance(context, instance, bdms, quotas)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to complete a deletion')
                LOG.exception(msg, instance=instance)
                self._set_instance_error_state(context, instance)
            return
        try_reboot, reboot_type = self._retry_reboot(context, instance)
        current_power_state = self._get_power_state(context, instance)
        if try_reboot:
            LOG.debug("Instance in transitional state (%(task_state)s) at "
                      "start-up and power state is (%(power_state)s), "
                      "triggering reboot",
                      {'task_state': instance['task_state'],
                       'power_state': current_power_state},
                      instance=instance)
            self.compute_rpcapi.reboot_instance(context, instance,
                                                block_device_info=None,
                                                reboot_type=reboot_type)
            return
        elif (current_power_state == power_state.RUNNING and
              instance.task_state in [task_states.REBOOT_STARTED,
                                      task_states.REBOOT_STARTED_HARD]):
            LOG.warning(_("Instance in transitional state "
                          "(%(task_state)s) at start-up and power state "
                          "is (%(power_state)s), clearing task state"),
                        {'task_state': instance['task_state'],
                         'power_state': current_power_state},
                        instance=instance)
            instance.task_state = None
            instance.vm_state = vm_states.ACTIVE
            instance.save()
        if instance.task_state == task_states.POWERING_OFF:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying stop request",
                          instance['task_state'], instance=instance)
                self.stop_instance(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to stop instance')
                LOG.exception(msg, instance=instance)
            return
        if instance.task_state == task_states.POWERING_ON:
            try:
                LOG.debug("Instance in transitional state %s at start-up "
                          "retrying start request",
                          instance['task_state'], instance=instance)
                self.start_instance(context, instance)
            except Exception:
                # we don't want that an exception blocks the init_host
                msg = _LE('Failed to start instance')
                LOG.exception(msg, instance=instance)
            return
        net_info = compute_utils.get_nw_info_for_instance(instance)
        try:
            self.driver.plug_vifs(instance, net_info)
        except NotImplementedError as e:
            LOG.debug(e, instance=instance)
        if instance.task_state == task_states.RESIZE_MIGRATING:
            # We crashed during resize/migration, so roll back for safety
            try:
                # NOTE(mriedem): check old_vm_state for STOPPED here, if it's
                # not in system_metadata we default to True for backwards
                # compatibility
                power_on = (instance.system_metadata.get('old_vm_state') !=
                            vm_states.STOPPED)
                block_dev_info = self._get_instance_block_device_info(context,
                                                                      instance)
                self.driver.finish_revert_migration(context,
                    instance, net_info, block_dev_info, power_on)
            except Exception as e:
                LOG.exception(_LE('Failed to revert crashed migration'),
                              instance=instance)
            finally:
                LOG.info(_('Instance found in migrating state during '
                           'startup. Resetting task_state'),
                         instance=instance)
                instance.task_state = None
                instance.save()
        if instance.task_state == task_states.MIGRATING:
            # Live migration did not complete, but instance is on this
            # host, so reset the state.
            instance.task_state = None
            instance.save(expected_task_state=[task_states.MIGRATING])
        db_state = instance.power_state
        drv_state = self._get_power_state(context, instance)
        expect_running = (db_state == power_state.RUNNING and
                          drv_state != db_state)
        LOG.debug('Current state is %(drv_state)s, state in DB is '
                  '%(db_state)s.',
                  {'drv_state': drv_state, 'db_state': db_state},
                  instance=instance)
        if expect_running and CONF.resume_guests_state_on_host_boot:
            LOG.info(_('Rebooting instance after nova-compute restart.'),
                     instance=instance)
            block_device_info = \
                self._get_instance_block_device_info(context, instance)
            try:
                self.driver.resume_state_on_host_boot(
                    context, instance, net_info, block_device_info)
            except NotImplementedError:
                LOG.warning(_('Hypervisor driver does not support '
                              'resume guests'), instance=instance)
            except Exception:
                # NOTE(vish): The instance failed to resume, so we set the
                # instance to error and attempt to continue.
                LOG.warning(_('Failed to resume instance'), instance=instance)
                self._set_instance_error_state(context, instance)
        elif drv_state == power_state.RUNNING:
            # VMwareAPI drivers will raise an exception
            try:
                self.driver.ensure_filtering_rules_for_instance(
                    instance, net_info)
            except NotImplementedError:
                LOG.warning(_('Hypervisor driver does not support '
                              'firewall rules'), instance=instance)
    def _retry_reboot(self, context, instance):
        current_power_state = self._get_power_state(context, instance)
        current_task_state = instance.task_state
        retry_reboot = False
        reboot_type = compute_utils.get_reboot_type(current_task_state,
                                                    current_power_state)
        pending_soft = (current_task_state == task_states.REBOOT_PENDING and
                        instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
        pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
                        and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
        started_not_running = (current_task_state in
                               [task_states.REBOOT_STARTED,
                                task_states.REBOOT_STARTED_HARD] and
                               current_power_state != power_state.RUNNING)
        if pending_soft or pending_hard or started_not_running:
            retry_reboot = True
        return retry_reboot, reboot_type
    def handle_lifecycle_event(self, event):
        LOG.info(_("VM %(state)s (Lifecycle Event)") %
                 {'state': event.get_name()},
                 instance_uuid=event.get_instance_uuid())
        context = nova.context.get_admin_context(read_deleted='yes')
        instance = objects.Instance.get_by_uuid(context,
                                                event.get_instance_uuid())
        vm_power_state = None
        if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
            vm_power_state = power_state.SHUTDOWN
        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
            vm_power_state = power_state.RUNNING
        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
            vm_power_state = power_state.PAUSED
        elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
            vm_power_state = power_state.RUNNING
        else:
            LOG.warning(_("Unexpected power state %d") %
                        event.get_transition())
        if vm_power_state is not None:
            LOG.debug('Synchronizing instance power state after lifecycle '
                      'event "%(event)s"; current vm_state: %(vm_state)s, '
                      'current task_state: %(task_state)s, current DB '
                      'power_state: %(db_power_state)s, VM power_state: '
                      '%(vm_power_state)s',
                      dict(event=event.get_name(),
                           vm_state=instance.vm_state,
                           task_state=instance.task_state,
                           db_power_state=instance.power_state,
                           vm_power_state=vm_power_state),
                      instance_uuid=instance.uuid)
            self._sync_instance_power_state(context,
                                            instance,
                                            vm_power_state)
    def handle_events(self, event):
        if isinstance(event, virtevent.LifecycleEvent):
            try:
                self.handle_lifecycle_event(event)
            except exception.InstanceNotFound:
                LOG.debug("Event %s arrived for non-existent instance. The "
                          "instance was probably deleted.", event)
        else:
            LOG.debug("Ignoring event %s", event)
    def init_virt_events(self):
        self.driver.register_event_listener(self.handle_events)
    def init_host(self):
        """Initialization for a standalone compute service."""
        self.driver.init_host(host=self.host)
        context = nova.context.get_admin_context()
        instances = objects.InstanceList.get_by_host(
            context, self.host, expected_attrs=['info_cache'])
        if CONF.defer_iptables_apply:
            self.driver.filter_defer_apply_on()
        self.init_virt_events()
        try:
            # checking that instance was not already evacuated to other host
            self._destroy_evacuated_instances(context)
            for instance in instances:
                self._init_instance(context, instance)
        finally:
            if CONF.defer_iptables_apply:
                self.driver.filter_defer_apply_off()
    def cleanup_host(self):
        self.driver.cleanup_host(host=self.host)
    def pre_start_hook(self):
        """After the service is initialized, but before we fully bring
        the service up by listening on RPC queues, make sure to update
        our available resources (and indirectly our available nodes).
        """
        self.update_available_resource(nova.context.get_admin_context())
    def _get_power_state(self, context, instance):
        """Retrieve the power state for the given instance."""
        LOG.debug('Checking state', instance=instance)
        try:
            return self.driver.get_info(instance)["state"]
        except exception.NotFound:
            return power_state.NOSTATE
    def get_console_topic(self, context):
        """Retrieves the console host for a project on this host.
        Currently this is just set in the flags for each compute host.
        """
        # TODO(mdragon): perhaps make this variable by console_type?
        return '%s.%s' % (CONF.console_topic, CONF.console_host)
    def get_console_pool_info(self, context, console_type):
        return self.driver.get_console_pool_info(console_type)
    @wrap_exception()
    def refresh_security_group_rules(self, context, security_group_id):
        """Tell the virtualization driver to refresh security group rules.
        Passes straight through to the virtualization driver.
        """
        return self.driver.refresh_security_group_rules(security_group_id)
    @wrap_exception()
    def refresh_security_group_members(self, context, security_group_id):
        """Tell the virtualization driver to refresh security group members.
        Passes straight through to the virtualization driver.
        """
        return self.driver.refresh_security_group_members(security_group_id)
    @wrap_exception()
    def refresh_instance_security_rules(self, context, instance):
        """Tell the virtualization driver to refresh security rules for
        an instance.
        Passes straight through to the virtualization driver.
        Synchronise the call because we may still be in the middle of
        creating the instance.
        """
        @utils.synchronized(instance['uuid'])
        def _sync_refresh():
            try:
                return self.driver.refresh_instance_security_rules(instance)
            except NotImplementedError:
                LOG.warning(_('Hypervisor driver does not support '
                              'security groups.'), instance=instance)
        return _sync_refresh()
    @wrap_exception()
    def refresh_provider_fw_rules(self, context):
        """This call passes straight through to the virtualization driver."""
        return self.driver.refresh_provider_fw_rules()
    def _get_instance_nw_info(self, context, instance, use_slave=False):
        """Get a list of dictionaries of network data of an instance."""
        if (not hasattr(instance, 'system_metadata') or
                len(instance['system_metadata']) == 0):
            # NOTE(danms): Several places in the code look up instances without
            # pulling system_metadata for performance, and call this function.
            # If we get an instance without it, re-fetch so that the call
            # to network_api (which requires it for instance_type) will
            # succeed.
            instance = objects.Instance.get_by_uuid(context,
                                                    instance['uuid'],
                                                    use_slave=use_slave)
        network_info = self.network_api.get_instance_nw_info(context,
                                                             instance)
        return network_info
    def _await_block_device_map_created(self, context, vol_id):
        # TODO(yamahata): creating volume simultaneously
        #                 reduces creation time?
        # TODO(yamahata): eliminate dumb polling
        start = time.time()
        retries = CONF.block_device_allocate_retries
        if retries < 0:
            LOG.warn(_LW("Treating negative config value (%(retries)s) for "
                         "'block_device_retries' as 0."),
                     {'retries': retries})
        # (1) treat negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #     is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            volume = self.volume_api.get(context, vol_id)
            volume_status = volume['status']
            if volume_status not in ['creating', 'downloading']:
                if volume_status != 'available':
                    LOG.warn(_("Volume id: %s finished being created but was"
                               " not set as 'available'"), vol_id)
                return attempt
            greenthread.sleep(CONF.block_device_allocate_retries_interval)
        # NOTE(harlowja): Should only happen if we ran out of attempts
        raise exception.VolumeNotCreated(volume_id=vol_id,
                                         seconds=int(time.time() - start),
                                         attempts=attempts)
    def _decode_files(self, injected_files):
        """Base64 decode the list of files to inject."""
        if not injected_files:
            return []
        def _decode(f):
            path, contents = f
            try:
                decoded = base64.b64decode(contents)
                return path, decoded
            except TypeError:
                raise exception.Base64Exception(path=path)
        return [_decode(f) for f in injected_files]
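    # --- Illustrative aside (editorial, not part of nova's manager.py) ----
    # The retry arithmetic used by _await_block_device_map_created above,
    # restated as a tiny helper for clarity: a negative setting is treated
    # as 0, 0 still yields one attempt, and N > 0 yields N + 1 attempts.
    # The helper name is hypothetical.
    #
    #     def _total_attempts(configured_retries):
    #         if configured_retries < 0:   # negative config treated as 0
    #             configured_retries = 0
    #         return configured_retries + 1 if configured_retries >= 1 else 1
    #
    #     # _total_attempts(-5) == 1; _total_attempts(0) == 1;
    #     # _total_attempts(3) == 4
    # -----------------------------------------------------------------------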
    def _run_instance(self, context, request_spec,
                      filter_properties, requested_networks, injected_files,
                      admin_password, is_first_time, node, instance,
                      legacy_bdm_in_spec):
        """Launch a new instance with specified options."""
        extra_usage_info = {}
        def notify(status, msg="", fault=None, **kwargs):
            """Send a create.{start,error,end} notification."""
            type_ = "create.%(status)s" % dict(status=status)
            info = extra_usage_info.copy()
            info['message'] = msg
            self._notify_about_instance_usage(context, instance, type_,
                extra_usage_info=info, fault=fault, **kwargs)
        try:
            self._prebuild_instance(context, instance)
            if request_spec and request_spec.get('image'):
                image_meta = request_spec['image']
            else:
                image_meta = {}
            extra_usage_info = {"image_name": image_meta.get('name', '')}
            notify("start")  # notify that build is starting
            instance, network_info = self._build_instance(context,
                request_spec, filter_properties, requested_networks,
                injected_files, admin_password, is_first_time, node,
                instance, image_meta, legacy_bdm_in_spec)
            notify("end", msg=_("Success"), network_info=network_info)
        except exception.RescheduledException as e:
            # Instance build encountered an error, and has been rescheduled.
            notify("error", fault=e)
        except exception.BuildAbortException as e:
            # Instance build aborted due to a non-failure
            LOG.info(e)
            notify("end", msg=e.format_message())  # notify that build is done
        except Exception as e:
            # Instance build encountered a non-recoverable error:
            with excutils.save_and_reraise_exception():
                self._set_instance_error_state(context, instance)
                notify("error", fault=e)  # notify that build failed
    def _prebuild_instance(self, context, instance):
        self._check_instance_exists(context, instance)
        try:
            self._start_building(context, instance)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            msg = _("Instance disappeared before we could start it")
            # Quickly bail out of here
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                                                reason=msg)
    def _validate_instance_group_policy(self, context, instance,
                                        filter_properties):
        # NOTE(russellb) Instance group policy is enforced by the scheduler.
        # However, there is a race condition with the enforcement of
        # anti-affinity. Since more than one instance may be scheduled at the
        # same time, it's possible that more than one instance with an
        # anti-affinity policy may end up here. This is a validation step to
        # make sure that starting the instance here doesn't violate the policy.
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group_hint = scheduler_hints.get('group')
        if not group_hint:
            return
        @utils.synchronized(group_hint)
        def _do_validation(context, instance, group_hint):
            group = objects.InstanceGroup.get_by_hint(context, group_hint)
            if 'anti-affinity' not in group.policies:
                return
            group_hosts = group.get_hosts(context, exclude=[instance.uuid])
            if self.host in group_hosts:
                msg = _("Anti-affinity instance group policy was violated.")
                raise exception.RescheduledException(
                    instance_uuid=instance.uuid,
                    reason=msg)
        _do_validation(context, instance, group_hint)
    def _build_instance(self, context, request_spec, filter_properties,
            requested_networks, injected_files, admin_password, is_first_time,
            node, instance, image_meta, legacy_bdm_in_spec):
        original_context = context
        context = context.elevated()
        # NOTE(danms): This method is deprecated, but could be called,
        # and if it is, it will have an old megatuple for requested_networks.
        if requested_networks is not None:
            requested_networks_obj = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])
        else:
            requested_networks_obj = None
        # If neutron security groups pass requested security
        # groups to allocate_for_instance()
        if request_spec and self.is_neutron_security_groups:
            security_groups = request_spec.get('security_group')
        else:
            security_groups = []
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node)
        network_info = None
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        # b64 decode the files to inject:
        injected_files_orig = injected_files
        injected_files = self._decode_files(injected_files)
        rt = self._get_resource_tracker(node)
        try:
            limits = filter_properties.get('limits', {})
            with rt.instance_claim(context, instance, limits):
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                                                     filter_properties)
                macs = self.driver.macs_for_instance(instance)
                dhcp_options = self.driver.dhcp_options_for_instance(instance)
                network_info = self._allocate_network(original_context,
                    instance, requested_networks_obj, macs,
                    security_groups, dhcp_options)
                instance.vm_state = vm_states.BUILDING
                instance.task_state = task_states.BLOCK_DEVICE_MAPPING
                instance.save()
                # Verify that all the BDMs have a device_name set and assign a
                # default to the ones missing it with the help of the driver.
                self._default_block_device_names(context, instance, image_meta,
                                                 bdms)
                block_device_info = self._prep_block_device(
                    context, instance, bdms)
                set_access_ip = (is_first_time and
                                 not instance.access_ip_v4 and
                                 not instance.access_ip_v6)
                instance = self._spawn(context, instance, image_meta,
                                       network_info, block_device_info,
                                       injected_files, admin_password,
                                       set_access_ip=set_access_ip)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the spawn
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            try:
                self._deallocate_network(context, instance)
            except Exception:
                msg = _LE('Failed to dealloc network '
                          'for deleted instance')
                LOG.exception(msg, instance=instance)
            raise exception.BuildAbortException(
                instance_uuid=instance.uuid,
                reason=_("Instance disappeared during build"))
        except (exception.UnexpectedTaskStateError,
                exception.VirtualInterfaceCreateException) as e:
            # Don't try to reschedule, just log and reraise.
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                # Make sure the async call finishes
                if network_info is not None:
                    network_info.wait(do_raise=False)
        except exception.InvalidBDM:
            with excutils.save_and_reraise_exception():
                if network_info is not None:
                    network_info.wait(do_raise=False)
                try:
                    self._deallocate_network(context, instance)
                except Exception:
                    msg = _LE('Failed to dealloc network '
                              'for failed instance')
                    LOG.exception(msg, instance=instance)
        except Exception:
            exc_info = sys.exc_info()
            # try to re-schedule instance:
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            rescheduled = self._reschedule_or_error(original_context, instance,
                exc_info, requested_networks, admin_password,
                injected_files_orig, is_first_time, request_spec,
                filter_properties, bdms, legacy_bdm_in_spec)
            if rescheduled:
                # log the original build error
                self._log_original_error(exc_info, instance.uuid)
                raise exception.RescheduledException(
                    instance_uuid=instance.uuid,
                    reason=unicode(exc_info[1]))
            else:
                # not re-scheduling, go to error:
                raise exc_info[0], exc_info[1], exc_info[2]
        # spawn success
        return instance, network_info
    def _log_original_error(self, exc_info, instance_uuid):
        LOG.error(_('Error: %s') % exc_info[1], instance_uuid=instance_uuid,
                  exc_info=exc_info)
    def _reschedule_or_error(self, context, instance, exc_info,
            requested_networks, admin_password, injected_files, is_first_time,
            request_spec, filter_properties, bdms=None,
            legacy_bdm_in_spec=True):
        """Try to re-schedule the build or re-raise the original build error to
        error out the instance.
        """
        original_context = context
        context = context.elevated()
        instance_uuid = instance['uuid']
        rescheduled = False
        compute_utils.add_instance_fault_from_exc(context,
                instance, exc_info[1], exc_info=exc_info)
        self._notify_about_instance_usage(context, instance,
                'instance.create.error', fault=exc_info[1])
        try:
            LOG.debug("Clean up resource before rescheduling.",
                      instance=instance)
            if bdms is None:
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            self._shutdown_instance(context, instance,
                                    bdms, requested_networks)
            self._cleanup_volumes(context, instance['uuid'], bdms)
        except Exception:
            # do not attempt retry if clean up failed:
            with excutils.save_and_reraise_exception():
                self._log_original_error(exc_info, instance_uuid)
        try:
            method_args = (request_spec, admin_password, injected_files,
                           requested_networks, is_first_time, filter_properties,
                           legacy_bdm_in_spec)
            task_state = task_states.SCHEDULING
            rescheduled = self._reschedule(original_context, request_spec,
                    filter_properties, instance,
                    self.scheduler_rpcapi.run_instance, method_args,
                    task_state, exc_info)
        except Exception:
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)
        return rescheduled
    def _reschedule(self, context, request_spec, filter_properties,
                    instance, reschedule_method, method_args, task_state,
                    exc_info=None):
        """Attempt to re-schedule a compute operation."""
        instance_uuid = instance['uuid']
        retry = filter_properties.get('retry', None)
        if not retry:
            # no retry information, do not reschedule.
            LOG.debug("Retry info not present, will not reschedule",
                      instance_uuid=instance_uuid)
            return
        if not request_spec:
            LOG.debug("No request spec, will not reschedule",
                      instance_uuid=instance_uuid)
            return
        request_spec['instance_uuids'] = [instance_uuid]
        LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
                  {'method': reschedule_method.func_name,
                   'num': retry['num_attempts']}, instance_uuid=instance_uuid)
        # reset the task state:
        self._instance_update(context, instance_uuid, task_state=task_state)
        if exc_info:
            # stringify to avoid circular ref problem in json serialization:
            retry['exc'] = traceback.format_exception_only(exc_info[0],
                                                           exc_info[1])
        reschedule_method(context, *method_args)
        return True
    @periodic_task.periodic_task
    def _check_instance_build_time(self, context):
        """Ensure that instances are not stuck in build."""
        timeout = CONF.instance_build_timeout
        if timeout == 0:
            return
        filters = {'vm_state': vm_states.BUILDING,
                   'host': self.host}
        building_insts = objects.InstanceList.get_by_filters(context,
            filters, expected_attrs=[], use_slave=True)
        for instance in building_insts:
            if timeutils.is_older_than(instance['created_at'], timeout):
                self._set_instance_error_state(context, instance)
                LOG.warn(_("Instance build timed out. Set to error state."),
                         instance=instance)
    def _check_instance_exists(self, context, instance):
        """Ensure an instance with the same name is not already present."""
        if self.driver.instance_exists(instance):
            raise exception.InstanceExists(name=instance.name)
    def _start_building(self, context, instance):
        """Save the host and launched_on fields and log appropriately."""
        LOG.audit(_('Starting instance...'), context=context,
                  instance=instance)
        self._instance_update(context, instance.uuid,
                              vm_state=vm_states.BUILDING,
                              task_state=None,
                              expected_task_state=(task_states.SCHEDULING,
                                                   None))
    def _allocate_network_async(self, context, instance, requested_networks,
                                macs, security_groups, is_vpn, dhcp_options):
        """Method used to allocate networks in the background.
        Broken out for testing.
        """
        LOG.debug("Allocating IP information in the background.",
                  instance=instance)
        retries = CONF.network_allocate_retries
        if retries < 0:
            LOG.warn(_("Treating negative config value (%(retries)s) for "
                       "'network_allocate_retries' as 0."),
                     {'retries': retries})
        attempts = retries > 1 and retries + 1 or 1
        retry_time = 1
        for attempt in range(1, attempts + 1):
            try:
                nwinfo = self.network_api.allocate_for_instance(
                    context, instance, vpn=is_vpn,
                    requested_networks=requested_networks,
                    macs=macs,
                    security_groups=security_groups,
                    dhcp_options=dhcp_options)
                LOG.debug('Instance network_info: |%s|', nwinfo,
                          instance=instance)
                sys_meta = instance.system_metadata
                sys_meta['network_allocated'] = 'True'
                self._instance_update(context, instance.uuid,
                                      system_metadata=sys_meta)
                return nwinfo
            except Exception:
                exc_info = sys.exc_info()
                log_info = {'attempt': attempt,
                            'attempts': attempts}
                if attempt == attempts:
                    LOG.exception(_LE('Instance failed network setup '
                                      'after %(attempts)d attempt(s)'),
                                  log_info)
                    raise exc_info[0], exc_info[1], exc_info[2]
                LOG.warn(_('Instance failed network setup '
                           '(attempt %(attempt)d of %(attempts)d)'),
                         log_info, instance=instance)
                time.sleep(retry_time)
                retry_time *= 2
                if retry_time > 30:
                    retry_time = 30
        # Not reached.
    def _build_networks_for_instance(self, context, instance,
                                     requested_networks, security_groups):
        # If we're here from a reschedule the network may already be allocated.
        if strutils.bool_from_string(
                instance.system_metadata.get('network_allocated', 'False')):
            return self._get_instance_nw_info(context, instance)
        if not self.is_neutron_security_groups:
            security_groups = []
        macs = self.driver.macs_for_instance(instance)
        dhcp_options = self.driver.dhcp_options_for_instance(instance)
        network_info = self._allocate_network(context, instance,
            requested_networks, macs, security_groups, dhcp_options)
        if not instance.access_ip_v4 and not instance.access_ip_v6:
            # If CONF.default_access_ip_network_name is set, grab the
            # corresponding network and set the access ip values accordingly.
            # Note that when there are multiple ips to choose from, an
            # arbitrary one will be chosen.
            network_name = CONF.default_access_ip_network_name
            if not network_name:
                return network_info
            for vif in network_info:
                if vif['network']['label'] == network_name:
                    for ip in vif.fixed_ips():
                        if ip['version'] == 4:
                            instance.access_ip_v4 = ip['address']
                        if ip['version'] == 6:
                            instance.access_ip_v6 = ip['address']
                    instance.save()
                    break
        return network_info
    def _allocate_network(self, context, instance, requested_networks, macs,
                          security_groups, dhcp_options):
        """Start network allocation asynchronously. Return an instance
        of NetworkInfoAsyncWrapper that can be used to retrieve the
        allocated networks when the operation has finished.
        """
        # NOTE(comstud): Since we're allocating networks asynchronously,
        # this task state has little meaning, as we won't be in this
        # state for very long.
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.NETWORKING
        instance.save(expected_task_state=[None])
        self._update_resource_tracker(context, instance)
        is_vpn = pipelib.is_vpn_image(instance.image_ref)
        return network_model.NetworkInfoAsyncWrapper(
            self._allocate_network_async, context, instance,
            requested_networks, macs, security_groups, is_vpn,
            dhcp_options)
    def _default_root_device_name(self, instance, image_meta, root_bdm):
        try:
            return self.driver.default_root_device_name(instance,
                                                         image_meta,
                                                         root_bdm)
        except NotImplementedError:
            return compute_utils.get_next_device_name(instance, [])
    def _default_device_names_for_instance(self, instance,
                                           root_device_name,
                                           *block_device_lists):
        try:
            self.driver.default_device_names_for_instance(instance,
                                                          root_device_name,
                                                          *block_device_lists)
        except NotImplementedError:
            compute_utils.default_device_names_for_instance(
                instance, root_device_name, *block_device_lists)
    def _default_block_device_names(self, context, instance,
                                    image_meta, block_devices):
        """Verify that all the devices have the device_name set. If not,
        provide a default name.
        It also ensures that there is a root_device_name and is set to the
        first block device in the boot sequence (boot_index=0).
        """
        root_bdm = block_device.get_root_bdm(block_devices)
        if not root_bdm:
            return
        # Get the root_device_name from the root BDM or the instance
        root_device_name = None
        update_instance = False
        update_root_bdm = False
        if root_bdm.device_name:
            root_device_name = root_bdm.device_name
            instance.root_device_name = root_device_name
            update_instance = True
        elif instance.root_device_name:
            root_device_name = instance.root_device_name
            root_bdm.device_name = root_device_name
            update_root_bdm = True
        else:
            root_device_name = self._default_root_device_name(instance,
                                                               image_meta,
                                                               root_bdm)
            instance.root_device_name = root_device_name
            root_bdm.device_name = root_device_name
            update_instance = update_root_bdm = True
        if update_instance:
            instance.save()
        if update_root_bdm:
            root_bdm.save()
        ephemerals = filter(block_device.new_format_is_ephemeral,
                            block_devices)
        swap = filter(block_device.new_format_is_swap,
                      block_devices)
        block_device_mapping = filter(
            driver_block_device.is_block_device_mapping, block_devices)
        self._default_device_names_for_instance(instance,
                                                root_device_name,
                                                ephemerals,
                                                swap,
                                                block_device_mapping)
    def _prep_block_device(self, context, instance, bdms,
                           do_check_attach=True):
        """Set up the block device for an instance with error logging."""
        try:
            block_device_info = {
                'root_device_name': instance['root_device_name'],
                'swap': driver_block_device.convert_swap(bdms),
                'ephemerals': driver_block_device.convert_ephemerals(bdms),
                'block_device_mapping': (
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_volumes(bdms),
                        context, instance, self.volume_api,
                        self.driver, do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_snapshots(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_images(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_blanks(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach))
            }
            if self.use_legacy_block_device_info:
                for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
                    block_device_info[bdm_type] = \
                        driver_block_device.legacy_block_devices(
                            block_device_info[bdm_type])
            # Get swap out of the list
            block_device_info['swap'] = driver_block_device.get_swap(
                block_device_info['swap'])
            return block_device_info
        except exception.OverQuota:
            msg = _LW('Failed to create block device for instance due to '
                      'being over volume resource quota')
            LOG.warn(msg, instance=instance)
            raise exception.InvalidBDM()
        except Exception:
            LOG.exception(_LE('Instance failed block device setup'),
                          instance=instance)
            raise exception.InvalidBDM()
    @object_compat
    def _spawn(self, context, instance, image_meta, network_info,
               block_device_info, injected_files, admin_password,
               set_access_ip=False):
        """Spawn an instance with error logging and update its power state."""
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SPAWNING
        instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
        try:
            self.driver.spawn(context, instance, image_meta,
                              injected_files, admin_password,
                              network_info,
                              block_device_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Instance failed to spawn'),
                              instance=instance)
        current_power_state = self._get_power_state(context, instance)
        instance.power_state = current_power_state
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        def _set_access_ip_values():
            """Add access ip values for a given instance.
            If CONF.default_access_ip_network_name is set, this method will
            grab the corresponding network and set the access ip values
            accordingly. Note that when there are multiple ips to choose
            from, an arbitrary one will be chosen.
            """
            network_name = CONF.default_access_ip_network_name
            if not network_name:
                return
            for vif in network_info:
                if vif['network']['label'] == network_name:
                    for ip in vif.fixed_ips():
                        if ip['version'] == 4:
                            instance.access_ip_v4 = ip['address']
                        if ip['version'] == 6:
                            instance.access_ip_v6 = ip['address']
                    return
        if set_access_ip:
            _set_access_ip_values()
        network_info.wait(do_raise=True)
        instance.info_cache.network_info = network_info
        instance.save(expected_task_state=task_states.SPAWNING)
        return instance
    def _notify_about_instance_usage(self, context, instance, event_suffix,
                                     network_info=None, system_metadata=None,
                                     extra_usage_info=None, fault=None):
        compute_utils.notify_about_instance_usage(
            self.notifier, context, instance, event_suffix,
            network_info=network_info,
            system_metadata=system_metadata,
            extra_usage_info=extra_usage_info, fault=fault)
    def _deallocate_network(self, context, instance,
                            requested_networks=None):
        LOG.debug('Deallocating network for instance', instance=instance)
        self.network_api.deallocate_for_instance(
            context, instance, requested_networks=requested_networks)
    def _get_instance_block_device_info(self, context, instance,
                                        refresh_conn_info=False,
                                        bdms=None):
        """Transform block devices to the driver block_device format."""
        if not bdms:
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance['uuid'])
        swap = driver_block_device.convert_swap(bdms)
        ephemerals = driver_block_device.convert_ephemerals(bdms)
        block_device_mapping = (
            driver_block_device.convert_volumes(bdms) +
            driver_block_device.convert_snapshots(bdms) +
            driver_block_device.convert_images(bdms))
        if not refresh_conn_info:
            # if the block_device_mapping has no value in connection_info
            # (returned as None), don't include in the mapping
            block_device_mapping = [
                bdm for bdm in block_device_mapping
                if bdm.get('connection_info')]
        else:
            block_device_mapping = driver_block_device.refresh_conn_infos(
                block_device_mapping, context, instance, self.volume_api,
                self.driver)
        if self.use_legacy_block_device_info:
            swap = driver_block_device.legacy_block_devices(swap)
            ephemerals = driver_block_device.legacy_block_devices(ephemerals)
            block_device_mapping = driver_block_device.legacy_block_devices(
                block_device_mapping)
        # Get swap out of the list
        swap = driver_block_device.get_swap(swap)
        return {'swap': swap,
                'ephemerals': ephemerals,
                'block_device_mapping': block_device_mapping}
    # NOTE(mikal): No object_compat wrapper on this method because its
    # callers all pass objects already
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def build_and_run_instance(self, context, instance, image, request_spec,
                               filter_properties, admin_password=None,
                               injected_files=None, requested_networks=None,
                               security_groups=None, block_device_mapping=None,
                               node=None, limits=None):
        # NOTE(danms): Remove this in v4.0 of the RPC API
        if (requested_networks and
                not isinstance(requested_networks,
                               objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])
        @utils.synchronized(instance.uuid)
do_build_and_run_instance(context, instance, image, request_spec,1718 filter_properties, admin_password, injected_files,1719 requested_networks, security_groups, block_device_mapping,1720 node=None, limits=None):1721 try:1722 LOG.audit(_('Starting instance...'), context=context,1723 instance=instance)1724 instance.vm_state = vm_states.BUILDING1725 instance.task_state = None1726 instance.save(expected_task_state=1727 (task_states.SCHEDULING, None))1728 except exception.InstanceNotFound:1729 msg = 'Instance disappeared before build.'1730 LOG.debug(msg, instance=instance)1731 return1732 except exception.UnexpectedTaskStateError as e:1733 LOG.debug(e.format_message(), instance=instance)1734 return1735 # b64 decode the files to inject:1736 decoded_files = self._decode_files(injected_files)1737 if limits is None:1738 limits = {}1739 if node is None:1740 node = self.driver.get_available_nodes(refresh=True)[0]1741 LOG.debug('No node specified, defaulting to %s', node,1742 instance=instance)1743 try:1744 self._build_and_run_instance(context, instance, image,1745 decoded_files, admin_password, requested_networks,1746 security_groups, block_device_mapping, node, limits,1747 filter_properties)1748 except exception.RescheduledException as e:1749 LOG.debug(e.format_message(), instance=instance)1750 retry = filter_properties.get('retry', None)1751 if not retry:1752 # no retry information, do not reschedule.1753 LOG.debug("Retry info not present, will not reschedule",1754 instance=instance)1755 self._cleanup_allocated_networks(context, instance,1756 requested_networks)1757 compute_utils.add_instance_fault_from_exc(context,1758 instance, e, sys.exc_info())1759 self._set_instance_error_state(context, instance)1760 return1761 retry['exc'] = traceback.format_exception(*sys.exc_info())1762 # NOTE(comstud): Deallocate networks if the driver wants1763 # us to do so.1764 if self.driver.deallocate_networks_on_reschedule(instance):1765 self._cleanup_allocated_networks(context, instance,1766 requested_networks)1767 instance.task_state = task_states.SCHEDULING1768 instance.save()1769 self.compute_task_api.build_instances(context, [instance],1770 image, filter_properties, admin_password,1771 injected_files, requested_networks, security_groups,1772 block_device_mapping)1773 except (exception.InstanceNotFound,1774 exception.UnexpectedDeletingTaskStateError):1775 msg = 'Instance disappeared during build.'1776 LOG.debug(msg, instance=instance)1777 self._cleanup_allocated_networks(context, instance,1778 requested_networks)1779 except exception.BuildAbortException as e:1780 LOG.exception(e.format_message(), instance=instance)1781 self._cleanup_allocated_networks(context, instance,1782 requested_networks)1783 self._cleanup_volumes(context, instance.uuid,1784 block_device_mapping, raise_exc=False)1785 compute_utils.add_instance_fault_from_exc(context, instance,1786 e, sys.exc_info())1787 self._set_instance_error_state(context, instance)1788 except Exception as e:1789 # Should not reach here.1790 msg = _LE('Unexpected build failure, not rescheduling build.')1791 LOG.exception(msg, instance=instance)1792 self._cleanup_allocated_networks(context, instance,1793 requested_networks)1794 self._cleanup_volumes(context, instance.uuid,1795 block_device_mapping, raise_exc=False)1796 compute_utils.add_instance_fault_from_exc(context, instance,1797 e, sys.exc_info())1798 self._set_instance_error_state(context, instance)1799 do_build_and_run_instance(context, instance, image, request_spec,1800 filter_properties, admin_password, 
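
When _build_and_run_instance raises RescheduledException above, the handler either gives up (no retry info in filter_properties) or stashes the formatted traceback under retry['exc'] and hands the build back to the conductor. A compact sketch of that decision; record_fault and resubmit are hypothetical callbacks standing in for the Nova fault-recording and build_instances calls:

import traceback

def handle_reschedule_sketch(filter_properties, exc, record_fault, resubmit):
    """Reschedule a failed build if retry info is present, else fail it."""
    retry = filter_properties.get('retry')
    if not retry:
        # No retry information: record the fault and stop here.
        record_fault(exc)
        return 'error'
    # Keep the original failure around for the scheduler/operators.
    retry['exc'] = traceback.format_exception(type(exc), exc, exc.__traceback__)
    resubmit()
    return 'rescheduled'

# Example usage with throwaway callbacks:
print(handle_reschedule_sketch({'retry': {'num_attempts': 1}},
                               RuntimeError('spawn failed'),
                               record_fault=lambda e: None,
                               resubmit=lambda: None))
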
injected_files,1801 requested_networks, security_groups, block_device_mapping,1802 node, limits)1803 def _build_and_run_instance(self, context, instance, image, injected_files,1804 admin_password, requested_networks, security_groups,1805 block_device_mapping, node, limits, filter_properties):1806 image_name = image.get('name')1807 self._notify_about_instance_usage(context, instance, 'create.start',1808 extra_usage_info={'image_name': image_name})1809 try:1810 rt = self._get_resource_tracker(node)1811 with rt.instance_claim(context, instance, limits):1812 # NOTE(russellb) It's important that this validation be done1813 # *after* the resource tracker instance claim, as that is where1814 # the host is set on the instance.1815 self._validate_instance_group_policy(context, instance,1816 filter_properties)1817 with self._build_resources(context, instance,1818 requested_networks, security_groups, image,1819 block_device_mapping) as resources:1820 instance.vm_state = vm_states.BUILDING1821 instance.task_state = task_states.SPAWNING1822 instance.save(expected_task_state=1823 task_states.BLOCK_DEVICE_MAPPING)1824 block_device_info = resources['block_device_info']1825 network_info = resources['network_info']1826 self.driver.spawn(context, instance, image,1827 injected_files, admin_password,1828 network_info=network_info,1829 block_device_info=block_device_info)1830 except (exception.InstanceNotFound,1831 exception.UnexpectedDeletingTaskStateError) as e:1832 with excutils.save_and_reraise_exception():1833 self._notify_about_instance_usage(context, instance,1834 'create.end', fault=e)1835 except exception.ComputeResourcesUnavailable as e:1836 LOG.debug(e.format_message(), instance=instance)1837 self._notify_about_instance_usage(context, instance,1838 'create.error', fault=e)1839 raise exception.RescheduledException(1840 instance_uuid=instance.uuid, reason=e.format_message())1841 except exception.BuildAbortException as e:1842 with excutils.save_and_reraise_exception():1843 LOG.debug(e.format_message(), instance=instance)1844 self._notify_about_instance_usage(context, instance,1845 'create.error', fault=e)1846 except (exception.FixedIpLimitExceeded,1847 exception.NoMoreNetworks) as e:1848 LOG.warn(_LW('No more network or fixed IP to be allocated'),1849 instance=instance)1850 self._notify_about_instance_usage(context, instance,1851 'create.error', fault=e)1852 msg = _('Failed to allocate the network(s) with error %s, '1853 'not rescheduling.') % e.format_message()1854 raise exception.BuildAbortException(instance_uuid=instance.uuid,1855 reason=msg)1856 except (exception.VirtualInterfaceCreateException,1857 exception.VirtualInterfaceMacAddressException) as e:1858 LOG.exception(_LE('Failed to allocate network(s)'),1859 instance=instance)1860 self._notify_about_instance_usage(context, instance,1861 'create.error', fault=e)1862 msg = _('Failed to allocate the network(s), not rescheduling.')1863 raise exception.BuildAbortException(instance_uuid=instance.uuid,1864 reason=msg)1865 except (exception.FlavorDiskTooSmall,1866 exception.FlavorMemoryTooSmall,1867 exception.ImageNotActive,1868 exception.ImageUnacceptable) as e:1869 self._notify_about_instance_usage(context, instance,1870 'create.error', fault=e)1871 raise exception.BuildAbortException(instance_uuid=instance.uuid,1872 reason=e.format_message())1873 except Exception as e:1874 self._notify_about_instance_usage(context, instance,1875 'create.error', fault=e)1876 raise exception.RescheduledException(1877 instance_uuid=instance.uuid, 
reason=six.text_type(e))1878 # NOTE(alaski): This is only useful during reschedules, remove it now.1879 instance.system_metadata.pop('network_allocated', None)1880 instance.power_state = self._get_power_state(context, instance)1881 instance.vm_state = vm_states.ACTIVE1882 instance.task_state = None1883 instance.launched_at = timeutils.utcnow()1884 try:1885 instance.save(expected_task_state=task_states.SPAWNING)1886 except (exception.InstanceNotFound,1887 exception.UnexpectedDeletingTaskStateError) as e:1888 with excutils.save_and_reraise_exception():1889 self._notify_about_instance_usage(context, instance,1890 'create.end', fault=e)1891 self._notify_about_instance_usage(context, instance, 'create.end',1892 extra_usage_info={'message': _('Success')},1893 network_info=network_info)1894 @contextlib.contextmanager1895 def _build_resources(self, context, instance, requested_networks,1896 security_groups, image, block_device_mapping):1897 resources = {}1898 network_info = None1899 try:1900 network_info = self._build_networks_for_instance(context, instance,1901 requested_networks, security_groups)1902 resources['network_info'] = network_info1903 except (exception.InstanceNotFound,1904 exception.UnexpectedDeletingTaskStateError):1905 raise1906 except exception.UnexpectedTaskStateError as e:1907 raise exception.BuildAbortException(instance_uuid=instance.uuid,1908 reason=e.format_message())1909 except Exception:1910 # Because this allocation is async any failures are likely to occur1911 # when the driver accesses network_info during spawn().1912 LOG.exception(_LE('Failed to allocate network(s)'),1913 instance=instance)1914 msg = _('Failed to allocate the network(s), not rescheduling.')1915 raise exception.BuildAbortException(instance_uuid=instance.uuid,1916 reason=msg)1917 try:1918 # Verify that all the BDMs have a device_name set and assign a1919 # default to the ones missing it with the help of the driver.1920 self._default_block_device_names(context, instance, image,1921 block_device_mapping)1922 instance.vm_state = vm_states.BUILDING1923 instance.task_state = task_states.BLOCK_DEVICE_MAPPING1924 instance.save()1925 block_device_info = self._prep_block_device(context, instance,1926 block_device_mapping)1927 resources['block_device_info'] = block_device_info1928 except (exception.InstanceNotFound,1929 exception.UnexpectedDeletingTaskStateError):1930 with excutils.save_and_reraise_exception() as ctxt:1931 # Make sure the async call finishes1932 if network_info is not None:1933 network_info.wait(do_raise=False)1934 except exception.UnexpectedTaskStateError as e:1935 # Make sure the async call finishes1936 if network_info is not None:1937 network_info.wait(do_raise=False)1938 raise exception.BuildAbortException(instance_uuid=instance.uuid,1939 reason=e.format_message())1940 except Exception:1941 LOG.exception(_LE('Failure prepping block device'),1942 instance=instance)1943 # Make sure the async call finishes1944 if network_info is not None:1945 network_info.wait(do_raise=False)1946 msg = _('Failure prepping block device.')1947 raise exception.BuildAbortException(instance_uuid=instance.uuid,1948 reason=msg)1949 try:1950 yield resources1951 except Exception as exc:1952 with excutils.save_and_reraise_exception() as ctxt:1953 if not isinstance(exc, (exception.InstanceNotFound,1954 exception.UnexpectedDeletingTaskStateError)):1955 LOG.exception(_LE('Instance failed to spawn'),1956 instance=instance)1957 # Make sure the async call finishes1958 if network_info is not None:1959 
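
_build_resources above is a context manager: it allocates networking and block devices, yields them to the spawn code, and on failure makes sure teardown is attempted before the error propagates. A stripped-down sketch of the same shape, assuming hypothetical allocate/cleanup callables rather than Nova's network and block-device plumbing:

import contextlib

class BuildAbort(Exception):
    pass

@contextlib.contextmanager
def build_resources_sketch(allocate_network, prep_block_device, cleanup):
    resources = {}
    try:
        resources['network_info'] = allocate_network()
        resources['block_device_info'] = prep_block_device()
    except Exception as exc:
        raise BuildAbort('failed preparing resources: %s' % exc)
    try:
        yield resources          # the caller spawns the instance here
    except Exception:
        cleanup_failed = False
        try:
            cleanup(resources)   # best-effort teardown of what was built
        except Exception:
            cleanup_failed = True
        if cleanup_failed:
            raise BuildAbort('could not clean up failed build')
        raise                    # re-raise the original spawn failure

with build_resources_sketch(lambda: ['vif0'], lambda: {}, lambda r: None) as res:
    print(res['network_info'])
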
network_info.wait(do_raise=False)1960 try:1961 self._shutdown_instance(context, instance,1962 block_device_mapping, requested_networks,1963 try_deallocate_networks=False)1964 except Exception:1965 ctxt.reraise = False1966 msg = _('Could not clean up failed build,'1967 ' not rescheduling')1968 raise exception.BuildAbortException(1969 instance_uuid=instance.uuid, reason=msg)1970 def _cleanup_allocated_networks(self, context, instance,1971 requested_networks):1972 try:1973 self._deallocate_network(context, instance, requested_networks)1974 except Exception:1975 msg = _LE('Failed to deallocate networks')1976 LOG.exception(msg, instance=instance)1977 return1978 instance.system_metadata['network_allocated'] = 'False'1979 try:1980 instance.save()1981 except exception.InstanceNotFound:1982 # NOTE(alaski): It's possible that we're cleaning up the networks1983 # because the instance was deleted. If that's the case then this1984 # exception will be raised by instance.save()1985 pass1986 @object_compat1987 @messaging.expected_exceptions(exception.BuildAbortException,1988 exception.UnexpectedTaskStateError,1989 exception.VirtualInterfaceCreateException,1990 exception.RescheduledException)1991 @wrap_exception()1992 @reverts_task_state1993 @wrap_instance_event1994 @wrap_instance_fault1995 def run_instance(self, context, instance, request_spec,1996 filter_properties, requested_networks,1997 injected_files, admin_password,1998 is_first_time, node, legacy_bdm_in_spec):1999 # NOTE(alaski) This method should be deprecated when the scheduler and2000 # compute rpc interfaces are bumped to 4.x, and slated for removal in2001 # 5.x as it is no longer used.2002 if filter_properties is None:2003 filter_properties = {}2004 @utils.synchronized(instance.uuid)2005 def do_run_instance():2006 self._run_instance(context, request_spec,2007 filter_properties, requested_networks, injected_files,2008 admin_password, is_first_time, node, instance,2009 legacy_bdm_in_spec)2010 do_run_instance()2011 def _try_deallocate_network(self, context, instance,2012 requested_networks=None):2013 try:2014 # tear down allocated network structure2015 self._deallocate_network(context, instance, requested_networks)2016 except Exception:2017 with excutils.save_and_reraise_exception():2018 LOG.error(_('Failed to deallocate network for instance.'),2019 instance=instance)2020 self._set_instance_error_state(context, instance)2021 def _get_power_off_values(self, context, instance, clean_shutdown):2022 """Get the timing configuration for powering down this instance."""2023 if clean_shutdown:2024 timeout = compute_utils.get_value_from_system_metadata(instance,2025 key='image_os_shutdown_timeout', type=int,2026 default=CONF.shutdown_timeout)2027 retry_interval = self.SHUTDOWN_RETRY_INTERVAL2028 else:2029 timeout = 02030 retry_interval = 02031 return timeout, retry_interval2032 def _power_off_instance(self, context, instance, clean_shutdown=True):2033 """Power off an instance on this host."""2034 timeout, retry_interval = self._get_power_off_values(context,2035 instance, clean_shutdown)2036 self.driver.power_off(instance, timeout, retry_interval)2037 def _shutdown_instance(self, context, instance,2038 bdms, requested_networks=None, notify=True,2039 try_deallocate_networks=True):2040 """Shutdown an instance on this host.2041 :param:context: security context2042 :param:instance: a nova.objects.Instance object2043 :param:bdms: the block devices for the instance to be torn2044 down2045 :param:requested_networks: the networks on which the instance2046 has 
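
_get_power_off_values above asks compute_utils.get_value_from_system_metadata for an image-defined shutdown timeout, falling back to the configured default, and uses zero timeout and zero retries for an unclean stop. A small standalone sketch of that typed-lookup-with-default idea (the function and parameter names here are illustrative, not Nova's API):

def get_typed_meta(system_metadata, key, type_, default):
    """Fetch a metadata value coerced to type_, or default on miss/bad value."""
    try:
        return type_(system_metadata[key])
    except (KeyError, TypeError, ValueError):
        return default

def power_off_values(system_metadata, clean_shutdown, conf_timeout=60,
                     retry_interval=10):
    if not clean_shutdown:
        return 0, 0          # hard power-off: no grace period, no retries
    timeout = get_typed_meta(system_metadata, 'image_os_shutdown_timeout',
                             int, conf_timeout)
    return timeout, retry_interval

print(power_off_values({'image_os_shutdown_timeout': '120'}, True))   # (120, 10)
print(power_off_values({}, False))                                    # (0, 0)
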
ports
        :param:notify: true if a final usage notification should be
            emitted
        :param:try_deallocate_networks: false if we should avoid
            trying to teardown networking
        """
        context = context.elevated()
        LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
                  context=context, instance=instance)
        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.start")
        network_info = compute_utils.get_nw_info_for_instance(instance)
        # NOTE(vish) get bdms before destroying the instance
        vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
        block_device_info = self._get_instance_block_device_info(
            context, instance, bdms=bdms)
        # NOTE(melwitt): attempt driver destroy before releasing ip, may
        # want to keep ip allocated for certain failures
        try:
            self.driver.destroy(context, instance, network_info,
                                block_device_info)
        except exception.InstancePowerOffFailure:
            # if the instance can't power off, don't release the ip
            with excutils.save_and_reraise_exception():
                pass
        except Exception:
            with excutils.save_and_reraise_exception():
                # deallocate ip and fail without proceeding to
                # volume api calls, preserving current behavior
                if try_deallocate_networks:
                    self._try_deallocate_network(context, instance,
                                                 requested_networks)
        if try_deallocate_networks:
            self._try_deallocate_network(context, instance, requested_networks)
        for bdm in vol_bdms:
            try:
                # NOTE(vish): actual driver detach done in driver.destroy, so
                # just tell cinder that we are done with it.
                connector = self.driver.get_volume_connector(instance)
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(context, bdm.volume_id)
            except exception.DiskNotFound as exc:
                LOG.debug('Ignoring DiskNotFound: %s', exc,
                          instance=instance)
            except exception.VolumeNotFound as exc:
                LOG.debug('Ignoring VolumeNotFound: %s', exc,
                          instance=instance)
            except cinder_exception.EndpointNotFound as exc:
                LOG.warn(_LW('Ignoring EndpointNotFound: %s'), exc,
                         instance=instance)
        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.end")

    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
        exc_info = None
        for bdm in bdms:
            LOG.debug("terminating bdm %s", bdm,
                      instance_uuid=instance_uuid)
            if bdm.volume_id and bdm.delete_on_termination:
                try:
                    self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    exc_info = sys.exc_info()
                    LOG.warn(_LW('Failed to delete volume: %(volume_id)s due '
                                 'to %(exc)s'), {'volume_id': bdm.volume_id,
                                                 'exc': unicode(exc)})
        if exc_info is not None and raise_exc:
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

    @hooks.add_hook("delete_instance")
    def _delete_instance(self, context, instance, bdms, quotas):
        """Delete an instance on this host.
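
_cleanup_volumes above keeps deleting volumes even when one deletion fails, remembers the failure, and only re-raises after the loop (via six.reraise in this Python 2 era code). The same pattern in a self-contained Python 3 sketch with a fake volume API:

def cleanup_volumes_sketch(volume_ids, delete_volume, raise_exc=True):
    """Delete every volume, remembering the last failure to re-raise at the end."""
    saved_exc = None
    for volume_id in volume_ids:
        try:
            delete_volume(volume_id)
        except Exception as exc:
            saved_exc = exc
            print('Failed to delete volume %s: %s' % (volume_id, exc))
    if saved_exc is not None and raise_exc:
        raise saved_exc

def flaky_delete(volume_id):
    if volume_id == 'vol-2':
        raise RuntimeError('backend unavailable')

cleanup_volumes_sketch(['vol-1', 'vol-2', 'vol-3'], flaky_delete, raise_exc=False)
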
Commit or rollback quotas2120 as necessary.2121 """2122 instance_uuid = instance['uuid']2123 was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED2124 if was_soft_deleted:2125 # Instances in SOFT_DELETED vm_state have already had quotas2126 # decremented.2127 try:2128 quotas.rollback()2129 except Exception:2130 pass2131 try:2132 events = self.instance_events.clear_events_for_instance(instance)2133 if events:2134 LOG.debug('Events pending at deletion: %(events)s',2135 {'events': ','.join(events.keys())},2136 instance=instance)2137 instance.info_cache.delete()2138 self._notify_about_instance_usage(context, instance,2139 "delete.start")2140 self._shutdown_instance(context, instance, bdms)2141 # NOTE(vish): We have already deleted the instance, so we have2142 # to ignore problems cleaning up the volumes. It2143 # would be nice to let the user know somehow that2144 # the volume deletion failed, but it is not2145 # acceptable to have an instance that can not be2146 # deleted. Perhaps this could be reworked in the2147 # future to set an instance fault the first time2148 # and to only ignore the failure if the instance2149 # is already in ERROR.2150 self._cleanup_volumes(context, instance_uuid, bdms,2151 raise_exc=False)2152 # if a delete task succeed, always update vm state and task2153 # state without expecting task state to be DELETING2154 instance.vm_state = vm_states.DELETED2155 instance.task_state = None2156 instance.terminated_at = timeutils.utcnow()2157 instance.save()2158 self._update_resource_tracker(context, instance)2159 system_meta = instance.system_metadata2160 instance.destroy()2161 except Exception:2162 with excutils.save_and_reraise_exception():2163 quotas.rollback()2164 self._complete_deletion(context,2165 instance,2166 bdms,2167 quotas,2168 system_meta)2169 @wrap_exception()2170 @reverts_task_state2171 @wrap_instance_event2172 @wrap_instance_fault2173 def terminate_instance(self, context, instance, bdms, reservations):2174 """Terminate an instance on this host."""2175 # NOTE (ndipanov): If we get non-object BDMs, just get them from the2176 # db again, as this means they are sent in the old format and we want2177 # to avoid converting them back when we can just get them.2178 # Remove this when we bump the RPC major version to 4.02179 if (bdms and2180 any(not isinstance(bdm, obj_base.NovaObject)2181 for bdm in bdms)):2182 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(2183 context, instance.uuid)2184 quotas = objects.Quotas.from_reservations(context,2185 reservations,2186 instance=instance)2187 @utils.synchronized(instance['uuid'])2188 def do_terminate_instance(instance, bdms):2189 try:2190 self._delete_instance(context, instance, bdms, quotas)2191 except exception.InstanceNotFound:2192 LOG.info(_("Instance disappeared during terminate"),2193 instance=instance)2194 except Exception:2195 # As we're trying to delete always go to Error if something2196 # goes wrong that _delete_instance can't handle.2197 with excutils.save_and_reraise_exception():2198 LOG.exception(_LE('Setting instance vm_state to ERROR'),2199 instance=instance)2200 self._set_instance_error_state(context, instance)2201 do_terminate_instance(instance, bdms)2202 # NOTE(johannes): This is probably better named power_off_instance2203 # so it matches the driver method, but because of other issues, we2204 # can't use that name in grizzly.2205 @wrap_exception()2206 @reverts_task_state2207 @wrap_instance_event2208 @wrap_instance_fault2209 def stop_instance(self, context, instance, 
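
terminate_instance above (like build_and_run_instance and stop_instance) serializes work per instance with @utils.synchronized(instance['uuid']), an oslo lock helper keyed by the instance uuid. A minimal thread-based illustration of that keyed-lock idea, not Nova's actual implementation:

import functools
import threading

_lock_guard = threading.Lock()
_locks = {}

def _lock_for(key):
    # Create (once) and return the lock dedicated to this key.
    with _lock_guard:
        return _locks.setdefault(key, threading.Lock())

def synchronized(key):
    """Serialize callers that share the same key (e.g. an instance uuid)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with _lock_for(key):
                return func(*args, **kwargs)
        return wrapper
    return decorator

@synchronized('3f6f2a70-example-instance-uuid')
def do_terminate():
    print('terminating while holding the per-instance lock')

do_terminate()
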
clean_shutdown=True):2210 """Stopping an instance on this host."""2211 @utils.synchronized(instance.uuid)2212 def do_stop_instance():2213 current_power_state = self._get_power_state(context, instance)2214 LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '2215 'current task_state: %(task_state)s, current DB '2216 'power_state: %(db_power_state)s, current VM '2217 'power_state: %(current_power_state)s',2218 dict(vm_state=instance.vm_state,2219 task_state=instance.task_state,2220 db_power_state=instance.power_state,2221 current_power_state=current_power_state),2222 instance_uuid=instance.uuid)2223 # NOTE(mriedem): If the instance is already powered off, we are2224 # possibly tearing down and racing with other operations, so we can2225 # expect the task_state to be None if something else updates the2226 # instance and we're not locking it.2227 expected_task_state = [task_states.POWERING_OFF]2228 # The list of power states is from _sync_instance_power_state.2229 if current_power_state in (power_state.NOSTATE,2230 power_state.SHUTDOWN,2231 power_state.CRASHED):2232 LOG.info(_LI('Instance is already powered off in the '2233 'hypervisor when stop is called.'),2234 instance=instance)2235 expected_task_state.append(None)2236 self._notify_about_instance_usage(context, instance,2237 "power_off.start")2238 self._power_off_instance(context, instance, clean_shutdown)2239 current_power_state = self._get_power_state(context, instance)2240 instance.power_state = current_power_state2241 instance.vm_state = vm_states.STOPPED2242 instance.task_state = None2243 instance.save(expected_task_state=expected_task_state)2244 self._notify_about_instance_usage(context, instance,2245 "power_off.end")2246 do_stop_instance()2247 def _power_on(self, context, instance):2248 network_info = self._get_instance_nw_info(context, instance)2249 block_device_info = self._get_instance_block_device_info(context,2250 instance)2251 self.driver.power_on(context, instance,2252 network_info,2253 block_device_info)2254 # NOTE(johannes): This is probably better named power_on_instance2255 # so it matches the driver method, but because of other issues, we2256 # can't use that name in grizzly.2257 @wrap_exception()2258 @reverts_task_state2259 @wrap_instance_event2260 @wrap_instance_fault2261 def start_instance(self, context, instance):2262 """Starting an instance on this host."""2263 self._notify_about_instance_usage(context, instance, "power_on.start")2264 self._power_on(context, instance)2265 current_power_state = self._get_power_state(context, instance)2266 instance.power_state = current_power_state2267 instance.vm_state = vm_states.ACTIVE2268 instance.task_state = None2269 instance.save(expected_task_state=task_states.POWERING_ON)2270 self._notify_about_instance_usage(context, instance, "power_on.end")2271 @wrap_exception()2272 @reverts_task_state2273 @wrap_instance_event2274 @wrap_instance_fault2275 def soft_delete_instance(self, context, instance, reservations):2276 """Soft delete an instance on this host."""2277 quotas = objects.Quotas.from_reservations(context,2278 reservations,2279 instance=instance)2280 try:2281 self._notify_about_instance_usage(context, instance,2282 "soft_delete.start")2283 try:2284 self.driver.soft_delete(instance)2285 except NotImplementedError:2286 # Fallback to just powering off the instance if the2287 # hypervisor doesn't implement the soft_delete method2288 self.driver.power_off(instance)2289 current_power_state = self._get_power_state(context, instance)2290 instance.power_state = 
current_power_state2291 instance.vm_state = vm_states.SOFT_DELETED2292 instance.task_state = None2293 instance.save(expected_task_state=[task_states.SOFT_DELETING])2294 except Exception:2295 with excutils.save_and_reraise_exception():2296 quotas.rollback()2297 quotas.commit()2298 self._notify_about_instance_usage(context, instance, "soft_delete.end")2299 @object_compat2300 @wrap_exception()2301 @reverts_task_state2302 @wrap_instance_event2303 @wrap_instance_fault2304 def restore_instance(self, context, instance):2305 """Restore a soft-deleted instance on this host."""2306 self._notify_about_instance_usage(context, instance, "restore.start")2307 try:2308 self.driver.restore(instance)2309 except NotImplementedError:2310 # Fallback to just powering on the instance if the hypervisor2311 # doesn't implement the restore method2312 self._power_on(context, instance)2313 current_power_state = self._get_power_state(context, instance)2314 instance.power_state = current_power_state2315 instance.vm_state = vm_states.ACTIVE2316 instance.task_state = None2317 instance.save(expected_task_state=task_states.RESTORING)2318 self._notify_about_instance_usage(context, instance, "restore.end")2319 def _rebuild_default_impl(self, context, instance, image_meta,2320 injected_files, admin_password, bdms,2321 detach_block_devices, attach_block_devices,2322 network_info=None,2323 recreate=False, block_device_info=None,2324 preserve_ephemeral=False):2325 if preserve_ephemeral:2326 # The default code path does not support preserving ephemeral2327 # partitions.2328 raise exception.PreserveEphemeralNotSupported()2329 detach_block_devices(context, bdms)2330 if not recreate:2331 self.driver.destroy(context, instance, network_info,2332 block_device_info=block_device_info)2333 instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING2334 instance.save(expected_task_state=[task_states.REBUILDING])2335 new_block_device_info = attach_block_devices(context, instance, bdms)2336 instance.task_state = task_states.REBUILD_SPAWNING2337 instance.save(2338 expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])2339 self.driver.spawn(context, instance, image_meta, injected_files,2340 admin_password, network_info=network_info,2341 block_device_info=new_block_device_info)2342 @object_compat2343 @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)2344 @wrap_exception()2345 @reverts_task_state2346 @wrap_instance_event2347 @wrap_instance_fault2348 def rebuild_instance(self, context, instance, orig_image_ref, image_ref,2349 injected_files, new_pass, orig_sys_metadata,2350 bdms, recreate, on_shared_storage,2351 preserve_ephemeral=False):2352 """Destroy and re-make this instance.2353 A 'rebuild' effectively purges all existing data from the system and2354 remakes the VM with given 'metadata' and 'personalities'.2355 :param context: `nova.RequestContext` object2356 :param instance: Instance object2357 :param orig_image_ref: Original image_ref before rebuild2358 :param image_ref: New image_ref for rebuild2359 :param injected_files: Files to inject2360 :param new_pass: password to set on rebuilt instance2361 :param orig_sys_metadata: instance system metadata from pre-rebuild2362 :param bdms: block-device-mappings to use for rebuild2363 :param recreate: True if the instance is being recreated (e.g. 
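
soft_delete_instance and restore_instance above both try the specialised driver call first and fall back to a plain power_off/power_on when the virt driver raises NotImplementedError. That fallback pattern in isolation, using a hypothetical minimal driver:

class MinimalDriver(object):
    """Hypothetical driver with no soft-delete support."""
    def soft_delete(self, instance):
        raise NotImplementedError()

    def power_off(self, instance):
        print('powered off %s instead' % instance)

def soft_delete_with_fallback(driver, instance):
    try:
        driver.soft_delete(instance)
    except NotImplementedError:
        # Hypervisor lacks soft delete; powering off keeps the promised
        # behaviour (instance stops running, can still be restored later).
        driver.power_off(instance)

soft_delete_with_fallback(MinimalDriver(), 'instance-1')
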
the
            hypervisor it was on failed) - cleanup of old state will be
            skipped.
        :param on_shared_storage: True if instance files on shared storage
        :param preserve_ephemeral: True if the default ephemeral storage
            partition must be preserved on rebuild
        """
        context = context.elevated()
        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
        # db again, as this means they are sent in the old format and we want
        # to avoid converting them back when we can just get them.
        # Remove this on the next major RPC version bump
        if (bdms and
            any(not isinstance(bdm, obj_base.NovaObject)
                for bdm in bdms)):
            bdms = None
        orig_vm_state = instance.vm_state
        with self._error_out_instance_on_exception(context, instance):
            LOG.audit(_("Rebuilding instance"), context=context,
                      instance=instance)
            if recreate:
                if not self.driver.capabilities["supports_recreate"]:
                    raise exception.InstanceRecreateNotSupported
                self._check_instance_exists(context, instance)
                # To cover case when admin expects that instance files are on
                # shared storage, but not accessible and vice versa
                if on_shared_storage != self.driver.instance_on_disk(instance):
                    raise exception.InvalidSharedStorage(
                            _("Invalid state of instance files on shared"
                              " storage"))
                if on_shared_storage:
                    LOG.info(_('disk on shared storage, recreating using'
                               ' existing disk'))
                else:
                    image_ref = orig_image_ref = instance.image_ref
                    LOG.info(_("disk not on shared storage, rebuilding from:"
                               " '%s'") % str(image_ref))
                # NOTE(mriedem): On a recreate (evacuate), we need to update
                # the instance's host and node properties to reflect it's
                # destination node for the recreate.
                node_name = None
                try:
                    compute_node = self._get_compute_info(context, self.host)
                    node_name = compute_node.hypervisor_hostname
                except exception.NotFound:
                    LOG.exception(_LE('Failed to get compute_info for %s'),
                                  self.host)
                finally:
                    instance.host = self.host
                    instance.node = node_name
                    instance.save()
            if image_ref:
                image_meta = self.image_api.get(context, image_ref)
            else:
                image_meta = {}
            # This instance.exists message should contain the original
            # image_ref, not the new one.  Since the DB has been updated
            # to point to the new one...
we have to override it.2421 # TODO(jaypipes): Move generate_image_url() into the nova.image.api2422 orig_image_ref_url = glance.generate_image_url(orig_image_ref)2423 extra_usage_info = {'image_ref_url': orig_image_ref_url}2424 self.conductor_api.notify_usage_exists(context,2425 obj_base.obj_to_primitive(instance),2426 current_period=True, system_metadata=orig_sys_metadata,2427 extra_usage_info=extra_usage_info)2428 # This message should contain the new image_ref2429 extra_usage_info = {'image_name': image_meta.get('name', '')}2430 self._notify_about_instance_usage(context, instance,2431 "rebuild.start", extra_usage_info=extra_usage_info)2432 instance.power_state = self._get_power_state(context, instance)2433 instance.task_state = task_states.REBUILDING2434 instance.save(expected_task_state=[task_states.REBUILDING])2435 if recreate:2436 self.network_api.setup_networks_on_host(2437 context, instance, self.host)2438 network_info = compute_utils.get_nw_info_for_instance(instance)2439 if bdms is None:2440 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(2441 context, instance.uuid)2442 block_device_info = \2443 self._get_instance_block_device_info(2444 context, instance, bdms=bdms)2445 def detach_block_devices(context, bdms):2446 for bdm in bdms:2447 if bdm.is_volume:2448 self.volume_api.detach(context, bdm.volume_id)2449 files = self._decode_files(injected_files)2450 kwargs = dict(2451 context=context,2452 instance=instance,2453 image_meta=image_meta,2454 injected_files=files,2455 admin_password=new_pass,2456 bdms=bdms,2457 detach_block_devices=detach_block_devices,2458 attach_block_devices=self._prep_block_device,2459 block_device_info=block_device_info,2460 network_info=network_info,2461 preserve_ephemeral=preserve_ephemeral)2462 try:2463 self.driver.rebuild(**kwargs)2464 except NotImplementedError:2465 # NOTE(rpodolyaka): driver doesn't provide specialized version2466 # of rebuild, fall back to the default implementation2467 self._rebuild_default_impl(**kwargs)2468 instance.power_state = self._get_power_state(context, instance)2469 instance.vm_state = vm_states.ACTIVE2470 instance.task_state = None2471 instance.launched_at = timeutils.utcnow()2472 instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])2473 if orig_vm_state == vm_states.STOPPED:2474 LOG.info(_LI("bringing vm to original state: '%s'"),2475 orig_vm_state, instance=instance)2476 instance.vm_state = vm_states.ACTIVE2477 instance.task_state = task_states.POWERING_OFF2478 instance.progress = 02479 instance.save()2480 self.stop_instance(context, instance)2481 self._notify_about_instance_usage(2482 context, instance, "rebuild.end",2483 network_info=network_info,2484 extra_usage_info=extra_usage_info)2485 def _handle_bad_volumes_detached(self, context, instance, bad_devices,2486 block_device_info):2487 """Handle cases where the virt-layer had to detach non-working volumes2488 in order to complete an operation.2489 """2490 for bdm in block_device_info['block_device_mapping']:2491 if bdm.get('mount_device') in bad_devices:2492 try:2493 volume_id = bdm['connection_info']['data']['volume_id']2494 except KeyError:2495 continue2496 # NOTE(sirp): ideally we'd just call2497 # `compute_api.detach_volume` here but since that hits the2498 # DB directly, that's off limits from within the2499 # compute-manager.2500 #2501 # API-detach2502 LOG.info(_("Detaching from volume api: %s") % volume_id)2503 volume = self.volume_api.get(context, volume_id)2504 self.volume_api.check_detach(context, volume)2505 
self.volume_api.begin_detaching(context, volume_id)2506 # Manager-detach2507 self.detach_volume(context, volume_id, instance)2508 @wrap_exception()2509 @reverts_task_state2510 @wrap_instance_event2511 @wrap_instance_fault2512 def reboot_instance(self, context, instance, block_device_info,2513 reboot_type):2514 """Reboot an instance on this host."""2515 # acknowledge the request made it to the manager2516 if reboot_type == "SOFT":2517 instance.task_state = task_states.REBOOT_PENDING2518 expected_states = (task_states.REBOOTING,2519 task_states.REBOOT_PENDING,2520 task_states.REBOOT_STARTED)2521 else:2522 instance.task_state = task_states.REBOOT_PENDING_HARD2523 expected_states = (task_states.REBOOTING_HARD,2524 task_states.REBOOT_PENDING_HARD,2525 task_states.REBOOT_STARTED_HARD)2526 context = context.elevated()2527 LOG.audit(_("Rebooting instance"), context=context, instance=instance)2528 block_device_info = self._get_instance_block_device_info(context,2529 instance)2530 network_info = self._get_instance_nw_info(context, instance)2531 self._notify_about_instance_usage(context, instance, "reboot.start")2532 current_power_state = self._get_power_state(context, instance)2533 instance.power_state = current_power_state2534 instance.save(expected_task_state=expected_states)2535 if instance['power_state'] != power_state.RUNNING:2536 state = instance['power_state']2537 running = power_state.RUNNING2538 LOG.warn(_('trying to reboot a non-running instance:'2539 ' (state: %(state)s expected: %(running)s)'),2540 {'state': state, 'running': running},2541 context=context, instance=instance)2542 def bad_volumes_callback(bad_devices):2543 self._handle_bad_volumes_detached(2544 context, instance, bad_devices, block_device_info)2545 try:2546 # Don't change it out of rescue mode2547 if instance['vm_state'] == vm_states.RESCUED:2548 new_vm_state = vm_states.RESCUED2549 else:2550 new_vm_state = vm_states.ACTIVE2551 new_power_state = None2552 if reboot_type == "SOFT":2553 instance.task_state = task_states.REBOOT_STARTED2554 expected_state = task_states.REBOOT_PENDING2555 else:2556 instance.task_state = task_states.REBOOT_STARTED_HARD2557 expected_state = task_states.REBOOT_PENDING_HARD2558 instance.save(expected_task_state=expected_state)2559 self.driver.reboot(context, instance,2560 network_info,2561 reboot_type,2562 block_device_info=block_device_info,2563 bad_volumes_callback=bad_volumes_callback)2564 except Exception as error:2565 with excutils.save_and_reraise_exception() as ctxt:2566 exc_info = sys.exc_info()2567 # if the reboot failed but the VM is running don't2568 # put it into an error state2569 new_power_state = self._get_power_state(context, instance)2570 if new_power_state == power_state.RUNNING:2571 LOG.warning(_('Reboot failed but instance is running'),2572 context=context, instance=instance)2573 compute_utils.add_instance_fault_from_exc(context,2574 instance, error, exc_info)2575 self._notify_about_instance_usage(context, instance,2576 'reboot.error', fault=error)2577 ctxt.reraise = False2578 else:2579 LOG.error(_('Cannot reboot instance: %s'), error,2580 context=context, instance=instance)2581 self._set_instance_obj_error_state(context, instance)2582 if not new_power_state:2583 new_power_state = self._get_power_state(context, instance)2584 try:2585 instance.power_state = new_power_state2586 instance.vm_state = new_vm_state2587 instance.task_state = None2588 instance.save()2589 except exception.InstanceNotFound:2590 LOG.warn(_("Instance disappeared during reboot"),2591 context=context, 
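
reboot_instance above distinguishes SOFT from HARD reboots through different pending/started task states, and on a failed reboot only errors the instance out if the guest is not actually running. A condensed sketch of that failure branch; the power-state constants and callbacks are simplified stand-ins:

RUNNING, SHUTDOWN = 'running', 'shutdown'

def handle_reboot_failure(current_power_state, record_fault, set_error_state):
    """A 'failed' reboot that left the guest running is recorded as a fault
    but not treated as fatal, mirroring the decision above."""
    if current_power_state == RUNNING:
        record_fault('Reboot failed but instance is running')
        return 'active'
    set_error_state()
    return 'error'

print(handle_reboot_failure(RUNNING, print, lambda: None))    # active
print(handle_reboot_failure(SHUTDOWN, print, lambda: None))   # error
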
instance=instance)2592 self._notify_about_instance_usage(context, instance, "reboot.end")2593 @delete_image_on_error2594 def _do_snapshot_instance(self, context, image_id, instance, rotation):2595 if rotation < 0:2596 raise exception.RotationRequiredForBackup()2597 self._snapshot_instance(context, image_id, instance,2598 task_states.IMAGE_BACKUP)2599 @wrap_exception()2600 @reverts_task_state2601 @wrap_instance_fault2602 def backup_instance(self, context, image_id, instance, backup_type,2603 rotation):2604 """Backup an instance on this host.2605 :param backup_type: daily | weekly2606 :param rotation: int representing how many backups to keep around2607 """2608 self._do_snapshot_instance(context, image_id, instance, rotation)2609 self._rotate_backups(context, instance, backup_type, rotation)2610 @wrap_exception()2611 @reverts_task_state2612 @wrap_instance_fault2613 @delete_image_on_error2614 def snapshot_instance(self, context, image_id, instance):2615 """Snapshot an instance on this host.2616 :param context: security context2617 :param instance: a nova.objects.instance.Instance object2618 :param image_id: glance.db.sqlalchemy.models.Image.Id2619 """2620 # NOTE(dave-mcnally) the task state will already be set by the api2621 # but if the compute manager has crashed/been restarted prior to the2622 # request getting here the task state may have been cleared so we set2623 # it again and things continue normally2624 try:2625 instance.task_state = task_states.IMAGE_SNAPSHOT2626 instance.save(2627 expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)2628 except exception.InstanceNotFound:2629 # possibility instance no longer exists, no point in continuing2630 LOG.debug("Instance not found, could not set state %s "2631 "for instance.",2632 task_states.IMAGE_SNAPSHOT, instance=instance)2633 return2634 except exception.UnexpectedDeletingTaskStateError:2635 LOG.debug("Instance being deleted, snapshot cannot continue",2636 instance=instance)2637 return2638 self._snapshot_instance(context, image_id, instance,2639 task_states.IMAGE_SNAPSHOT)2640 def _snapshot_instance(self, context, image_id, instance,2641 expected_task_state):2642 context = context.elevated()2643 current_power_state = self._get_power_state(context, instance)2644 try:2645 instance.power_state = current_power_state2646 instance.save()2647 LOG.audit(_('instance snapshotting'), context=context,2648 instance=instance)2649 if instance.power_state != power_state.RUNNING:2650 state = instance.power_state2651 running = power_state.RUNNING2652 LOG.warn(_('trying to snapshot a non-running instance: '2653 '(state: %(state)s expected: %(running)s)'),2654 {'state': state, 'running': running},2655 instance=instance)2656 self._notify_about_instance_usage(2657 context, instance, "snapshot.start")2658 def update_task_state(task_state,2659 expected_state=expected_task_state):2660 instance.task_state = task_state2661 instance.save(expected_task_state=expected_state)2662 self.driver.snapshot(context, instance, image_id,2663 update_task_state)2664 instance.task_state = None2665 instance.save(expected_task_state=task_states.IMAGE_UPLOADING)2666 self._notify_about_instance_usage(context, instance,2667 "snapshot.end")2668 except (exception.InstanceNotFound,2669 exception.UnexpectedDeletingTaskStateError):2670 # the instance got deleted during the snapshot2671 # Quickly bail out of here2672 msg = 'Instance disappeared during snapshot'2673 LOG.debug(msg, instance=instance)2674 try:2675 image_service = glance.get_default_image_service()2676 image = 
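
_snapshot_instance above hands the driver an update_task_state callback so the virt layer can advance the instance's task state during the snapshot while enforcing the expected previous state. A toy version of that callback protocol; the driver function and the intermediate state names below are illustrative, not taken from a specific driver:

IMAGE_SNAPSHOT = 'image_snapshot'
IMAGE_PENDING_UPLOAD = 'image_pending_upload'
IMAGE_UPLOADING = 'image_uploading'

class ToyInstance(object):
    task_state = IMAGE_SNAPSHOT

def snapshot_with_callback(instance, driver_snapshot):
    def update_task_state(task_state, expected_state=IMAGE_SNAPSHOT):
        if instance.task_state != expected_state:
            raise RuntimeError('unexpected task_state %r' % instance.task_state)
        instance.task_state = task_state
        print('task_state ->', task_state)

    driver_snapshot(update_task_state)
    instance.task_state = None            # snapshot finished

def fake_driver_snapshot(update_task_state):
    update_task_state(IMAGE_PENDING_UPLOAD)
    # ... take and upload the snapshot ...
    update_task_state(IMAGE_UPLOADING, expected_state=IMAGE_PENDING_UPLOAD)

snapshot_with_callback(ToyInstance(), fake_driver_snapshot)
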
image_service.show(context, image_id)2677 if image['status'] != 'active':2678 image_service.delete(context, image_id)2679 except Exception:2680 LOG.warning(_("Error while trying to clean up image %s"),2681 image_id, instance=instance)2682 except exception.ImageNotFound:2683 instance.task_state = None2684 instance.save()2685 msg = _("Image not found during snapshot")2686 LOG.warn(msg, instance=instance)2687 def _post_interrupted_snapshot_cleanup(self, context, instance):2688 self.driver.post_interrupted_snapshot_cleanup(context, instance)2689 @object_compat2690 @messaging.expected_exceptions(NotImplementedError)2691 def volume_snapshot_create(self, context, instance, volume_id,2692 create_info):2693 self.driver.volume_snapshot_create(context, instance, volume_id,2694 create_info)2695 @object_compat2696 @messaging.expected_exceptions(NotImplementedError)2697 def volume_snapshot_delete(self, context, instance, volume_id,2698 snapshot_id, delete_info):2699 self.driver.volume_snapshot_delete(context, instance, volume_id,2700 snapshot_id, delete_info)2701 @wrap_instance_fault2702 def _rotate_backups(self, context, instance, backup_type, rotation):2703 """Delete excess backups associated to an instance.2704 Instances are allowed a fixed number of backups (the rotation number);2705 this method deletes the oldest backups that exceed the rotation2706 threshold.2707 :param context: security context2708 :param instance: Instance dict2709 :param backup_type: daily | weekly2710 :param rotation: int representing how many backups to keep around;2711 None if rotation shouldn't be used (as in the case of snapshots)2712 """2713 filters = {'property-image_type': 'backup',2714 'property-backup_type': backup_type,2715 'property-instance_uuid': instance.uuid}2716 images = self.image_api.get_all(context, filters=filters,2717 sort_key='created_at', sort_dir='desc')2718 num_images = len(images)2719 LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",2720 {'num_images': num_images, 'rotation': rotation},2721 instance=instance)2722 if num_images > rotation:2723 # NOTE(sirp): this deletes all backups that exceed the rotation2724 # limit2725 excess = len(images) - rotation2726 LOG.debug("Rotating out %d backups", excess,2727 instance=instance)2728 for i in xrange(excess):2729 image = images.pop()2730 image_id = image['id']2731 LOG.debug("Deleting image %s", image_id,2732 instance=instance)2733 self.image_api.delete(context, image_id)2734 @object_compat2735 @wrap_exception()2736 @reverts_task_state2737 @wrap_instance_event2738 @wrap_instance_fault2739 def set_admin_password(self, context, instance, new_pass):2740 """Set the root/admin password for an instance on this host.2741 This is generally only called by API password resets after an2742 image has been built.2743 @param context: Nova auth context.2744 @param instance: Nova instance object.2745 @param new_pass: The admin password for the instance.2746 """2747 context = context.elevated()2748 if new_pass is None:2749 # Generate a random password2750 new_pass = utils.generate_password()2751 current_power_state = self._get_power_state(context, instance)2752 expected_state = power_state.RUNNING2753 if current_power_state != expected_state:2754 instance.task_state = None2755 instance.save(expected_task_state=task_states.UPDATING_PASSWORD)2756 _msg = _('Failed to set admin password. 
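
_rotate_backups above lists the instance's backup images sorted newest-first and deletes everything beyond the rotation count by popping from the tail of that list. The arithmetic in a standalone sketch:

def rotate_backups_sketch(images_newest_first, rotation, delete_image):
    """Delete the oldest backups so at most `rotation` remain."""
    excess = len(images_newest_first) - rotation
    if excess <= 0:
        return
    for _ in range(excess):
        oldest = images_newest_first.pop()   # newest-first list, so pop() is oldest
        delete_image(oldest['id'])

backups = [{'id': 'img-%d' % i} for i in range(5)]   # img-0 newest ... img-4 oldest
rotate_backups_sketch(backups, rotation=3,
                      delete_image=lambda image_id: print('deleting', image_id))
# deletes img-4, then img-3
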
Instance %s is not'
                     ' running') % instance.uuid
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)
        try:
            self.driver.set_admin_password(instance, new_pass)
            LOG.audit(_("Root password set"), instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
        except NotImplementedError:
            _msg = _('set_admin_password is not implemented '
                     'by this driver or guest instance.')
            LOG.warn(_msg, instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
            raise NotImplementedError(_msg)
        except exception.UnexpectedTaskStateError:
            # interrupted by another (most likely delete) task
            # do not retry
            raise
        except Exception as e:
            # Catch all here because this could be anything.
            LOG.exception(_LE('set_admin_password failed: %s'), e,
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            # We create a new exception here so that we won't
            # potentially reveal password information to the
            # API caller. The real exception is logged above
            _msg = _('error setting admin password')
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def inject_file(self, context, path, file_contents, instance):
        """Write a file to the specified path in an instance on this host."""
        # NOTE(russellb) Remove this method, as well as the underlying virt
        # driver methods, when the compute rpc interface is bumped to 4.x
        # as it is no longer used.
        context = context.elevated()
        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING
        if current_power_state != expected_state:
            LOG.warn(_('trying to inject a file into a non-running (state: '
                       '%(current_state)s expected: %(expected_state)s)'),
                     {'current_state': current_power_state,
                      'expected_state': expected_state},
                     instance=instance)
        LOG.audit(_('injecting file to %s'), path,
                  instance=instance)
        self.driver.inject_file(instance, path, file_contents)

    def _get_rescue_image(self, context, instance, rescue_image_ref=None):
        """Determine what image should be used to boot the rescue VM."""
        # 1. If rescue_image_ref is passed in, use that for rescue.
        # 2. Else, use the base image associated with instance's current image.
        #    The idea here is to provide the customer with a rescue
        #    environment which they are familiar with.
        #    So, if they built their instance off of a Debian image,
        #    their rescue VM will also be Debian.
        # 3.
As a last resort, use instance's current image.2818 if not rescue_image_ref:2819 system_meta = utils.instance_sys_meta(instance)2820 rescue_image_ref = system_meta.get('image_base_image_ref')2821 if not rescue_image_ref:2822 LOG.warn(_('Unable to find a different image to use for rescue VM,'2823 ' using instance\'s current image'), instance=instance)2824 rescue_image_ref = instance.image_ref2825 image_meta = compute_utils.get_image_metadata(context, self.image_api,2826 rescue_image_ref,2827 instance)2828 # NOTE(belliott) bug #1227350 - xenapi needs the actual image id2829 image_meta['id'] = rescue_image_ref2830 return image_meta2831 @object_compat2832 @wrap_exception()2833 @reverts_task_state2834 @wrap_instance_event2835 @wrap_instance_fault2836 def rescue_instance(self, context, instance, rescue_password,2837 rescue_image_ref=None, clean_shutdown=True):2838 context = context.elevated()2839 LOG.audit(_('Rescuing'), context=context, instance=instance)2840 admin_password = (rescue_password if rescue_password else2841 utils.generate_password())2842 network_info = self._get_instance_nw_info(context, instance)2843 rescue_image_meta = self._get_rescue_image(context, instance,2844 rescue_image_ref)2845 extra_usage_info = {'rescue_image_name':2846 rescue_image_meta.get('name', '')}2847 self._notify_about_instance_usage(context, instance,2848 "rescue.start", extra_usage_info=extra_usage_info,2849 network_info=network_info)2850 try:2851 self._power_off_instance(context, instance, clean_shutdown)2852 self.driver.rescue(context, instance,2853 network_info,2854 rescue_image_meta, admin_password)2855 except Exception as e:2856 LOG.exception(_LE("Error trying to Rescue Instance"),2857 instance=instance)2858 raise exception.InstanceNotRescuable(2859 instance_id=instance.uuid,2860 reason=_("Driver Error: %s") % unicode(e))2861 self.conductor_api.notify_usage_exists(context, instance,2862 current_period=True)2863 current_power_state = self._get_power_state(context, instance)2864 instance.vm_state = vm_states.RESCUED2865 instance.task_state = None2866 instance.power_state = current_power_state2867 instance.launched_at = timeutils.utcnow()2868 instance.save(expected_task_state=task_states.RESCUING)2869 self._notify_about_instance_usage(context, instance,2870 "rescue.end", extra_usage_info=extra_usage_info,2871 network_info=network_info)2872 @object_compat2873 @wrap_exception()2874 @reverts_task_state2875 @wrap_instance_event2876 @wrap_instance_fault2877 def unrescue_instance(self, context, instance):2878 context = context.elevated()2879 LOG.audit(_('Unrescuing'), context=context, instance=instance)2880 network_info = self._get_instance_nw_info(context, instance)2881 self._notify_about_instance_usage(context, instance,2882 "unrescue.start", network_info=network_info)2883 with self._error_out_instance_on_exception(context, instance):2884 self.driver.unrescue(instance,2885 network_info)2886 current_power_state = self._get_power_state(context, instance)2887 instance.vm_state = vm_states.ACTIVE2888 instance.task_state = None2889 instance.power_state = current_power_state2890 instance.save(expected_task_state=task_states.UNRESCUING)2891 self._notify_about_instance_usage(context,2892 instance,2893 "unrescue.end",2894 network_info=network_info)2895 @object_compat2896 @wrap_exception()2897 @wrap_instance_fault2898 def change_instance_metadata(self, context, diff, instance):2899 """Update the metadata published to the instance."""2900 LOG.debug("Changing instance metadata according to %r",2901 diff, 
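
_get_rescue_image above resolves the rescue image in order of preference: an explicitly passed ref, then the base image recorded in the instance's system metadata, then the instance's own image. That fallback chain as a tiny function:

def choose_rescue_image(rescue_image_ref, system_metadata, instance_image_ref):
    """Pick the image to boot the rescue VM from, most specific first."""
    if rescue_image_ref:
        return rescue_image_ref
    base = system_metadata.get('image_base_image_ref')
    if base:
        return base
    # Last resort: reuse the image the instance itself was built from.
    return instance_image_ref

print(choose_rescue_image(None, {'image_base_image_ref': 'debian-base'}, 'my-image'))
print(choose_rescue_image(None, {}, 'my-image'))
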
instance=instance)2902 self.driver.change_instance_metadata(context, instance, diff)2903 def _cleanup_stored_instance_types(self, migration, instance,2904 restore_old=False):2905 """Clean up "old" and "new" instance_type information stored in2906 instance's system_metadata. Optionally update the "current"2907 instance_type to the saved old one first.2908 Returns the updated system_metadata as a dict, the2909 post-cleanup current instance type and the to-be dropped2910 instance type.2911 """2912 sys_meta = instance.system_metadata2913 if restore_old:2914 instance_type = flavors.extract_flavor(instance, 'old_')2915 drop_instance_type = flavors.extract_flavor(instance)2916 sys_meta = flavors.save_flavor_info(sys_meta, instance_type)2917 else:2918 instance_type = flavors.extract_flavor(instance)2919 drop_instance_type = flavors.extract_flavor(instance, 'old_')2920 flavors.delete_flavor_info(sys_meta, 'old_')2921 flavors.delete_flavor_info(sys_meta, 'new_')2922 return sys_meta, instance_type, drop_instance_type2923 @wrap_exception()2924 @wrap_instance_event2925 @wrap_instance_fault2926 def confirm_resize(self, context, instance, reservations, migration):2927 quotas = objects.Quotas.from_reservations(context,2928 reservations,2929 instance=instance)2930 @utils.synchronized(instance['uuid'])2931 def do_confirm_resize(context, instance, migration_id):2932 # NOTE(wangpan): Get the migration status from db, if it has been2933 # confirmed, we do nothing and return here2934 LOG.debug("Going to confirm migration %s", migration_id,2935 context=context, instance=instance)2936 try:2937 # TODO(russellb) Why are we sending the migration object just2938 # to turn around and look it up from the db again?2939 migration = objects.Migration.get_by_id(2940 context.elevated(), migration_id)2941 except exception.MigrationNotFound:2942 LOG.error(_("Migration %s is not found during confirmation") %2943 migration_id, context=context, instance=instance)2944 quotas.rollback()2945 return2946 if migration.status == 'confirmed':2947 LOG.info(_("Migration %s is already confirmed") %2948 migration_id, context=context, instance=instance)2949 quotas.rollback()2950 return2951 elif migration.status not in ('finished', 'confirming'):2952 LOG.warn(_("Unexpected confirmation status '%(status)s' of "2953 "migration %(id)s, exit confirmation process") %2954 {"status": migration.status, "id": migration_id},2955 context=context, instance=instance)2956 quotas.rollback()2957 return2958 # NOTE(wangpan): Get the instance from db, if it has been2959 # deleted, we do nothing and return here2960 expected_attrs = ['metadata', 'system_metadata']2961 try:2962 instance = objects.Instance.get_by_uuid(2963 context, instance.uuid,2964 expected_attrs=expected_attrs)2965 except exception.InstanceNotFound:2966 LOG.info(_("Instance is not found during confirmation"),2967 context=context, instance=instance)2968 quotas.rollback()2969 return2970 self._confirm_resize(context, instance, quotas,2971 migration=migration)2972 do_confirm_resize(context, instance, migration.id)2973 def _confirm_resize(self, context, instance, quotas,2974 migration=None):2975 """Destroys the source instance."""2976 self._notify_about_instance_usage(context, instance,2977 "resize.confirm.start")2978 with self._error_out_instance_on_exception(context, instance,2979 quotas=quotas):2980 # NOTE(danms): delete stashed migration information2981 sys_meta, instance_type, old_instance_type = (2982 self._cleanup_stored_instance_types(migration, instance))2983 sys_meta.pop('old_vm_state', 
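
_cleanup_stored_instance_types above juggles flavor information stashed in system_metadata under 'old_' and 'new_' prefixes: on confirm the old flavor is dropped, on revert it becomes current again. A dict-only sketch of that bookkeeping; the prefix handling is simplified relative to flavors.save_flavor_info/extract_flavor:

def cleanup_stored_flavors_sketch(sys_meta, restore_old=False):
    """Return (cleaned sys_meta, kept flavor, dropped flavor)."""
    old = sys_meta.get('old_instance_type')
    new = sys_meta.get('instance_type')
    if restore_old:                 # revert: the stashed old flavor wins
        kept, dropped = old, new
        sys_meta['instance_type'] = old
    else:                           # confirm: keep current, drop the old one
        kept, dropped = new, old
    sys_meta.pop('old_instance_type', None)
    sys_meta.pop('new_instance_type', None)
    return sys_meta, kept, dropped

meta = {'instance_type': 'm1.large', 'old_instance_type': 'm1.small',
        'new_instance_type': 'm1.large'}
print(cleanup_stored_flavors_sketch(dict(meta), restore_old=True)[1])   # m1.small
print(cleanup_stored_flavors_sketch(dict(meta), restore_old=False)[1])  # m1.large
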
None)2984 instance.system_metadata = sys_meta2985 instance.save()2986 # NOTE(tr3buchet): tear down networks on source host2987 self.network_api.setup_networks_on_host(context, instance,2988 migration.source_compute, teardown=True)2989 network_info = self._get_instance_nw_info(context, instance)2990 self.driver.confirm_migration(migration, instance,2991 network_info)2992 migration.status = 'confirmed'2993 migration.save(context.elevated())2994 rt = self._get_resource_tracker(migration.source_node)2995 rt.drop_resize_claim(context, instance, old_instance_type)2996 # NOTE(mriedem): The old_vm_state could be STOPPED but the user2997 # might have manually powered up the instance to confirm the2998 # resize/migrate, so we need to check the current power state2999 # on the instance and set the vm_state appropriately. We default3000 # to ACTIVE because if the power state is not SHUTDOWN, we3001 # assume _sync_instance_power_state will clean it up.3002 p_state = instance.power_state3003 vm_state = None3004 if p_state == power_state.SHUTDOWN:3005 vm_state = vm_states.STOPPED3006 LOG.debug("Resized/migrated instance is powered off. "3007 "Setting vm_state to '%s'.", vm_state,3008 instance=instance)3009 else:3010 vm_state = vm_states.ACTIVE3011 instance.vm_state = vm_state3012 instance.task_state = None3013 instance.save(expected_task_state=[None, task_states.DELETING])3014 self._notify_about_instance_usage(3015 context, instance, "resize.confirm.end",3016 network_info=network_info)3017 quotas.commit()3018 @wrap_exception()3019 @reverts_task_state3020 @wrap_instance_event3021 @wrap_instance_fault3022 def revert_resize(self, context, instance, migration, reservations):3023 """Destroys the new instance on the destination machine.3024 Reverts the model changes, and powers on the old instance on the3025 source machine.3026 """3027 quotas = quotas_obj.Quotas.from_reservations(context,3028 reservations,3029 instance=instance)3030 # NOTE(comstud): A revert_resize is essentially a resize back to3031 # the old size, so we need to send a usage event here.3032 self.conductor_api.notify_usage_exists(3033 context, instance, current_period=True)3034 with self._error_out_instance_on_exception(context, instance,3035 quotas=quotas):3036 # NOTE(tr3buchet): tear down networks on destination host3037 self.network_api.setup_networks_on_host(context, instance,3038 teardown=True)3039 instance_p = obj_base.obj_to_primitive(instance)3040 migration_p = obj_base.obj_to_primitive(migration)3041 self.network_api.migrate_instance_start(context,3042 instance_p,3043 migration_p)3044 network_info = self._get_instance_nw_info(context, instance)3045 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(3046 context, instance.uuid)3047 block_device_info = self._get_instance_block_device_info(3048 context, instance, bdms=bdms)3049 self.driver.destroy(context, instance, network_info,3050 block_device_info)3051 self._terminate_volume_connections(context, instance, bdms)3052 migration.status = 'reverted'3053 migration.save(context.elevated())3054 rt = self._get_resource_tracker(instance.node)3055 rt.drop_resize_claim(context, instance)3056 self.compute_rpcapi.finish_revert_resize(context, instance,3057 migration, migration.source_compute,3058 quotas.reservations)3059 @wrap_exception()3060 @reverts_task_state3061 @wrap_instance_event3062 @wrap_instance_fault3063 def finish_revert_resize(self, context, instance, reservations, migration):3064 """Finishes the second half of reverting a resize.3065 Bring the original source instance state 
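
At the end of _confirm_resize above, the instance's final vm_state depends on its actual power state, because the user may have powered the resized instance on or off before confirming. The decision reduces to a one-liner; the string constants here stand in for Nova's power_state/vm_states values:

SHUTDOWN, RUNNING = 'shutdown', 'running'
STOPPED, ACTIVE = 'stopped', 'active'

def vm_state_after_confirm(power_state):
    # A powered-off instance stays STOPPED; anything else is assumed ACTIVE
    # and left for the periodic power-state sync to correct if needed.
    return STOPPED if power_state == SHUTDOWN else ACTIVE

print(vm_state_after_confirm(SHUTDOWN))  # stopped
print(vm_state_after_confirm(RUNNING))   # active
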
back (active/shutoff) and3066 revert the resized attributes in the database.3067 """3068 quotas = quotas_obj.Quotas.from_reservations(context,3069 reservations,3070 instance=instance)3071 with self._error_out_instance_on_exception(context, instance,3072 quotas=quotas):3073 network_info = self._get_instance_nw_info(context, instance)3074 self._notify_about_instance_usage(3075 context, instance, "resize.revert.start")3076 sys_meta, instance_type, drop_instance_type = (3077 self._cleanup_stored_instance_types(migration, instance, True))3078 # NOTE(mriedem): delete stashed old_vm_state information; we3079 # default to ACTIVE for backwards compatibility if old_vm_state3080 # is not set3081 old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)3082 instance.system_metadata = sys_meta3083 instance.memory_mb = instance_type['memory_mb']3084 instance.vcpus = instance_type['vcpus']3085 instance.root_gb = instance_type['root_gb']3086 instance.ephemeral_gb = instance_type['ephemeral_gb']3087 instance.instance_type_id = instance_type['id']3088 instance.host = migration['source_compute']3089 instance.node = migration['source_node']3090 instance.save()3091 self.network_api.setup_networks_on_host(context, instance,3092 migration['source_compute'])3093 block_device_info = self._get_instance_block_device_info(3094 context, instance, refresh_conn_info=True)3095 power_on = old_vm_state != vm_states.STOPPED3096 self.driver.finish_revert_migration(context, instance,3097 network_info,3098 block_device_info, power_on)3099 instance.launched_at = timeutils.utcnow()3100 instance.save(expected_task_state=task_states.RESIZE_REVERTING)3101 instance_p = obj_base.obj_to_primitive(instance)3102 migration_p = obj_base.obj_to_primitive(migration)3103 self.network_api.migrate_instance_finish(context,3104 instance_p,3105 migration_p)3106 # if the original vm state was STOPPED, set it back to STOPPED3107 LOG.info(_("Updating instance to original state: '%s'") %3108 old_vm_state)3109 if power_on:3110 instance.vm_state = vm_states.ACTIVE3111 instance.task_state = None3112 instance.save()3113 else:3114 instance.task_state = task_states.POWERING_OFF3115 instance.save()3116 self.stop_instance(context, instance=instance)3117 self._notify_about_instance_usage(3118 context, instance, "resize.revert.end")3119 quotas.commit()3120 def _prep_resize(self, context, image, instance, instance_type,3121 quotas, request_spec, filter_properties, node):3122 if not filter_properties:3123 filter_properties = {}3124 if not instance['host']:3125 self._set_instance_error_state(context, instance)3126 msg = _('Instance has no source host')3127 raise exception.MigrationError(msg)3128 same_host = instance['host'] == self.host3129 if same_host and not CONF.allow_resize_to_same_host:3130 self._set_instance_error_state(context, instance)3131 msg = _('destination same as source!')3132 raise exception.MigrationError(msg)3133 # NOTE(danms): Stash the new instance_type to avoid having to3134 # look it up in the database later3135 sys_meta = instance.system_metadata3136 flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')3137 # NOTE(mriedem): Stash the old vm_state so we can set the3138 # resized/reverted instance back to the same state later.3139 vm_state = instance['vm_state']3140 LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)3141 sys_meta['old_vm_state'] = vm_state3142 instance.save()3143 limits = filter_properties.get('limits', {})3144 rt = self._get_resource_tracker(node)3145 with rt.resize_claim(context, instance, 
instance_type,3146 image_meta=image, limits=limits) as claim:3147 LOG.audit(_('Migrating'), context=context, instance=instance)3148 self.compute_rpcapi.resize_instance(3149 context, instance, claim.migration, image,3150 instance_type, quotas.reservations)3151 @wrap_exception()3152 @reverts_task_state3153 @wrap_instance_event3154 @wrap_instance_fault3155 def prep_resize(self, context, image, instance, instance_type,3156 reservations, request_spec, filter_properties, node):3157 """Initiates the process of moving a running instance to another host.3158 Possibly changes the RAM and disk size in the process.3159 """3160 if node is None:3161 node = self.driver.get_available_nodes(refresh=True)[0]3162 LOG.debug("No node specified, defaulting to %s", node,3163 instance=instance)3164 quotas = quotas_obj.Quotas.from_reservations(context,3165 reservations,3166 instance=instance)3167 with self._error_out_instance_on_exception(context, instance,3168 quotas=quotas):3169 self.conductor_api.notify_usage_exists(3170 context, instance, current_period=True)3171 self._notify_about_instance_usage(3172 context, instance, "resize.prep.start")3173 try:3174 self._prep_resize(context, image, instance,3175 instance_type, quotas,3176 request_spec, filter_properties,3177 node)3178 # NOTE(dgenin): This is thrown in LibvirtDriver when the3179 # instance to be migrated is backed by LVM.3180 # Remove when LVM migration is implemented.3181 except exception.MigrationPreCheckError:3182 raise3183 except Exception:3184 # try to re-schedule the resize elsewhere:3185 exc_info = sys.exc_info()3186 self._reschedule_resize_or_reraise(context, image, instance,3187 exc_info, instance_type, quotas, request_spec,3188 filter_properties)3189 finally:3190 extra_usage_info = dict(3191 new_instance_type=instance_type['name'],3192 new_instance_type_id=instance_type['id'])3193 self._notify_about_instance_usage(3194 context, instance, "resize.prep.end",3195 extra_usage_info=extra_usage_info)3196 def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,3197 instance_type, quotas, request_spec, filter_properties):3198 """Try to re-schedule the resize or re-raise the original error to3199 error out the instance.3200 """3201 if not request_spec:3202 request_spec = {}3203 if not filter_properties:3204 filter_properties = {}3205 rescheduled = False3206 instance_uuid = instance['uuid']3207 try:3208 reschedule_method = self.compute_task_api.resize_instance3209 scheduler_hint = dict(filter_properties=filter_properties)3210 method_args = (instance, None, scheduler_hint, instance_type,3211 quotas.reservations)3212 task_state = task_states.RESIZE_PREP3213 rescheduled = self._reschedule(context, request_spec,3214 filter_properties, instance, reschedule_method,3215 method_args, task_state, exc_info)3216 except Exception as error:3217 rescheduled = False3218 LOG.exception(_LE("Error trying to reschedule"),3219 instance_uuid=instance_uuid)3220 compute_utils.add_instance_fault_from_exc(context,3221 instance, error,3222 exc_info=sys.exc_info())3223 self._notify_about_instance_usage(context, instance,3224 'resize.error', fault=error)3225 if rescheduled:3226 self._log_original_error(exc_info, instance_uuid)3227 compute_utils.add_instance_fault_from_exc(context,3228 instance, exc_info[1], exc_info=exc_info)3229 self._notify_about_instance_usage(context, instance,3230 'resize.error', fault=exc_info[1])3231 else:3232 # not re-scheduling3233 raise exc_info[0], exc_info[1], exc_info[2]3234 @wrap_exception()3235 @reverts_task_state3236 
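prep_resize above wraps the actual work in a broad try/except: on failure it attempts to reschedule the resize to another host and only re-raises the original error, traceback included, when rescheduling is not possible (the listing uses the Python 2 three-argument raise for that). A rough sketch of the same decision with hypothetical helper names, written with the Python 3 equivalent:

import sys

def run_or_reschedule(action, reschedule):
    """Run action(); on failure try to reschedule, otherwise re-raise the original error."""
    try:
        action()
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        try:
            rescheduled = reschedule()
        except Exception:
            rescheduled = False            # rescheduling itself failed; fall through
        if not rescheduled:
            # Python 3 spelling of `raise exc_type, exc_value, exc_tb`
            raise exc_value.with_traceback(exc_tb)

run_or_reschedule(lambda: None, lambda: True)   # succeeds, nothing to reschedule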
@wrap_instance_event3237 @errors_out_migration3238 @wrap_instance_fault3239 def resize_instance(self, context, instance, image,3240 reservations, migration, instance_type,3241 clean_shutdown=True):3242 """Starts the migration of a running instance to another host."""3243 quotas = quotas_obj.Quotas.from_reservations(context,3244 reservations,3245 instance=instance)3246 with self._error_out_instance_on_exception(context, instance,3247 quotas=quotas):3248 if not instance_type:3249 instance_type = objects.Flavor.get_by_id(3250 context, migration['new_instance_type_id'])3251 network_info = self._get_instance_nw_info(context, instance)3252 migration.status = 'migrating'3253 migration.save(context.elevated())3254 instance.task_state = task_states.RESIZE_MIGRATING3255 instance.save(expected_task_state=task_states.RESIZE_PREP)3256 self._notify_about_instance_usage(3257 context, instance, "resize.start", network_info=network_info)3258 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(3259 context, instance.uuid)3260 block_device_info = self._get_instance_block_device_info(3261 context, instance, bdms=bdms)3262 timeout, retry_interval = self._get_power_off_values(context,3263 instance, clean_shutdown)3264 disk_info = self.driver.migrate_disk_and_power_off(3265 context, instance, migration.dest_host,3266 instance_type, network_info,3267 block_device_info,3268 timeout, retry_interval)3269 self._terminate_volume_connections(context, instance, bdms)3270 migration_p = obj_base.obj_to_primitive(migration)3271 instance_p = obj_base.obj_to_primitive(instance)3272 self.network_api.migrate_instance_start(context,3273 instance_p,3274 migration_p)3275 migration.status = 'post-migrating'3276 migration.save(context.elevated())3277 instance.host = migration.dest_compute3278 instance.node = migration.dest_node3279 instance.task_state = task_states.RESIZE_MIGRATED3280 instance.save(expected_task_state=task_states.RESIZE_MIGRATING)3281 self.compute_rpcapi.finish_resize(context, instance,3282 migration, image, disk_info,3283 migration.dest_compute, reservations=quotas.reservations)3284 self._notify_about_instance_usage(context, instance, "resize.end",3285 network_info=network_info)3286 self.instance_events.clear_events_for_instance(instance)3287 def _terminate_volume_connections(self, context, instance, bdms):3288 connector = self.driver.get_volume_connector(instance)3289 for bdm in bdms:3290 if bdm.is_volume:3291 self.volume_api.terminate_connection(context, bdm.volume_id,3292 connector)3293 @staticmethod3294 def _save_instance_info(instance, instance_type, sys_meta):3295 flavors.save_flavor_info(sys_meta, instance_type)3296 instance.instance_type_id = instance_type['id']3297 instance.memory_mb = instance_type['memory_mb']3298 instance.vcpus = instance_type['vcpus']3299 instance.root_gb = instance_type['root_gb']3300 instance.ephemeral_gb = instance_type['ephemeral_gb']3301 instance.system_metadata = sys_meta3302 instance.save()3303 def _finish_resize(self, context, instance, migration, disk_info,3304 image):3305 resize_instance = False3306 old_instance_type_id = migration['old_instance_type_id']3307 new_instance_type_id = migration['new_instance_type_id']3308 old_instance_type = flavors.extract_flavor(instance)3309 sys_meta = instance.system_metadata3310 # NOTE(mriedem): Get the old_vm_state so we know if we should3311 # power on the instance. 
If old_vm_state is not set we need to default3312 # to ACTIVE for backwards compatibility3313 old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE)3314 flavors.save_flavor_info(sys_meta,3315 old_instance_type,3316 prefix='old_')3317 if old_instance_type_id != new_instance_type_id:3318 instance_type = flavors.extract_flavor(instance, prefix='new_')3319 self._save_instance_info(instance, instance_type, sys_meta)3320 resize_instance = True3321 # NOTE(tr3buchet): setup networks on destination host3322 self.network_api.setup_networks_on_host(context, instance,3323 migration['dest_compute'])3324 instance_p = obj_base.obj_to_primitive(instance)3325 migration_p = obj_base.obj_to_primitive(migration)3326 self.network_api.migrate_instance_finish(context,3327 instance_p,3328 migration_p)3329 network_info = self._get_instance_nw_info(context, instance)3330 instance.task_state = task_states.RESIZE_FINISH3331 instance.system_metadata = sys_meta3332 instance.save(expected_task_state=task_states.RESIZE_MIGRATED)3333 self._notify_about_instance_usage(3334 context, instance, "finish_resize.start",3335 network_info=network_info)3336 block_device_info = self._get_instance_block_device_info(3337 context, instance, refresh_conn_info=True)3338 # NOTE(mriedem): If the original vm_state was STOPPED, we don't3339 # automatically power on the instance after it's migrated3340 power_on = old_vm_state != vm_states.STOPPED3341 try:3342 self.driver.finish_migration(context, migration, instance,3343 disk_info,3344 network_info,3345 image, resize_instance,3346 block_device_info, power_on)3347 except Exception:3348 with excutils.save_and_reraise_exception():3349 if resize_instance:3350 self._save_instance_info(instance,3351 old_instance_type, sys_meta)3352 migration.status = 'finished'3353 migration.save(context.elevated())3354 instance.vm_state = vm_states.RESIZED3355 instance.task_state = None3356 instance.launched_at = timeutils.utcnow()3357 instance.save(expected_task_state=task_states.RESIZE_FINISH)3358 self._notify_about_instance_usage(3359 context, instance, "finish_resize.end",3360 network_info=network_info)3361 @wrap_exception()3362 @reverts_task_state3363 @wrap_instance_event3364 @errors_out_migration3365 @wrap_instance_fault3366 def finish_resize(self, context, disk_info, image, instance,3367 reservations, migration):3368 """Completes the migration process.3369 Sets up the newly transferred disk and turns on the instance at its3370 new host machine.3371 """3372 quotas = quotas_obj.Quotas.from_reservations(context,3373 reservations,3374 instance=instance)3375 try:3376 self._finish_resize(context, instance, migration,3377 disk_info, image)3378 quotas.commit()3379 except Exception:3380 LOG.exception(_LE('Setting instance vm_state to ERROR'),3381 instance=instance)3382 with excutils.save_and_reraise_exception():3383 try:3384 quotas.rollback()3385 except Exception as qr_error:3386 LOG.exception(_LE("Failed to rollback quota for failed "3387 "finish_resize: %s"),3388 qr_error, instance=instance)3389 self._set_instance_error_state(context, instance)3390 @object_compat3391 @wrap_exception()3392 @wrap_instance_fault3393 def add_fixed_ip_to_instance(self, context, network_id, instance):3394 """Calls network_api to add new fixed_ip to instance3395 then injects the new network info and resets instance networking.3396 """3397 self._notify_about_instance_usage(3398 context, instance, "create_ip.start")3399 network_info = self.network_api.add_fixed_ip_to_instance(context,3400 instance,3401 network_id)3402 
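finish_resize above commits the quota reservations only after _finish_resize succeeds; on any failure it rolls them back (logging, but not masking, a rollback error) and then re-raises so the instance can be put into ERROR. A small sketch of that commit-on-success / rollback-on-failure shape, with a stand-in quotas object rather than nova's Quotas class:

class FakeQuotas(object):
    """Stand-in for a reservation handle with commit/rollback (illustrative only)."""
    def commit(self):
        print('reservations committed')
    def rollback(self):
        print('reservations rolled back')

def finish_with_quotas(work, quotas):
    """Commit reservations only if the work succeeds; otherwise roll back and re-raise."""
    try:
        work()
        quotas.commit()
    except Exception:
        try:
            quotas.rollback()
        except Exception as qr_error:
            # keep the original error; just note the failed rollback
            print('rollback failed: %s' % qr_error)
        raise

finish_with_quotas(lambda: None, FakeQuotas())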
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)
        # NOTE(russellb) We just want to bump updated_at.  See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()
        self._notify_about_instance_usage(
            context, instance, "create_ip.end", network_info=network_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def remove_fixed_ip_from_instance(self, context, address, instance):
        """Calls network_api to remove existing fixed_ip from instance
        by injecting the altered network info and resetting
        instance networking.
        """
        self._notify_about_instance_usage(
            context, instance, "delete_ip.start")

        network_info = self.network_api.remove_fixed_ip_from_instance(context,
                                                                      instance,
                                                                      address)
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)

        # NOTE(russellb) We just want to bump updated_at.  See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()

        self._notify_about_instance_usage(
            context, instance, "delete_ip.end", network_info=network_info)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def pause_instance(self, context, instance):
        """Pause an instance on this host."""
        context = context.elevated()
        LOG.audit(_('Pausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'pause.start')
        self.driver.pause(instance)
        current_power_state = self._get_power_state(context, instance)
        instance.power_state = current_power_state
        instance.vm_state = vm_states.PAUSED
        instance.task_state = None
        instance.save(expected_task_state=task_states.PAUSING)
        self._notify_about_instance_usage(context, instance, 'pause.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unpause_instance(self, context, instance):
        """Unpause a paused instance on this host."""
        context = context.elevated()
        LOG.audit(_('Unpausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'unpause.start')
        self.driver.unpause(instance)
        current_power_state = self._get_power_state(context, instance)
        instance.power_state = current_power_state
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.UNPAUSING)
        self._notify_about_instance_usage(context, instance, 'unpause.end')

    @wrap_exception()
    def host_power_action(self, context, action):
        """Reboots, shuts down or powers up the host."""
        # TODO(russellb) Remove the unused host parameter from the driver API
        return self.driver.host_power_action(None, action)

    @wrap_exception()
    def host_maintenance_mode(self, context, host, mode):
        """Start/Stop host maintenance window.
On start, it triggers3469 guest VMs evacuation.3470 """3471 return self.driver.host_maintenance_mode(host, mode)3472 @wrap_exception()3473 def set_host_enabled(self, context, enabled):3474 """Sets the specified host's ability to accept new instances."""3475 # TODO(russellb) Remove the unused host parameter from the driver API3476 return self.driver.set_host_enabled(None, enabled)3477 @wrap_exception()3478 def get_host_uptime(self, context):3479 """Returns the result of calling "uptime" on the target host."""3480 return self.driver.get_host_uptime(self.host)3481 @object_compat3482 @wrap_exception()3483 @wrap_instance_fault3484 def get_diagnostics(self, context, instance):3485 """Retrieve diagnostics for an instance on this host."""3486 current_power_state = self._get_power_state(context, instance)3487 if current_power_state == power_state.RUNNING:3488 LOG.audit(_("Retrieving diagnostics"), context=context,3489 instance=instance)3490 return self.driver.get_diagnostics(instance)3491 else:3492 raise exception.InstanceInvalidState(3493 attr='power_state',3494 instance_uuid=instance.uuid,3495 state=instance.power_state,3496 method='get_diagnostics')3497 @object_compat3498 @wrap_exception()3499 @wrap_instance_fault3500 def get_instance_diagnostics(self, context, instance):3501 """Retrieve diagnostics for an instance on this host."""3502 current_power_state = self._get_power_state(context, instance)3503 if current_power_state == power_state.RUNNING:3504 LOG.audit(_("Retrieving diagnostics"), context=context,3505 instance=instance)3506 diags = self.driver.get_instance_diagnostics(instance)3507 return diags.serialize()3508 else:3509 raise exception.InstanceInvalidState(3510 attr='power_state',3511 instance_uuid=instance.uuid,3512 state=instance.power_state,3513 method='get_diagnostics')3514 @wrap_exception()3515 @reverts_task_state3516 @wrap_instance_event3517 @wrap_instance_fault3518 def suspend_instance(self, context, instance):3519 """Suspend the given instance."""3520 context = context.elevated()3521 # Store the old state3522 instance.system_metadata['old_vm_state'] = instance.vm_state3523 with self._error_out_instance_on_exception(context, instance,3524 instance_state=instance['vm_state']):3525 self.driver.suspend(instance)3526 current_power_state = self._get_power_state(context, instance)3527 instance.power_state = current_power_state3528 instance.vm_state = vm_states.SUSPENDED3529 instance.task_state = None3530 instance.save(expected_task_state=task_states.SUSPENDING)3531 self._notify_about_instance_usage(context, instance, 'suspend')3532 @wrap_exception()3533 @reverts_task_state3534 @wrap_instance_event3535 @wrap_instance_fault3536 def resume_instance(self, context, instance):3537 """Resume the given suspended instance."""3538 context = context.elevated()3539 LOG.audit(_('Resuming'), context=context, instance=instance)3540 network_info = self._get_instance_nw_info(context, instance)3541 block_device_info = self._get_instance_block_device_info(3542 context, instance)3543 self.driver.resume(context, instance, network_info,3544 block_device_info)3545 instance.power_state = self._get_power_state(context, instance)3546 # We default to the ACTIVE state for backwards compatibility3547 instance.vm_state = instance.system_metadata.pop('old_vm_state',3548 vm_states.ACTIVE)3549 instance.task_state = None3550 instance.save(expected_task_state=task_states.RESUMING)3551 self._notify_about_instance_usage(context, instance, 'resume')3552 @wrap_exception()3553 @reverts_task_state3554 @wrap_instance_event3555 
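suspend_instance above stashes the current vm_state into system_metadata before suspending, and resume_instance pops it back out, defaulting to ACTIVE for older records that never stored it. The same stash/pop shape on a plain dict, with state names that are only illustrative:

ACTIVE, SUSPENDED, STOPPED = 'active', 'suspended', 'stopped'

def suspend(instance):
    # remember what the instance looked like before suspending
    instance['system_metadata']['old_vm_state'] = instance['vm_state']
    instance['vm_state'] = SUSPENDED

def resume(instance):
    # fall back to ACTIVE if the stash is missing (older records)
    instance['vm_state'] = instance['system_metadata'].pop('old_vm_state', ACTIVE)

inst = {'vm_state': STOPPED, 'system_metadata': {}}
suspend(inst)
resume(inst)
assert inst['vm_state'] == STOPPED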
    @wrap_instance_fault
    def shelve_instance(self, context, instance, image_id,
                        clean_shutdown=True):
        """Shelve an instance.

        This should be used when you want to take a snapshot of the instance.
        It also adds system_metadata that can be used by a periodic task to
        offload the shelved instance after a period of time.

        :param context: request context
        :param instance: an Instance object
        :param image_id: an image id to snapshot to.
        """
        self.conductor_api.notify_usage_exists(
            context, obj_base.obj_to_primitive(instance),
            current_period=True)
        self._notify_about_instance_usage(context, instance, 'shelve.start')

        def update_task_state(task_state, expected_state=task_states.SHELVING):
            shelving_state_map = {
                task_states.IMAGE_PENDING_UPLOAD:
                    task_states.SHELVING_IMAGE_PENDING_UPLOAD,
                task_states.IMAGE_UPLOADING:
                    task_states.SHELVING_IMAGE_UPLOADING,
                task_states.SHELVING: task_states.SHELVING}
            task_state = shelving_state_map[task_state]
            expected_state = shelving_state_map[expected_state]
            instance.task_state = task_state
            instance.save(expected_task_state=expected_state)

        self._power_off_instance(context, instance, clean_shutdown)
        current_power_state = self._get_power_state(context, instance)

        self.driver.snapshot(context, instance, image_id, update_task_state)

        instance.system_metadata['shelved_at'] = timeutils.strtime()
        instance.system_metadata['shelved_image_id'] = image_id
        instance.system_metadata['shelved_host'] = self.host
        instance.vm_state = vm_states.SHELVED
        instance.task_state = None
        if CONF.shelved_offload_time == 0:
            instance.task_state = task_states.SHELVING_OFFLOADING
        instance.power_state = current_power_state
        instance.save(expected_task_state=[
            task_states.SHELVING,
            task_states.SHELVING_IMAGE_UPLOADING])

        self._notify_about_instance_usage(context, instance, 'shelve.end')

        if CONF.shelved_offload_time == 0:
            self.shelve_offload_instance(context, instance)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def shelve_offload_instance(self, context, instance):
        """Remove a shelved instance from the hypervisor.

        This frees up those resources for use by other instances, but may lead
        to slower unshelve times for this instance.
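The nested update_task_state callback in shelve_instance above remaps the generic snapshot task states the driver reports into their shelving-specific counterparts before saving. A minimal standalone sketch of that mapping-callback idea, with shortened state names rather than nova's task_states constants:

IMAGE_PENDING_UPLOAD = 'image_pending_upload'
IMAGE_UPLOADING = 'image_uploading'
SHELVING = 'shelving'

SHELVING_STATE_MAP = {
    IMAGE_PENDING_UPLOAD: 'shelving_image_pending_upload',
    IMAGE_UPLOADING: 'shelving_image_uploading',
    SHELVING: SHELVING,
}

def make_update_task_state(instance):
    """Return a callback a snapshot routine can invoke with generic task states."""
    def update_task_state(task_state, expected_state=SHELVING):
        instance['task_state'] = SHELVING_STATE_MAP[task_state]
        instance['expected'] = SHELVING_STATE_MAP[expected_state]
    return update_task_state

inst = {'task_state': SHELVING}
cb = make_update_task_state(inst)
cb(IMAGE_PENDING_UPLOAD)
assert inst['task_state'] == 'shelving_image_pending_upload'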
This method is used by3605 volume backed instances since restoring them doesn't involve the3606 potentially large download of an image.3607 :param context: request context3608 :param instance: nova.objects.instance.Instance3609 """3610 self._notify_about_instance_usage(context, instance,3611 'shelve_offload.start')3612 self.driver.power_off(instance)3613 current_power_state = self._get_power_state(context, instance)3614 network_info = self._get_instance_nw_info(context, instance)3615 block_device_info = self._get_instance_block_device_info(context,3616 instance)3617 self.driver.destroy(context, instance, network_info,3618 block_device_info)3619 instance.power_state = current_power_state3620 instance.host = None3621 instance.node = None3622 instance.vm_state = vm_states.SHELVED_OFFLOADED3623 instance.task_state = None3624 instance.save(expected_task_state=[task_states.SHELVING,3625 task_states.SHELVING_OFFLOADING])3626 self._notify_about_instance_usage(context, instance,3627 'shelve_offload.end')3628 @wrap_exception()3629 @reverts_task_state3630 @wrap_instance_event3631 @wrap_instance_fault3632 def unshelve_instance(self, context, instance, image,3633 filter_properties=None, node=None):3634 """Unshelve the instance.3635 :param context: request context3636 :param instance: a nova.objects.instance.Instance object3637 :param image: an image to build from. If None we assume a3638 volume backed instance.3639 :param filter_properties: dict containing limits, retry info etc.3640 :param node: target compute node3641 """3642 if filter_properties is None:3643 filter_properties = {}3644 @utils.synchronized(instance['uuid'])3645 def do_unshelve_instance():3646 self._unshelve_instance(context, instance, image,3647 filter_properties, node)3648 do_unshelve_instance()3649 def _unshelve_instance_key_scrub(self, instance):3650 """Remove data from the instance that may cause side effects."""3651 cleaned_keys = dict(3652 key_data=instance.key_data,3653 auto_disk_config=instance.auto_disk_config)3654 instance.key_data = None3655 instance.auto_disk_config = False3656 return cleaned_keys3657 def _unshelve_instance_key_restore(self, instance, keys):3658 """Restore previously scrubbed keys before saving the instance."""3659 instance.update(keys)3660 def _unshelve_instance(self, context, instance, image, filter_properties,3661 node):3662 self._notify_about_instance_usage(context, instance, 'unshelve.start')3663 instance.task_state = task_states.SPAWNING3664 instance.save()3665 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(3666 context, instance.uuid)3667 block_device_info = self._prep_block_device(context, instance, bdms,3668 do_check_attach=False)3669 scrubbed_keys = self._unshelve_instance_key_scrub(instance)3670 if node is None:3671 node = self.driver.get_available_nodes()[0]3672 LOG.debug('No node specified, defaulting to %s', node,3673 instance=instance)3674 rt = self._get_resource_tracker(node)3675 limits = filter_properties.get('limits', {})3676 if image:3677 shelved_image_ref = instance.image_ref3678 instance.image_ref = image['id']3679 self.network_api.migrate_instance_finish(context, instance,3680 {'source_compute': '', 'dest_compute': self.host})3681 network_info = self._get_instance_nw_info(context, instance)3682 try:3683 with rt.instance_claim(context, instance, limits):3684 self.driver.spawn(context, instance, image, injected_files=[],3685 admin_password=None,3686 network_info=network_info,3687 block_device_info=block_device_info)3688 except Exception:3689 with 
excutils.save_and_reraise_exception():3690 LOG.exception(_LE('Instance failed to spawn'),3691 instance=instance)3692 if image:3693 instance.image_ref = shelved_image_ref3694 self.image_api.delete(context, image['id'])3695 self._unshelve_instance_key_restore(instance, scrubbed_keys)3696 instance.power_state = self._get_power_state(context, instance)3697 instance.vm_state = vm_states.ACTIVE3698 instance.task_state = None3699 instance.launched_at = timeutils.utcnow()3700 instance.save(expected_task_state=task_states.SPAWNING)3701 self._notify_about_instance_usage(context, instance, 'unshelve.end')3702 @messaging.expected_exceptions(NotImplementedError)3703 @wrap_instance_fault3704 def reset_network(self, context, instance):3705 """Reset networking on the given instance."""3706 LOG.debug('Reset network', context=context, instance=instance)3707 self.driver.reset_network(instance)3708 def _inject_network_info(self, context, instance, network_info):3709 """Inject network info for the given instance."""3710 LOG.debug('Inject network info', context=context, instance=instance)3711 LOG.debug('network_info to inject: |%s|', network_info,3712 instance=instance)3713 self.driver.inject_network_info(instance,3714 network_info)3715 @wrap_instance_fault3716 def inject_network_info(self, context, instance):3717 """Inject network info, but don't return the info."""3718 network_info = self._get_instance_nw_info(context, instance)3719 self._inject_network_info(context, instance, network_info)3720 @object_compat3721 @messaging.expected_exceptions(NotImplementedError,3722 exception.InstanceNotFound)3723 @wrap_exception()3724 @wrap_instance_fault3725 def get_console_output(self, context, instance, tail_length):3726 """Send the console output for the given instance."""3727 context = context.elevated()3728 LOG.audit(_("Get console output"), context=context,3729 instance=instance)3730 output = self.driver.get_console_output(context, instance)3731 if tail_length is not None:3732 output = self._tail_log(output, tail_length)3733 return output.decode('utf-8', 'replace').encode('ascii', 'replace')3734 def _tail_log(self, log, length):3735 try:3736 length = int(length)3737 except ValueError:3738 length = 03739 if length == 0:3740 return ''3741 else:3742 return '\n'.join(log.split('\n')[-int(length):])3743 @messaging.expected_exceptions(exception.ConsoleTypeInvalid,3744 exception.InstanceNotReady,3745 exception.InstanceNotFound,3746 exception.ConsoleTypeUnavailable,3747 NotImplementedError)3748 @object_compat3749 @wrap_exception()3750 @wrap_instance_fault3751 def get_vnc_console(self, context, console_type, instance):3752 """Return connection information for a vnc console."""3753 context = context.elevated()3754 LOG.debug("Getting vnc console", instance=instance)3755 token = str(uuid.uuid4())3756 if not CONF.vnc_enabled:3757 raise exception.ConsoleTypeUnavailable(console_type=console_type)3758 if console_type == 'novnc':3759 # For essex, novncproxy_base_url must include the full path3760 # including the html file (like http://myhost/vnc_auto.html)3761 access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)3762 elif console_type == 'xvpvnc':3763 access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)3764 else:3765 raise exception.ConsoleTypeInvalid(console_type=console_type)3766 try:3767 # Retrieve connect info from driver, and then decorate with our3768 # access info token3769 console = self.driver.get_vnc_console(context, instance)3770 connect_info = console.get_connection_info(token, access_url)3771 except 
exception.InstanceNotFound:3772 if instance['vm_state'] != vm_states.BUILDING:3773 raise3774 raise exception.InstanceNotReady(instance_id=instance['uuid'])3775 return connect_info3776 @object_compat3777 @messaging.expected_exceptions(exception.ConsoleTypeInvalid,3778 exception.InstanceNotReady,3779 exception.InstanceNotFound,3780 exception.ConsoleTypeUnavailable)3781 @wrap_exception()3782 @wrap_instance_fault3783 def get_spice_console(self, context, console_type, instance):3784 """Return connection information for a spice console."""3785 context = context.elevated()3786 LOG.debug("Getting spice console", instance=instance)3787 token = str(uuid.uuid4())3788 if not CONF.spice.enabled:3789 raise exception.ConsoleTypeUnavailable(console_type=console_type)3790 if console_type == 'spice-html5':3791 # For essex, spicehtml5proxy_base_url must include the full path3792 # including the html file (like http://myhost/spice_auto.html)3793 access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,3794 token)3795 else:3796 raise exception.ConsoleTypeInvalid(console_type=console_type)3797 try:3798 # Retrieve connect info from driver, and then decorate with our3799 # access info token3800 console = self.driver.get_spice_console(context, instance)3801 connect_info = console.get_connection_info(token, access_url)3802 except exception.InstanceNotFound:3803 if instance['vm_state'] != vm_states.BUILDING:3804 raise3805 raise exception.InstanceNotReady(instance_id=instance['uuid'])3806 return connect_info3807 @object_compat3808 @messaging.expected_exceptions(exception.ConsoleTypeInvalid,3809 exception.InstanceNotReady,3810 exception.InstanceNotFound,3811 exception.ConsoleTypeUnavailable,3812 NotImplementedError)3813 @wrap_exception()3814 @wrap_instance_fault3815 def get_rdp_console(self, context, console_type, instance):3816 """Return connection information for a RDP console."""3817 context = context.elevated()3818 LOG.debug("Getting RDP console", instance=instance)3819 token = str(uuid.uuid4())3820 if not CONF.rdp.enabled:3821 raise exception.ConsoleTypeUnavailable(console_type=console_type)3822 if console_type == 'rdp-html5':3823 access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,3824 token)3825 else:3826 raise exception.ConsoleTypeInvalid(console_type=console_type)3827 try:3828 # Retrieve connect info from driver, and then decorate with our3829 # access info token3830 console = self.driver.get_rdp_console(context, instance)3831 connect_info = console.get_connection_info(token, access_url)3832 except exception.InstanceNotFound:3833 if instance['vm_state'] != vm_states.BUILDING:3834 raise3835 raise exception.InstanceNotReady(instance_id=instance['uuid'])3836 return connect_info3837 @messaging.expected_exceptions(3838 exception.ConsoleTypeInvalid,3839 exception.InstanceNotReady,3840 exception.InstanceNotFound,3841 exception.ConsoleTypeUnavailable,3842 exception.SocketPortRangeExhaustedException,3843 exception.ImageSerialPortNumberInvalid,3844 exception.ImageSerialPortNumberExceedFlavorValue,3845 NotImplementedError)3846 @wrap_exception()3847 @wrap_instance_fault3848 def get_serial_console(self, context, console_type, instance):3849 """Returns connection information for a serial console."""3850 LOG.debug("Getting serial console", instance=instance)3851 if not CONF.serial_console.enabled:3852 raise exception.ConsoleTypeUnavailable(console_type=console_type)3853 context = context.elevated()3854 token = str(uuid.uuid4())3855 access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)3856 try:3857 # 
Retrieve connect info from driver, and then decorate with our3858 # access info token3859 console = self.driver.get_serial_console(context, instance)3860 connect_info = console.get_connection_info(token, access_url)3861 except exception.InstanceNotFound:3862 if instance.vm_state != vm_states.BUILDING:3863 raise3864 raise exception.InstanceNotReady(instance_id=instance['uuid'])3865 return connect_info3866 @messaging.expected_exceptions(exception.ConsoleTypeInvalid,3867 exception.InstanceNotReady,3868 exception.InstanceNotFound)3869 @object_compat3870 @wrap_exception()3871 @wrap_instance_fault3872 def validate_console_port(self, ctxt, instance, port, console_type):3873 if console_type == "spice-html5":3874 console_info = self.driver.get_spice_console(ctxt, instance)3875 elif console_type == "rdp-html5":3876 console_info = self.driver.get_rdp_console(ctxt, instance)3877 elif console_type == "serial":3878 console_info = self.driver.get_serial_console(ctxt, instance)3879 else:3880 console_info = self.driver.get_vnc_console(ctxt, instance)3881 return console_info.port == port3882 @object_compat3883 @wrap_exception()3884 @reverts_task_state3885 @wrap_instance_fault3886 def reserve_block_device_name(self, context, instance, device,3887 volume_id, disk_bus=None, device_type=None):3888 # NOTE(ndipanov): disk_bus and device_type will be set to None if not3889 # passed (by older clients) and defaulted by the virt driver. Remove3890 # default values on the next major RPC version bump.3891 @utils.synchronized(instance['uuid'])3892 def do_reserve():3893 bdms = (3894 objects.BlockDeviceMappingList.get_by_instance_uuid(3895 context, instance.uuid))3896 device_name = compute_utils.get_device_name_for_instance(3897 context, instance, bdms, device)3898 # NOTE(vish): create bdm here to avoid race condition3899 bdm = objects.BlockDeviceMapping(3900 source_type='volume', destination_type='volume',3901 instance_uuid=instance.uuid,3902 volume_id=volume_id or 'reserved',3903 device_name=device_name,3904 disk_bus=disk_bus, device_type=device_type)3905 bdm.create(context)3906 return device_name3907 return do_reserve()3908 @object_compat3909 @wrap_exception()3910 @reverts_task_state3911 @wrap_instance_fault3912 def attach_volume(self, context, volume_id, mountpoint,3913 instance, bdm=None):3914 """Attach a volume to an instance."""3915 if not bdm:3916 bdm = objects.BlockDeviceMapping.get_by_volume_id(3917 context, volume_id)3918 driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)3919 @utils.synchronized(instance.uuid)3920 def do_attach_volume(context, instance, driver_bdm):3921 try:3922 return self._attach_volume(context, instance, driver_bdm)3923 except Exception:3924 with excutils.save_and_reraise_exception():3925 bdm.destroy(context)3926 do_attach_volume(context, instance, driver_bdm)3927 def _attach_volume(self, context, instance, bdm):3928 context = context.elevated()3929 LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),3930 {'volume_id': bdm.volume_id,3931 'mountpoint': bdm['mount_device']},3932 context=context, instance=instance)3933 try:3934 bdm.attach(context, instance, self.volume_api, self.driver,3935 do_check_attach=False, do_driver_attach=True)3936 except Exception: # pylint: disable=W07023937 with excutils.save_and_reraise_exception():3938 LOG.exception(_LE("Failed to attach %(volume_id)s "3939 "at %(mountpoint)s"),3940 {'volume_id': bdm.volume_id,3941 'mountpoint': bdm['mount_device']},3942 context=context, instance=instance)3943 self.volume_api.unreserve_volume(context, 
bdm.volume_id)3944 info = {'volume_id': bdm.volume_id}3945 self._notify_about_instance_usage(3946 context, instance, "volume.attach", extra_usage_info=info)3947 def _detach_volume(self, context, instance, bdm):3948 """Do the actual driver detach using block device mapping."""3949 mp = bdm.device_name3950 volume_id = bdm.volume_id3951 LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),3952 {'volume_id': volume_id, 'mp': mp},3953 context=context, instance=instance)3954 connection_info = jsonutils.loads(bdm.connection_info)3955 # NOTE(vish): We currently don't use the serial when disconnecting,3956 # but added for completeness in case we ever do.3957 if connection_info and 'serial' not in connection_info:3958 connection_info['serial'] = volume_id3959 try:3960 if not self.driver.instance_exists(instance):3961 LOG.warn(_('Detaching volume from unknown instance'),3962 context=context, instance=instance)3963 encryption = encryptors.get_encryption_metadata(3964 context, self.volume_api, volume_id, connection_info)3965 self.driver.detach_volume(connection_info,3966 instance,3967 mp,3968 encryption=encryption)3969 except Exception: # pylint: disable=W07023970 with excutils.save_and_reraise_exception():3971 LOG.exception(_LE('Failed to detach volume %(volume_id)s '3972 'from %(mp)s'),3973 {'volume_id': volume_id, 'mp': mp},3974 context=context, instance=instance)3975 self.volume_api.roll_detaching(context, volume_id)3976 @object_compat3977 @wrap_exception()3978 @reverts_task_state3979 @wrap_instance_fault3980 def detach_volume(self, context, volume_id, instance):3981 """Detach a volume from an instance."""3982 bdm = objects.BlockDeviceMapping.get_by_volume_id(3983 context, volume_id)3984 if CONF.volume_usage_poll_interval > 0:3985 vol_stats = []3986 mp = bdm.device_name3987 # Handle bootable volumes which will not contain /dev/3988 if '/dev/' in mp:3989 mp = mp[5:]3990 try:3991 vol_stats = self.driver.block_stats(instance.name, mp)3992 except NotImplementedError:3993 pass3994 if vol_stats:3995 LOG.debug("Updating volume usage cache with totals",3996 instance=instance)3997 rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats3998 self.conductor_api.vol_usage_update(context, volume_id,3999 rd_req, rd_bytes,4000 wr_req, wr_bytes,4001 instance,4002 update_totals=True)4003 self._detach_volume(context, instance, bdm)4004 connector = self.driver.get_volume_connector(instance)4005 self.volume_api.terminate_connection(context, volume_id, connector)4006 bdm.destroy()4007 info = dict(volume_id=volume_id)4008 self._notify_about_instance_usage(4009 context, instance, "volume.detach", extra_usage_info=info)4010 self.volume_api.detach(context.elevated(), volume_id)4011 def _init_volume_connection(self, context, new_volume_id,4012 old_volume_id, connector, instance, bdm):4013 new_cinfo = self.volume_api.initialize_connection(context,4014 new_volume_id,4015 connector)4016 old_cinfo = jsonutils.loads(bdm['connection_info'])4017 if old_cinfo and 'serial' not in old_cinfo:4018 old_cinfo['serial'] = old_volume_id4019 new_cinfo['serial'] = old_cinfo['serial']4020 return (old_cinfo, new_cinfo)4021 def _swap_volume(self, context, instance, bdm, connector, old_volume_id,4022 new_volume_id):4023 mountpoint = bdm['device_name']4024 failed = False4025 new_cinfo = None4026 resize_to = 04027 try:4028 old_cinfo, new_cinfo = self._init_volume_connection(context,4029 new_volume_id,4030 old_volume_id,4031 connector,4032 instance,4033 bdm)4034 old_vol_size = self.volume_api.get(context, old_volume_id)['size']4035 
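Both _detach_volume and _init_volume_connection above make sure the connection info carries a 'serial' field, falling back to the volume id when the driver did not set one. The guard is just dict defaulting over the JSON-decoded connection info; a small sketch using the standard json module (jsonutils in the listing is a thin wrapper around it):

import json

def ensure_serial(connection_info_json, volume_id):
    """Return connection info as a dict, defaulting 'serial' to the volume id."""
    info = json.loads(connection_info_json) if connection_info_json else {}
    if info and 'serial' not in info:
        info['serial'] = volume_id
    return info

print(ensure_serial('{"driver_volume_type": "iscsi"}', 'vol-123'))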
new_vol_size = self.volume_api.get(context, new_volume_id)['size']4036 if new_vol_size > old_vol_size:4037 resize_to = new_vol_size4038 self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,4039 resize_to)4040 except Exception: # pylint: disable=W07024041 failed = True4042 with excutils.save_and_reraise_exception():4043 if new_cinfo:4044 msg = _LE("Failed to swap volume %(old_volume_id)s "4045 "for %(new_volume_id)s")4046 LOG.exception(msg, {'old_volume_id': old_volume_id,4047 'new_volume_id': new_volume_id},4048 context=context,4049 instance=instance)4050 else:4051 msg = _LE("Failed to connect to volume %(volume_id)s "4052 "with volume at %(mountpoint)s")4053 LOG.exception(msg, {'volume_id': new_volume_id,4054 'mountpoint': bdm['device_name']},4055 context=context,4056 instance=instance)4057 self.volume_api.roll_detaching(context, old_volume_id)4058 self.volume_api.unreserve_volume(context, new_volume_id)4059 finally:4060 conn_volume = new_volume_id if failed else old_volume_id4061 if new_cinfo:4062 self.volume_api.terminate_connection(context,4063 conn_volume,4064 connector)4065 # If Cinder initiated the swap, it will keep4066 # the original ID4067 comp_ret = self.volume_api.migrate_volume_completion(4068 context,4069 old_volume_id,4070 new_volume_id,4071 error=failed)4072 return (comp_ret, new_cinfo)4073 @wrap_exception()4074 @reverts_task_state4075 @wrap_instance_fault4076 def swap_volume(self, context, old_volume_id, new_volume_id, instance):4077 """Swap volume for an instance."""4078 context = context.elevated()4079 bdm = objects.BlockDeviceMapping.get_by_volume_id(4080 context, old_volume_id, instance_uuid=instance.uuid)4081 connector = self.driver.get_volume_connector(instance)4082 comp_ret, new_cinfo = self._swap_volume(context, instance,4083 bdm,4084 connector,4085 old_volume_id,4086 new_volume_id)4087 save_volume_id = comp_ret['save_volume_id']4088 mountpoint = bdm.device_name4089 # Update bdm4090 values = {4091 'connection_info': jsonutils.dumps(new_cinfo),4092 'delete_on_termination': False,4093 'source_type': 'volume',4094 'destination_type': 'volume',4095 'snapshot_id': None,4096 'volume_id': save_volume_id,4097 'volume_size': None,4098 'no_device': None}4099 bdm.update(values)4100 bdm.save()4101 self.volume_api.attach(context,4102 new_volume_id,4103 instance.uuid,4104 mountpoint)4105 # Remove old connection4106 self.volume_api.detach(context.elevated(), old_volume_id)4107 @wrap_exception()4108 def remove_volume_connection(self, context, volume_id, instance):4109 """Remove a volume connection using the volume api."""4110 # NOTE(vish): We don't want to actually mark the volume4111 # detached, or delete the bdm, just remove the4112 # connection from this host.4113 # NOTE(PhilDay): Can't use object_compat decorator here as4114 # instance is not the second parameter4115 if isinstance(instance, dict):4116 metas = ['metadata', 'system_metadata']4117 instance = objects.Instance._from_db_object(4118 context, objects.Instance(), instance,4119 expected_attrs=metas)4120 instance._context = context4121 try:4122 bdm = objects.BlockDeviceMapping.get_by_volume_id(4123 context, volume_id)4124 self._detach_volume(context, instance, bdm)4125 connector = self.driver.get_volume_connector(instance)4126 self.volume_api.terminate_connection(context, volume_id, connector)4127 except exception.NotFound:4128 pass4129 @object_compat4130 @wrap_exception()4131 @reverts_task_state4132 @wrap_instance_fault4133 def attach_interface(self, context, instance, network_id, port_id,4134 
requested_ip):4135 """Use hotplug to add an network adapter to an instance."""4136 network_info = self.network_api.allocate_port_for_instance(4137 context, instance, port_id, network_id, requested_ip)4138 if len(network_info) != 1:4139 LOG.error(_('allocate_port_for_instance returned %(ports)s ports')4140 % dict(ports=len(network_info)))4141 raise exception.InterfaceAttachFailed(4142 instance_uuid=instance.uuid)4143 image_ref = instance.get('image_ref')4144 image_meta = compute_utils.get_image_metadata(4145 context, self.image_api, image_ref, instance)4146 self.driver.attach_interface(instance, image_meta, network_info[0])4147 return network_info[0]4148 @object_compat4149 @wrap_exception()4150 @reverts_task_state4151 @wrap_instance_fault4152 def detach_interface(self, context, instance, port_id):4153 """Detach an network adapter from an instance."""4154 network_info = instance.info_cache.network_info4155 condemned = None4156 for vif in network_info:4157 if vif['id'] == port_id:4158 condemned = vif4159 break4160 if condemned is None:4161 raise exception.PortNotFound(_("Port %s is not "4162 "attached") % port_id)4163 self.network_api.deallocate_port_for_instance(context, instance,4164 port_id)4165 self.driver.detach_interface(instance, condemned)4166 def _get_compute_info(self, context, host):4167 service = objects.Service.get_by_compute_host(context, host)4168 try:4169 return service.compute_node4170 except IndexError:4171 raise exception.NotFound(_("Host %s not found") % host)4172 @wrap_exception()4173 def check_instance_shared_storage(self, ctxt, instance, data):4174 """Check if the instance files are shared4175 :param context: security context4176 :param data: result of driver.check_instance_shared_storage_local4177 Returns True if instance disks located on shared storage and4178 False otherwise.4179 """4180 return self.driver.check_instance_shared_storage_remote(ctxt, data)4181 @wrap_exception()4182 @wrap_instance_fault4183 def check_can_live_migrate_destination(self, ctxt, instance,4184 block_migration, disk_over_commit):4185 """Check if it is possible to execute live migration.4186 This runs checks on the destination host, and then calls4187 back to the source host to check the results.4188 :param context: security context4189 :param instance: dict of instance data4190 :param block_migration: if true, prepare for block migration4191 :param disk_over_commit: if true, allow disk over commit4192 :returns: a dict containing migration info4193 """4194 src_compute_info = obj_base.obj_to_primitive(4195 self._get_compute_info(ctxt, instance.host))4196 dst_compute_info = obj_base.obj_to_primitive(4197 self._get_compute_info(ctxt, CONF.host))4198 dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,4199 instance, src_compute_info, dst_compute_info,4200 block_migration, disk_over_commit)4201 migrate_data = {}4202 try:4203 migrate_data = self.compute_rpcapi.\4204 check_can_live_migrate_source(ctxt, instance,4205 dest_check_data)4206 finally:4207 self.driver.check_can_live_migrate_destination_cleanup(ctxt,4208 dest_check_data)4209 if 'migrate_data' in dest_check_data:4210 migrate_data.update(dest_check_data['migrate_data'])4211 return migrate_data4212 @wrap_exception()4213 @wrap_instance_fault4214 def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):4215 """Check if it is possible to execute live migration.4216 This checks if the live migration can succeed, based on the4217 results from check_can_live_migrate_destination.4218 :param context: security 
context4219 :param instance: dict of instance data4220 :param dest_check_data: result of check_can_live_migrate_destination4221 :returns: a dict containing migration info4222 """4223 is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,4224 instance)4225 dest_check_data['is_volume_backed'] = is_volume_backed4226 return self.driver.check_can_live_migrate_source(ctxt, instance,4227 dest_check_data)4228 @object_compat4229 @wrap_exception()4230 @wrap_instance_fault4231 def pre_live_migration(self, context, instance, block_migration, disk,4232 migrate_data):4233 """Preparations for live migration at dest host.4234 :param context: security context4235 :param instance: dict of instance data4236 :param block_migration: if true, prepare for block migration4237 :param migrate_data: if not None, it is a dict which holds data4238 required for live migration without shared4239 storage.4240 """4241 block_device_info = self._get_instance_block_device_info(4242 context, instance, refresh_conn_info=True)4243 network_info = self._get_instance_nw_info(context, instance)4244 self._notify_about_instance_usage(4245 context, instance, "live_migration.pre.start",4246 network_info=network_info)4247 pre_live_migration_data = self.driver.pre_live_migration(context,4248 instance,4249 block_device_info,4250 network_info,4251 disk,4252 migrate_data)4253 # NOTE(tr3buchet): setup networks on destination host4254 self.network_api.setup_networks_on_host(context, instance,4255 self.host)4256 # Creating filters to hypervisors and firewalls.4257 # An example is that nova-instance-instance-xxx,4258 # which is written to libvirt.xml(Check "virsh nwfilter-list")4259 # This nwfilter is necessary on the destination host.4260 # In addition, this method is creating filtering rule4261 # onto destination host.4262 self.driver.ensure_filtering_rules_for_instance(instance,4263 network_info)4264 self._notify_about_instance_usage(4265 context, instance, "live_migration.pre.end",4266 network_info=network_info)4267 return pre_live_migration_data4268 @wrap_exception()4269 @wrap_instance_fault4270 def live_migration(self, context, dest, instance, block_migration,4271 migrate_data):4272 """Executing live migration.4273 :param context: security context4274 :param instance: a nova.objects.instance.Instance object4275 :param dest: destination host4276 :param block_migration: if true, prepare for block migration4277 :param migrate_data: implementation specific params4278 """4279 # NOTE(danms): since instance is not the first parameter, we can't4280 # use @object_compat on this method. 
Since this is the only example,4281 # we do this manually instead of complicating the decorator4282 if not isinstance(instance, obj_base.NovaObject):4283 expected = ['metadata', 'system_metadata',4284 'security_groups', 'info_cache']4285 instance = objects.Instance._from_db_object(4286 context, objects.Instance(), instance,4287 expected_attrs=expected)4288 # Create a local copy since we'll be modifying the dictionary4289 migrate_data = dict(migrate_data or {})4290 try:4291 if block_migration:4292 disk = self.driver.get_instance_disk_info(instance.name)4293 else:4294 disk = None4295 pre_migration_data = self.compute_rpcapi.pre_live_migration(4296 context, instance,4297 block_migration, disk, dest, migrate_data)4298 migrate_data['pre_live_migration_result'] = pre_migration_data4299 except Exception:4300 with excutils.save_and_reraise_exception():4301 LOG.exception(_LE('Pre live migration failed at %s'),4302 dest, instance=instance)4303 self._rollback_live_migration(context, instance, dest,4304 block_migration, migrate_data)4305 # Executing live migration4306 # live_migration might raises exceptions, but4307 # nothing must be recovered in this version.4308 self.driver.live_migration(context, instance, dest,4309 self._post_live_migration,4310 self._rollback_live_migration,4311 block_migration, migrate_data)4312 def _live_migration_cleanup_flags(self, block_migration, migrate_data):4313 """Determine whether disks or intance path need to be cleaned up after4314 live migration (at source on success, at destination on rollback)4315 Block migration needs empty image at destination host before migration4316 starts, so if any failure occurs, any empty images has to be deleted.4317 Also Volume backed live migration w/o shared storage needs to delete4318 newly created instance-xxx dir on the destination as a part of its4319 rollback process4320 :param block_migration: if true, it was a block migration4321 :param migrate_data: implementation specific data4322 :returns: (bool, bool) -- do_cleanup, destroy_disks4323 """4324 # NOTE(angdraug): block migration wouldn't have been allowed if either4325 # block storage or instance path were shared4326 is_shared_block_storage = not block_migration4327 is_shared_instance_path = not block_migration4328 if migrate_data:4329 is_shared_block_storage = migrate_data.get(4330 'is_shared_block_storage', is_shared_block_storage)4331 is_shared_instance_path = migrate_data.get(4332 'is_shared_instance_path', is_shared_instance_path)4333 # No instance booting at source host, but instance dir4334 # must be deleted for preparing next block migration4335 # must be deleted for preparing next live migration w/o shared storage4336 do_cleanup = block_migration or not is_shared_instance_path4337 destroy_disks = not is_shared_block_storage4338 return (do_cleanup, destroy_disks)4339 @wrap_exception()4340 @wrap_instance_fault4341 def _post_live_migration(self, ctxt, instance,4342 dest, block_migration=False, migrate_data=None):4343 """Post operations for live migration.4344 This method is called from live_migration4345 and mainly updating database record.4346 :param ctxt: security context4347 :param instance: instance dict4348 :param dest: destination host4349 :param block_migration: if true, prepare for block migration4350 :param migrate_data: if not None, it is a dict which has data4351 required for live migration without shared storage4352 """4353 LOG.info(_('_post_live_migration() is started..'),4354 instance=instance)4355 bdms = 
objects.BlockDeviceMappingList.get_by_instance_uuid(4356 ctxt, instance['uuid'])4357 # Cleanup source host post live-migration4358 block_device_info = self._get_instance_block_device_info(4359 ctxt, instance, bdms=bdms)4360 self.driver.post_live_migration(ctxt, instance, block_device_info,4361 migrate_data)4362 # Detaching volumes.4363 connector = self.driver.get_volume_connector(instance)4364 for bdm in bdms:4365 # NOTE(vish): We don't want to actually mark the volume4366 # detached, or delete the bdm, just remove the4367 # connection from this host.4368 # remove the volume connection without detaching from hypervisor4369 # because the instance is not running anymore on the current host4370 if bdm.is_volume:4371 self.volume_api.terminate_connection(ctxt, bdm.volume_id,4372 connector)4373 # Releasing vlan.4374 # (not necessary in current implementation?)4375 network_info = self._get_instance_nw_info(ctxt, instance)4376 self._notify_about_instance_usage(ctxt, instance,4377 "live_migration._post.start",4378 network_info=network_info)4379 # Releasing security group ingress rule.4380 self.driver.unfilter_instance(instance,4381 network_info)4382 migration = {'source_compute': self.host,4383 'dest_compute': dest, }4384 self.network_api.migrate_instance_start(ctxt,4385 instance,4386 migration)4387 destroy_vifs = False4388 try:4389 self.driver.post_live_migration_at_source(ctxt, instance,4390 network_info)4391 except NotImplementedError as ex:4392 LOG.debug(ex, instance=instance)4393 # For all hypervisors other than libvirt, there is a possibility4394 # they are unplugging networks from source node in the cleanup4395 # method4396 destroy_vifs = True4397 # Define domain at destination host, without doing it,4398 # pause/suspend/terminate do not work.4399 self.compute_rpcapi.post_live_migration_at_destination(ctxt,4400 instance, block_migration, dest)4401 do_cleanup, destroy_disks = self._live_migration_cleanup_flags(4402 block_migration, migrate_data)4403 if do_cleanup:4404 self.driver.cleanup(ctxt, instance, network_info,4405 destroy_disks=destroy_disks,4406 migrate_data=migrate_data,4407 destroy_vifs=destroy_vifs)4408 # NOTE(tr3buchet): tear down networks on source host4409 self.network_api.setup_networks_on_host(ctxt, instance,4410 self.host, teardown=True)4411 self.instance_events.clear_events_for_instance(instance)4412 # NOTE(timello): make sure we update available resources on source4413 # host even before next periodic task.4414 self.update_available_resource(ctxt)4415 self._notify_about_instance_usage(ctxt, instance,4416 "live_migration._post.end",4417 network_info=network_info)4418 LOG.info(_('Migrating instance to %s finished successfully.'),4419 dest, instance=instance)4420 LOG.info(_("You may see the error \"libvirt: QEMU error: "4421 "Domain not found: no domain with matching name.\" "4422 "This error can be safely ignored."),4423 instance=instance)4424 if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:4425 if CONF.cells.enable:4426 self.cells_rpcapi.consoleauth_delete_tokens(ctxt,4427 instance['uuid'])4428 else:4429 self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,4430 instance['uuid'])4431 @object_compat4432 @wrap_exception()4433 @wrap_instance_fault4434 def post_live_migration_at_destination(self, context, instance,4435 block_migration):4436 """Post operations for live migration .4437 :param context: security context4438 :param instance: Instance dict4439 :param block_migration: if true, prepare for block migration4440 """4441 LOG.info(_('Post operation of 
migration started'),4442 instance=instance)4443 # NOTE(tr3buchet): setup networks on destination host4444 # this is called a second time because4445 # multi_host does not create the bridge in4446 # plug_vifs4447 self.network_api.setup_networks_on_host(context, instance,4448 self.host)4449 migration = {'source_compute': instance['host'],4450 'dest_compute': self.host, }4451 self.network_api.migrate_instance_finish(context,4452 instance,4453 migration)4454 network_info = self._get_instance_nw_info(context, instance)4455 self._notify_about_instance_usage(4456 context, instance, "live_migration.post.dest.start",4457 network_info=network_info)4458 block_device_info = self._get_instance_block_device_info(context,4459 instance)4460 self.driver.post_live_migration_at_destination(context, instance,4461 network_info,4462 block_migration, block_device_info)4463 # Restore instance state4464 current_power_state = self._get_power_state(context, instance)4465 node_name = None4466 try:4467 compute_node = self._get_compute_info(context, self.host)4468 node_name = compute_node.hypervisor_hostname4469 except exception.NotFound:4470 LOG.exception(_LE('Failed to get compute_info for %s'), self.host)4471 finally:4472 instance.host = self.host4473 instance.power_state = current_power_state4474 instance.vm_state = vm_states.ACTIVE4475 instance.task_state = None4476 instance.node = node_name4477 instance.save(expected_task_state=task_states.MIGRATING)4478 # NOTE(vish): this is necessary to update dhcp4479 self.network_api.setup_networks_on_host(context, instance, self.host)4480 self._notify_about_instance_usage(4481 context, instance, "live_migration.post.dest.end",4482 network_info=network_info)4483 @wrap_exception()4484 @wrap_instance_fault4485 def _rollback_live_migration(self, context, instance,4486 dest, block_migration, migrate_data=None):4487 """Recovers Instance/volume state from migrating -> running.4488 :param context: security context4489 :param instance: nova.db.sqlalchemy.models.Instance4490 :param dest:4491 This method is called from live migration src host.4492 This param specifies destination host.4493 :param block_migration: if true, prepare for block migration4494 :param migrate_data:4495 if not none, contains implementation specific data.4496 """4497 instance.vm_state = vm_states.ACTIVE4498 instance.task_state = None4499 instance.save(expected_task_state=[task_states.MIGRATING])4500 # NOTE(tr3buchet): setup networks on source host (really it's re-setup)4501 self.network_api.setup_networks_on_host(context, instance, self.host)4502 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(4503 context, instance['uuid'])4504 for bdm in bdms:4505 if bdm.is_volume:4506 self.compute_rpcapi.remove_volume_connection(4507 context, instance, bdm.volume_id, dest)4508 self._notify_about_instance_usage(context, instance,4509 "live_migration._rollback.start")4510 do_cleanup, destroy_disks = self._live_migration_cleanup_flags(4511 block_migration, migrate_data)4512 if do_cleanup:4513 self.compute_rpcapi.rollback_live_migration_at_destination(4514 context, instance, dest, destroy_disks=destroy_disks,4515 migrate_data=migrate_data)4516 self._notify_about_instance_usage(context, instance,4517 "live_migration._rollback.end")4518 @object_compat4519 @wrap_exception()4520 @wrap_instance_fault4521 def rollback_live_migration_at_destination(self, context, instance,4522 destroy_disks=True,4523 migrate_data=None):4524 """Cleaning up image directory that is created pre_live_migration.4525 :param context: security 
context4526 :param instance: a nova.objects.instance.Instance object sent over rpc4527 """4528 network_info = self._get_instance_nw_info(context, instance)4529 self._notify_about_instance_usage(4530 context, instance, "live_migration.rollback.dest.start",4531 network_info=network_info)4532 # NOTE(tr3buchet): tear down networks on destination host4533 self.network_api.setup_networks_on_host(context, instance,4534 self.host, teardown=True)4535 # NOTE(vish): The mapping is passed in so the driver can disconnect4536 # from remote volumes if necessary4537 block_device_info = self._get_instance_block_device_info(context,4538 instance)4539 self.driver.rollback_live_migration_at_destination(4540 context, instance, network_info, block_device_info,4541 destroy_disks=destroy_disks, migrate_data=migrate_data)4542 self._notify_about_instance_usage(4543 context, instance, "live_migration.rollback.dest.end",4544 network_info=network_info)4545 @periodic_task.periodic_task(4546 spacing=CONF.heal_instance_info_cache_interval)4547 def _heal_instance_info_cache(self, context):4548 """Called periodically. On every call, try to update the4549 info_cache's network information for another instance by4550 calling to the network manager.4551 This is implemented by keeping a cache of uuids of instances4552 that live on this host. On each call, we pop one off of a4553 list, pull the DB record, and try the call to the network API.4554 If anything errors don't fail, as it's possible the instance4555 has been deleted, etc.4556 """4557 heal_interval = CONF.heal_instance_info_cache_interval4558 if not heal_interval:4559 return4560 instance_uuids = getattr(self, '_instance_uuids_to_heal', [])4561 instance = None4562 LOG.debug('Starting heal instance info cache')4563 if not instance_uuids:4564 # The list of instances to heal is empty so rebuild it4565 LOG.debug('Rebuilding the list of instances to heal')4566 db_instances = objects.InstanceList.get_by_host(4567 context, self.host, expected_attrs=[], use_slave=True)4568 for inst in db_instances:4569 # We don't want to refersh the cache for instances4570 # which are building or deleting so don't put them4571 # in the list. If they are building they will get4572 # added to the list next time we build it.4573 if (inst.vm_state == vm_states.BUILDING):4574 LOG.debug('Skipping network cache update for instance '4575 'because it is Building.', instance=inst)4576 continue4577 if (inst.task_state == task_states.DELETING):4578 LOG.debug('Skipping network cache update for instance '4579 'because it is being deleted.', instance=inst)4580 continue4581 if not instance:4582 # Save the first one we find so we don't4583 # have to get it again4584 instance = inst4585 else:4586 instance_uuids.append(inst['uuid'])4587 self._instance_uuids_to_heal = instance_uuids4588 else:4589 # Find the next valid instance on the list4590 while instance_uuids:4591 try:4592 inst = objects.Instance.get_by_uuid(4593 context, instance_uuids.pop(0),4594 expected_attrs=['system_metadata', 'info_cache'],4595 use_slave=True)4596 except exception.InstanceNotFound:4597 # Instance is gone. 
Try to grab another.4598 continue4599 # Check the instance hasn't been migrated4600 if inst.host != self.host:4601 LOG.debug('Skipping network cache update for instance '4602 'because it has been migrated to another '4603 'host.', instance=inst)4604 # Check the instance isn't being deleting4605 elif inst.task_state == task_states.DELETING:4606 LOG.debug('Skipping network cache update for instance '4607 'because it is being deleted.', instance=inst)4608 else:4609 instance = inst4610 break4611 if instance:4612 # We have an instance now to refresh4613 try:4614 # Call to network API to get instance info.. this will4615 # force an update to the instance's info_cache4616 self._get_instance_nw_info(context, instance, use_slave=True)4617 LOG.debug('Updated the network info_cache for instance',4618 instance=instance)4619 except Exception:4620 LOG.error(_('An error occurred while refreshing the network '4621 'cache.'), instance=instance, exc_info=True)4622 else:4623 LOG.debug("Didn't find any instances for network info cache "4624 "update.")4625 @periodic_task.periodic_task4626 def _poll_rebooting_instances(self, context):4627 if CONF.reboot_timeout > 0:4628 filters = {'task_state': task_states.REBOOTING,4629 'host': self.host}4630 rebooting = objects.InstanceList.get_by_filters(4631 context, filters, expected_attrs=[], use_slave=True)4632 to_poll = []4633 for instance in rebooting:4634 if timeutils.is_older_than(instance['updated_at'],4635 CONF.reboot_timeout):4636 to_poll.append(instance)4637 self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)4638 @periodic_task.periodic_task4639 def _poll_rescued_instances(self, context):4640 if CONF.rescue_timeout > 0:4641 filters = {'vm_state': vm_states.RESCUED,4642 'host': self.host}4643 rescued_instances = objects.InstanceList.get_by_filters(4644 context, filters, expected_attrs=["system_metadata"],4645 use_slave=True)4646 to_unrescue = []4647 for instance in rescued_instances:4648 if timeutils.is_older_than(instance['launched_at'],4649 CONF.rescue_timeout):4650 to_unrescue.append(instance)4651 for instance in to_unrescue:4652 self.compute_api.unrescue(context, instance)4653 @periodic_task.periodic_task4654 def _poll_unconfirmed_resizes(self, context):4655 if CONF.resize_confirm_window == 0:4656 return4657 migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(4658 context, CONF.resize_confirm_window, self.host,4659 use_slave=True)4660 migrations_info = dict(migration_count=len(migrations),4661 confirm_window=CONF.resize_confirm_window)4662 if migrations_info["migration_count"] > 0:4663 LOG.info(_("Found %(migration_count)d unconfirmed migrations "4664 "older than %(confirm_window)d seconds"),4665 migrations_info)4666 def _set_migration_to_error(migration, reason, **kwargs):4667 LOG.warn(_("Setting migration %(migration_id)s to error: "4668 "%(reason)s"),4669 {'migration_id': migration['id'], 'reason': reason},4670 **kwargs)4671 migration.status = 'error'4672 migration.save(context.elevated())4673 for migration in migrations:4674 instance_uuid = migration.instance_uuid4675 LOG.info(_("Automatically confirming migration "4676 "%(migration_id)s for instance %(instance_uuid)s"),4677 {'migration_id': migration.id,4678 'instance_uuid': instance_uuid})4679 expected_attrs = ['metadata', 'system_metadata']4680 try:4681 instance = objects.Instance.get_by_uuid(context,4682 instance_uuid, expected_attrs=expected_attrs,4683 use_slave=True)4684 except exception.InstanceNotFound:4685 reason = (_("Instance %s not found") %4686 instance_uuid)4687 
_set_migration_to_error(migration, reason)4688 continue4689 if instance['vm_state'] == vm_states.ERROR:4690 reason = _("In ERROR state")4691 _set_migration_to_error(migration, reason,4692 instance=instance)4693 continue4694 # race condition: The instance in DELETING state should not be4695 # set the migration state to error, otherwise the instance in4696 # to be deleted which is in RESIZED state4697 # will not be able to confirm resize4698 if instance.task_state in [task_states.DELETING,4699 task_states.SOFT_DELETING]:4700 msg = ("Instance being deleted or soft deleted during resize "4701 "confirmation. Skipping.")4702 LOG.debug(msg, instance=instance)4703 continue4704 vm_state = instance['vm_state']4705 task_state = instance['task_state']4706 if vm_state != vm_states.RESIZED or task_state is not None:4707 reason = (_("In states %(vm_state)s/%(task_state)s, not "4708 "RESIZED/None") %4709 {'vm_state': vm_state,4710 'task_state': task_state})4711 _set_migration_to_error(migration, reason,4712 instance=instance)4713 continue4714 try:4715 self.compute_api.confirm_resize(context, instance,4716 migration=migration)4717 except Exception as e:4718 LOG.info(_("Error auto-confirming resize: %s. "4719 "Will retry later."),4720 e, instance=instance)4721 @compute_utils.periodic_task_spacing_warn("shelved_poll_interval")4722 @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)4723 def _poll_shelved_instances(self, context):4724 if CONF.shelved_offload_time <= 0:4725 return4726 filters = {'vm_state': vm_states.SHELVED,4727 'host': self.host}4728 shelved_instances = objects.InstanceList.get_by_filters(4729 context, filters=filters, expected_attrs=['system_metadata'],4730 use_slave=True)4731 to_gc = []4732 for instance in shelved_instances:4733 sys_meta = instance.system_metadata4734 shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])4735 if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):4736 to_gc.append(instance)4737 for instance in to_gc:4738 try:4739 instance.task_state = task_states.SHELVING_OFFLOADING4740 instance.save()4741 self.shelve_offload_instance(context, instance)4742 except Exception:4743 LOG.exception(_LE('Periodic task failed to offload instance.'),4744 instance=instance)4745 @periodic_task.periodic_task4746 def _instance_usage_audit(self, context):4747 if not CONF.instance_usage_audit:4748 return4749 if compute_utils.has_audit_been_run(context,4750 self.conductor_api,4751 self.host):4752 return4753 begin, end = utils.last_completed_audit_period()4754 instances = objects.InstanceList.get_active_by_window_joined(4755 context, begin, end, host=self.host,4756 expected_attrs=['system_metadata', 'info_cache', 'metadata'],4757 use_slave=True)4758 num_instances = len(instances)4759 errors = 04760 successes = 04761 LOG.info(_("Running instance usage audit for"4762 " host %(host)s from %(begin_time)s to "4763 "%(end_time)s. 
%(number_instances)s"4764 " instances."),4765 dict(host=self.host,4766 begin_time=begin,4767 end_time=end,4768 number_instances=num_instances))4769 start_time = time.time()4770 compute_utils.start_instance_usage_audit(context,4771 self.conductor_api,4772 begin, end,4773 self.host, num_instances)4774 for instance in instances:4775 try:4776 self.conductor_api.notify_usage_exists(4777 context, instance,4778 ignore_missing_network_data=False)4779 successes += 14780 except Exception:4781 LOG.exception(_LE('Failed to generate usage '4782 'audit for instance '4783 'on host %s'), self.host,4784 instance=instance)4785 errors += 14786 compute_utils.finish_instance_usage_audit(context,4787 self.conductor_api,4788 begin, end,4789 self.host, errors,4790 "Instance usage audit ran "4791 "for host %s, %s instances "4792 "in %s seconds." % (4793 self.host,4794 num_instances,4795 time.time() - start_time))4796 @compute_utils.periodic_task_spacing_warn("bandwidth_poll_interval")4797 @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)4798 def _poll_bandwidth_usage(self, context):4799 if (CONF.bandwidth_poll_interval <= 0 or not self._bw_usage_supported):4800 return4801 prev_time, start_time = utils.last_completed_audit_period()4802 curr_time = time.time()4803 if (curr_time - self._last_bw_usage_poll >4804 CONF.bandwidth_poll_interval):4805 self._last_bw_usage_poll = curr_time4806 LOG.info(_("Updating bandwidth usage cache"))4807 cells_update_interval = CONF.cells.bandwidth_update_interval4808 if (cells_update_interval > 0 and4809 curr_time - self._last_bw_usage_cell_update >4810 cells_update_interval):4811 self._last_bw_usage_cell_update = curr_time4812 update_cells = True4813 else:4814 update_cells = False4815 instances = objects.InstanceList.get_by_host(context,4816 self.host,4817 use_slave=True)4818 try:4819 bw_counters = self.driver.get_all_bw_counters(instances)4820 except NotImplementedError:4821 # NOTE(mdragon): Not all hypervisors have bandwidth polling4822 # implemented yet. 
If they don't it doesn't break anything,4823 # they just don't get the info in the usage events.4824 # NOTE(PhilDay): Record that its not supported so we can4825 # skip fast on future calls rather than waste effort getting4826 # the list of instances.4827 LOG.warning(_("Bandwidth usage not supported by hypervisor."))4828 self._bw_usage_supported = False4829 return4830 refreshed = timeutils.utcnow()4831 for bw_ctr in bw_counters:4832 # Allow switching of greenthreads between queries.4833 greenthread.sleep(0)4834 bw_in = 04835 bw_out = 04836 last_ctr_in = None4837 last_ctr_out = None4838 usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(4839 context, bw_ctr['uuid'], bw_ctr['mac_address'],4840 start_period=start_time, use_slave=True)4841 if usage:4842 bw_in = usage.bw_in4843 bw_out = usage.bw_out4844 last_ctr_in = usage.last_ctr_in4845 last_ctr_out = usage.last_ctr_out4846 else:4847 usage = (objects.BandwidthUsage.4848 get_by_instance_uuid_and_mac(4849 context, bw_ctr['uuid'], bw_ctr['mac_address'],4850 start_period=prev_time, use_slave=True))4851 if usage:4852 last_ctr_in = usage.last_ctr_in4853 last_ctr_out = usage.last_ctr_out4854 if last_ctr_in is not None:4855 if bw_ctr['bw_in'] < last_ctr_in:4856 # counter rollover4857 bw_in += bw_ctr['bw_in']4858 else:4859 bw_in += (bw_ctr['bw_in'] - last_ctr_in)4860 if last_ctr_out is not None:4861 if bw_ctr['bw_out'] < last_ctr_out:4862 # counter rollover4863 bw_out += bw_ctr['bw_out']4864 else:4865 bw_out += (bw_ctr['bw_out'] - last_ctr_out)4866 objects.BandwidthUsage.create(context,4867 bw_ctr['uuid'],4868 bw_ctr['mac_address'],4869 bw_in,4870 bw_out,4871 bw_ctr['bw_in'],4872 bw_ctr['bw_out'],4873 start_period=start_time,4874 last_refreshed=refreshed,4875 update_cells=update_cells)4876 def _get_host_volume_bdms(self, context, use_slave=False):4877 """Return all block device mappings on a compute host."""4878 compute_host_bdms = []4879 instances = objects.InstanceList.get_by_host(context, self.host)4880 for instance in instances:4881 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(4882 context, instance.uuid, use_slave=use_slave)4883 instance_bdms = [bdm for bdm in bdms if bdm.is_volume]4884 compute_host_bdms.append(dict(instance=instance,4885 instance_bdms=instance_bdms))4886 return compute_host_bdms4887 def _update_volume_usage_cache(self, context, vol_usages):4888 """Updates the volume usage cache table with a list of stats."""4889 for usage in vol_usages:4890 # Allow switching of greenthreads between queries.4891 greenthread.sleep(0)4892 self.conductor_api.vol_usage_update(context, usage['volume'],4893 usage['rd_req'],4894 usage['rd_bytes'],4895 usage['wr_req'],4896 usage['wr_bytes'],4897 usage['instance'])4898 @periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)4899 def _poll_volume_usage(self, context, start_time=None):4900 if CONF.volume_usage_poll_interval == 0:4901 return4902 if not start_time:4903 start_time = utils.last_completed_audit_period()[1]4904 compute_host_bdms = self._get_host_volume_bdms(context,4905 use_slave=True)4906 if not compute_host_bdms:4907 return4908 LOG.debug("Updating volume usage cache")4909 try:4910 vol_usages = self.driver.get_all_volume_usage(context,4911 compute_host_bdms)4912 except NotImplementedError:4913 return4914 self._update_volume_usage_cache(context, vol_usages)4915 @compute_utils.periodic_task_spacing_warn("sync_power_state_interval")4916 @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,4917 run_immediately=True)4918 def _sync_power_states(self, 
context):4919 """Align power states between the database and the hypervisor.4920 To sync power state data we make a DB call to get the number of4921 virtual machines known by the hypervisor and if the number matches the4922 number of virtual machines known by the database, we proceed in a lazy4923 loop, one database record at a time, checking if the hypervisor has the4924 same power state as is in the database.4925 """4926 db_instances = objects.InstanceList.get_by_host(context,4927 self.host,4928 use_slave=True)4929 num_vm_instances = self.driver.get_num_instances()4930 num_db_instances = len(db_instances)4931 if num_vm_instances != num_db_instances:4932 LOG.warn(_("Found %(num_db_instances)s in the database and "4933 "%(num_vm_instances)s on the hypervisor."),4934 {'num_db_instances': num_db_instances,4935 'num_vm_instances': num_vm_instances})4936 def _sync(db_instance):4937 # NOTE(melwitt): This must be synchronized as we query state from4938 # two separate sources, the driver and the database.4939 # They are set (in stop_instance) and read, in sync.4940 @utils.synchronized(db_instance.uuid)4941 def query_driver_power_state_and_sync():4942 self._query_driver_power_state_and_sync(context, db_instance)4943 try:4944 query_driver_power_state_and_sync()4945 except Exception:4946 LOG.exception(_LE("Periodic sync_power_state task had an "4947 "error while processing an instance."),4948 instance=db_instance)4949 self._syncs_in_progress.pop(db_instance.uuid)4950 for db_instance in db_instances:4951 # process syncs asynchronously - don't want instance locking to4952 # block entire periodic task thread4953 uuid = db_instance.uuid4954 if uuid in self._syncs_in_progress:4955 LOG.debug('Sync already in progress for %s' % uuid)4956 else:4957 LOG.debug('Triggering sync for uuid %s' % uuid)4958 self._syncs_in_progress[uuid] = True4959 self._sync_power_pool.spawn_n(_sync, db_instance)4960 def _query_driver_power_state_and_sync(self, context, db_instance):4961 if db_instance.task_state is not None:4962 LOG.info(_LI("During sync_power_state the instance has a "4963 "pending task (%(task)s). Skip."),4964 {'task': db_instance.task_state}, instance=db_instance)4965 return4966 # No pending tasks. 
Now try to figure out the real vm_power_state.4967 try:4968 vm_instance = self.driver.get_info(db_instance)4969 vm_power_state = vm_instance['state']4970 except exception.InstanceNotFound:4971 vm_power_state = power_state.NOSTATE4972 # Note(maoy): the above get_info call might take a long time,4973 # for example, because of a broken libvirt driver.4974 try:4975 self._sync_instance_power_state(context,4976 db_instance,4977 vm_power_state,4978 use_slave=True)4979 except exception.InstanceNotFound:4980 # NOTE(hanlind): If the instance gets deleted during sync,4981 # silently ignore.4982 pass4983 def _sync_instance_power_state(self, context, db_instance, vm_power_state,4984 use_slave=False):4985 """Align instance power state between the database and hypervisor.4986 If the instance is not found on the hypervisor, but is in the database,4987 then a stop() API will be called on the instance.4988 """4989 # We re-query the DB to get the latest instance info to minimize4990 # (not eliminate) race condition.4991 db_instance.refresh(use_slave=use_slave)4992 db_power_state = db_instance.power_state4993 vm_state = db_instance.vm_state4994 if self.host != db_instance.host:4995 # on the sending end of nova-compute _sync_power_state4996 # may have yielded to the greenthread performing a live4997 # migration; this in turn has changed the resident-host4998 # for the VM; However, the instance is still active, it4999 # is just in the process of migrating to another host.5000 # This implies that the compute source must relinquish5001 # control to the compute destination.5002 LOG.info(_("During the sync_power process the "5003 "instance has moved from "5004 "host %(src)s to host %(dst)s") %5005 {'src': db_instance.host,5006 'dst': self.host},5007 instance=db_instance)5008 return5009 elif db_instance.task_state is not None:5010 # on the receiving end of nova-compute, it could happen5011 # that the DB instance already report the new resident5012 # but the actual VM has not showed up on the hypervisor5013 # yet. In this case, let's allow the loop to continue5014 # and run the state sync in a later round5015 LOG.info(_("During sync_power_state the instance has a "5016 "pending task (%(task)s). Skip."),5017 {'task': db_instance.task_state},5018 instance=db_instance)5019 return5020 if vm_power_state != db_power_state:5021 # power_state is always updated from hypervisor to db5022 db_instance.power_state = vm_power_state5023 db_instance.save()5024 db_power_state = vm_power_state5025 # Note(maoy): Now resolve the discrepancy between vm_state and5026 # vm_power_state. We go through all possible vm_states.5027 if vm_state in (vm_states.BUILDING,5028 vm_states.RESCUED,5029 vm_states.RESIZED,5030 vm_states.SUSPENDED,5031 vm_states.ERROR):5032 # TODO(maoy): we ignore these vm_state for now.5033 pass5034 elif vm_state == vm_states.ACTIVE:5035 # The only rational power state should be RUNNING5036 if vm_power_state in (power_state.SHUTDOWN,5037 power_state.CRASHED):5038 LOG.warn(_LW("Instance shutdown by itself. Calling the stop "5039 "API. 
Current vm_state: %(vm_state)s, current "5040 "task_state: %(task_state)s, current DB "5041 "power_state: %(db_power_state)s, current VM "5042 "power_state: %(vm_power_state)s"),5043 {'vm_state': vm_state,5044 'task_state': db_instance.task_state,5045 'db_power_state': db_power_state,5046 'vm_power_state': vm_power_state},5047 instance=db_instance)5048 try:5049 # Note(maoy): here we call the API instead of5050 # brutally updating the vm_state in the database5051 # to allow all the hooks and checks to be performed.5052 if db_instance.shutdown_terminate:5053 self.compute_api.delete(context, db_instance)5054 else:5055 self.compute_api.stop(context, db_instance)5056 except Exception:5057 # Note(maoy): there is no need to propagate the error5058 # because the same power_state will be retrieved next5059 # time and retried.5060 # For example, there might be another task scheduled.5061 LOG.exception(_LE("error during stop() in "5062 "sync_power_state."),5063 instance=db_instance)5064 elif vm_power_state == power_state.SUSPENDED:5065 LOG.warn(_("Instance is suspended unexpectedly. Calling "5066 "the stop API."), instance=db_instance)5067 try:5068 self.compute_api.stop(context, db_instance)5069 except Exception:5070 LOG.exception(_LE("error during stop() in "5071 "sync_power_state."),5072 instance=db_instance)5073 elif vm_power_state == power_state.PAUSED:5074 # Note(maoy): a VM may get into the paused state not only5075 # because the user request via API calls, but also5076 # due to (temporary) external instrumentations.5077 # Before the virt layer can reliably report the reason,5078 # we simply ignore the state discrepancy. In many cases,5079 # the VM state will go back to running after the external5080 # instrumentation is done. See bug 1097806 for details.5081 LOG.warn(_("Instance is paused unexpectedly. Ignore."),5082 instance=db_instance)5083 elif vm_power_state == power_state.NOSTATE:5084 # Occasionally, depending on the status of the hypervisor,5085 # which could be restarting for example, an instance may5086 # not be found. Therefore just log the condition.5087 LOG.warn(_("Instance is unexpectedly not found. Ignore."),5088 instance=db_instance)5089 elif vm_state == vm_states.STOPPED:5090 if vm_power_state not in (power_state.NOSTATE,5091 power_state.SHUTDOWN,5092 power_state.CRASHED):5093 LOG.warn(_LW("Instance is not stopped. Calling "5094 "the stop API. Current vm_state: %(vm_state)s, "5095 "current task_state: %(task_state)s, "5096 "current DB power_state: %(db_power_state)s, "5097 "current VM power_state: %(vm_power_state)s"),5098 {'vm_state': vm_state,5099 'task_state': db_instance.task_state,5100 'db_power_state': db_power_state,5101 'vm_power_state': vm_power_state},5102 instance=db_instance)5103 try:5104 # NOTE(russellb) Force the stop, because normally the5105 # compute API would not allow an attempt to stop a stopped5106 # instance.5107 self.compute_api.force_stop(context, db_instance)5108 except Exception:5109 LOG.exception(_LE("error during stop() in "5110 "sync_power_state."),5111 instance=db_instance)5112 elif vm_state == vm_states.PAUSED:5113 if vm_power_state in (power_state.SHUTDOWN,5114 power_state.CRASHED):5115 LOG.warn(_("Paused instance shutdown by itself. 
Calling "5116 "the stop API."), instance=db_instance)5117 try:5118 self.compute_api.force_stop(context, db_instance)5119 except Exception:5120 LOG.exception(_LE("error during stop() in "5121 "sync_power_state."),5122 instance=db_instance)5123 elif vm_state in (vm_states.SOFT_DELETED,5124 vm_states.DELETED):5125 if vm_power_state not in (power_state.NOSTATE,5126 power_state.SHUTDOWN):5127 # Note(maoy): this should be taken care of periodically in5128 # _cleanup_running_deleted_instances().5129 LOG.warn(_("Instance is not (soft-)deleted."),5130 instance=db_instance)5131 @periodic_task.periodic_task5132 def _reclaim_queued_deletes(self, context):5133 """Reclaim instances that are queued for deletion."""5134 interval = CONF.reclaim_instance_interval5135 if interval <= 0:5136 LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")5137 return5138 # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.5139 # The only case that the quota might be inconsistent is5140 # the compute node died between set instance state to SOFT_DELETED5141 # and quota commit to DB. When compute node starts again5142 # it will have no idea the reservation is committed or not or even5143 # expired, since it's a rare case, so marked as todo.5144 quotas = quotas_obj.Quotas.from_reservations(context, None)5145 filters = {'vm_state': vm_states.SOFT_DELETED,5146 'task_state': None,5147 'host': self.host}5148 instances = objects.InstanceList.get_by_filters(5149 context, filters,5150 expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,5151 use_slave=True)5152 for instance in instances:5153 if self._deleted_old_enough(instance, interval):5154 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(5155 context, instance.uuid)5156 LOG.info(_('Reclaiming deleted instance'), instance=instance)5157 try:5158 self._delete_instance(context, instance, bdms, quotas)5159 except Exception as e:5160 LOG.warning(_("Periodic reclaim failed to delete "5161 "instance: %s"),5162 unicode(e), instance=instance)5163 @periodic_task.periodic_task5164 def update_available_resource(self, context):5165 """See driver.get_available_resource()5166 Periodic process that keeps that the compute host's understanding of5167 resource availability and usage in sync with the underlying hypervisor.5168 :param context: security context5169 """5170 new_resource_tracker_dict = {}5171 nodenames = set(self.driver.get_available_nodes())5172 for nodename in nodenames:5173 rt = self._get_resource_tracker(nodename)5174 rt.update_available_resource(context)5175 new_resource_tracker_dict[nodename] = rt5176 # Delete orphan compute node not reported by driver but still in db5177 compute_nodes_in_db = self._get_compute_nodes_in_db(context,5178 use_slave=True)5179 for cn in compute_nodes_in_db:5180 if cn.hypervisor_hostname not in nodenames:5181 LOG.audit(_("Deleting orphan compute node %s") % cn.id)5182 cn.destroy()5183 self._resource_tracker_dict = new_resource_tracker_dict5184 def _get_compute_nodes_in_db(self, context, use_slave=False):5185 service = objects.Service.get_by_compute_host(context, self.host,5186 use_slave=use_slave)5187 if not service:5188 LOG.error(_("No service record for host %s"), self.host)5189 return []5190 return objects.ComputeNodeList.get_by_service(context,5191 service,5192 use_slave=use_slave)5193 @periodic_task.periodic_task(5194 spacing=CONF.running_deleted_instance_poll_interval)5195 def _cleanup_running_deleted_instances(self, context):5196 """Cleanup any instances which are erroneously still running after5197 having been 
deleted.5198 Valid actions to take are:5199 1. noop - do nothing5200 2. log - log which instances are erroneously running5201 3. reap - shutdown and cleanup any erroneously running instances5202 4. shutdown - power off *and disable* any erroneously running5203 instances5204 The use-case for this cleanup task is: for various reasons, it may be5205 possible for the database to show an instance as deleted but for that5206 instance to still be running on a host machine (see bug5207 https://bugs.launchpad.net/nova/+bug/911366).5208 This cleanup task is a cross-hypervisor utility for finding these5209 zombied instances and either logging the discrepancy (likely what you5210 should do in production), or automatically reaping the instances (more5211 appropriate for dev environments).5212 """5213 action = CONF.running_deleted_instance_action5214 if action == "noop":5215 return5216 # NOTE(sirp): admin contexts don't ordinarily return deleted records5217 with utils.temporary_mutation(context, read_deleted="yes"):5218 for instance in self._running_deleted_instances(context):5219 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(5220 context, instance.uuid, use_slave=True)5221 if action == "log":5222 LOG.warning(_("Detected instance with name label "5223 "'%s' which is marked as "5224 "DELETED but still present on host."),5225 instance['name'], instance=instance)5226 elif action == 'shutdown':5227 LOG.info(_("Powering off instance with name label "5228 "'%s' which is marked as "5229 "DELETED but still present on host."),5230 instance['name'], instance=instance)5231 try:5232 try:5233 # disable starting the instance5234 self.driver.set_bootable(instance, False)5235 except NotImplementedError:5236 LOG.warn(_("set_bootable is not implemented for "5237 "the current driver"))5238 # and power it off5239 self.driver.power_off(instance)5240 except Exception:5241 msg = _("Failed to power off instance")5242 LOG.warn(msg, instance=instance, exc_info=True)5243 elif action == 'reap':5244 LOG.info(_("Destroying instance with name label "5245 "'%s' which is marked as "5246 "DELETED but still present on host."),5247 instance['name'], instance=instance)5248 self.instance_events.clear_events_for_instance(instance)5249 try:5250 self._shutdown_instance(context, instance, bdms,5251 notify=False)5252 self._cleanup_volumes(context, instance['uuid'], bdms)5253 except Exception as e:5254 LOG.warning(_("Periodic cleanup failed to delete "5255 "instance: %s"),5256 unicode(e), instance=instance)5257 else:5258 raise Exception(_("Unrecognized value '%s'"5259 " for CONF.running_deleted_"5260 "instance_action") % action)5261 def _running_deleted_instances(self, context):5262 """Returns a list of instances nova thinks is deleted,5263 but the hypervisor thinks is still running.5264 """5265 timeout = CONF.running_deleted_instance_timeout5266 filters = {'deleted': True,5267 'soft_deleted': False,5268 'host': self.host}5269 instances = self._get_instances_on_driver(context, filters)5270 return [i for i in instances if self._deleted_old_enough(i, timeout)]5271 def _deleted_old_enough(self, instance, timeout):5272 deleted_at = instance['deleted_at']5273 if isinstance(instance, obj_base.NovaObject) and deleted_at:5274 deleted_at = deleted_at.replace(tzinfo=None)5275 return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))5276 @contextlib.contextmanager5277 def _error_out_instance_on_exception(self, context, instance,5278 quotas=None,5279 instance_state=vm_states.ACTIVE):5280 instance_uuid = instance['uuid']5281 
try:5282 yield5283 except NotImplementedError as error:5284 with excutils.save_and_reraise_exception():5285 if quotas:5286 quotas.rollback()5287 LOG.info(_("Setting instance back to %(state)s after: "5288 "%(error)s") %5289 {'state': instance_state, 'error': error},5290 instance_uuid=instance_uuid)5291 self._instance_update(context, instance_uuid,5292 vm_state=instance_state,5293 task_state=None)5294 except exception.InstanceFaultRollback as error:5295 if quotas:5296 quotas.rollback()5297 LOG.info(_("Setting instance back to ACTIVE after: %s"),5298 error, instance_uuid=instance_uuid)5299 self._instance_update(context, instance_uuid,5300 vm_state=vm_states.ACTIVE,5301 task_state=None)5302 raise error.inner_exception5303 except Exception:5304 LOG.exception(_LE('Setting instance vm_state to ERROR'),5305 instance_uuid=instance_uuid)5306 with excutils.save_and_reraise_exception():5307 if quotas:5308 quotas.rollback()5309 self._set_instance_error_state(context, instance)5310 @aggregate_object_compat5311 @wrap_exception()5312 def add_aggregate_host(self, context, aggregate, host, slave_info):5313 """Notify hypervisor of change (for hypervisor pools)."""5314 try:5315 self.driver.add_to_aggregate(context, aggregate, host,5316 slave_info=slave_info)5317 except NotImplementedError:5318 LOG.debug('Hypervisor driver does not support '5319 'add_aggregate_host')5320 except exception.AggregateError:5321 with excutils.save_and_reraise_exception():5322 self.driver.undo_aggregate_operation(5323 context,5324 aggregate.delete_host,5325 aggregate, host)5326 @aggregate_object_compat5327 @wrap_exception()5328 def remove_aggregate_host(self, context, host, slave_info, aggregate):5329 """Removes a host from a physical hypervisor pool."""5330 try:5331 self.driver.remove_from_aggregate(context, aggregate, host,5332 slave_info=slave_info)5333 except NotImplementedError:5334 LOG.debug('Hypervisor driver does not support '5335 'remove_aggregate_host')5336 except (exception.AggregateError,5337 exception.InvalidAggregateAction) as e:5338 with excutils.save_and_reraise_exception():5339 self.driver.undo_aggregate_operation(5340 context,5341 aggregate.add_host,5342 aggregate, host,5343 isinstance(e, exception.AggregateError))5344 def _process_instance_event(self, instance, event):5345 _event = self.instance_events.pop_instance_event(instance, event)5346 if _event:5347 LOG.debug('Processing event %(event)s',5348 {'event': event.key}, instance=instance)5349 _event.send(event)5350 @wrap_exception()5351 def external_instance_event(self, context, instances, events):5352 # NOTE(danms): Some event types are handled by the manager, such5353 # as when we're asked to update the instance's info_cache. If it's5354 # not one of those, look for some thread(s) waiting for the event and5355 # unblock them if so.5356 for event in events:5357 instance = [inst for inst in instances...
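The cleanup decision made by _live_migration_cleanup_flags() in the excerpt above is small enough to restate on its own. Below is a condensed, standalone sketch of that decision; the dict-based migrate_data shape follows the excerpt, while the example values are illustrative rather than taken from any real driver:

def live_migration_cleanup_flags(block_migration, migrate_data=None):
    """Return (do_cleanup, destroy_disks) for source/rollback cleanup."""
    # Block migration is only allowed when neither block storage nor the
    # instance path is shared, so both flags default from that fact.
    is_shared_block_storage = not block_migration
    is_shared_instance_path = not block_migration
    if migrate_data:
        is_shared_block_storage = migrate_data.get(
            'is_shared_block_storage', is_shared_block_storage)
        is_shared_instance_path = migrate_data.get(
            'is_shared_instance_path', is_shared_instance_path)
    # The instance directory must be removed unless it lives on shared
    # storage; disks are destroyed only when block storage is not shared.
    do_cleanup = block_migration or not is_shared_instance_path
    destroy_disks = not is_shared_block_storage
    return do_cleanup, destroy_disks

# Block migration: remove the instance directory and destroy the copied
# disks at the destination on rollback.
assert live_migration_cleanup_flags(True) == (True, True)
# Volume-backed live migration without shared instance storage: remove the
# newly created instance directory, but leave the shared volumes alone.
assert live_migration_cleanup_flags(
    False, {'is_shared_block_storage': True,
            'is_shared_instance_path': False}) == (True, False)
# Fully shared storage: nothing to clean up at the source.
assert live_migration_cleanup_flags(
    False, {'is_shared_block_storage': True,
            'is_shared_instance_path': True}) == (False, False)

In short, do_cleanup tracks the instance directory and destroy_disks tracks the disks themselves, which is how the two flags are consumed by driver.cleanup() and rollback_live_migration_at_destination() in the excerpt.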

rpcapi.py

Source: rpcapi.py (GitHub)
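The excerpt below leans on two related idioms: a release alias from VERSION_ALIASES is resolved to a concrete version cap in __init__, and each RPC method then asks the client whether the capped connection can still send the newer message version, falling back to '3.0' when it cannot. Here is a minimal, self-contained sketch of both idioms under stated assumptions; FakeCappedClient, resolve_version_cap and pick_version are illustrative stand-ins, not Nova or oslo.messaging code:

VERSION_ALIASES = {
    'icehouse': '3.23',
}

def resolve_version_cap(upgrade_level):
    """Translate a release alias (or explicit version string) into a cap."""
    return VERSION_ALIASES.get(upgrade_level, upgrade_level)

class FakeCappedClient(object):
    """Stand-in for an RPC client that honours a version cap."""
    def __init__(self, version_cap):
        self._cap = self._parse(version_cap) if version_cap else None

    @staticmethod
    def _parse(version):
        return tuple(int(part) for part in version.split('.'))

    def can_send_version(self, version):
        # No cap configured means any version may be sent.
        return self._cap is None or self._parse(version) <= self._cap

def pick_version(client, wanted, fallback='3.0'):
    """Use the newer message version only if the cap allows it."""
    return wanted if client.can_send_version(wanted) else fallback

# [upgrade_levels] compute=icehouse pins the cap to 3.23, so a 3.25-style
# message would be sent in its 3.0 form instead.
cap = resolve_version_cap('icehouse')
assert cap == '3.23'
assert pick_version(FakeCappedClient(cap), '3.25') == '3.0'
# With no cap configured (the default), the newest version is used directly.
assert pick_version(FakeCappedClient(resolve_version_cap(None)), '3.25') == '3.25'

The same fallback appears throughout the excerpt, for example in detach_volume(), get_console_output(), live_migration() and remove_volume_connection(), where the older path also calls jsonutils.to_primitive(instance) because a 3.0-era peer cannot deserialize new-world objects.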
...56 * 1.1 - Adds get_host_uptime()57 * 1.2 - Adds check_can_live_migrate_[destination|source]58 * 1.3 - Adds change_instance_metadata()59 * 1.4 - Remove instance_uuid, add instance argument to60 reboot_instance()61 * 1.5 - Remove instance_uuid, add instance argument to62 pause_instance(), unpause_instance()63 * 1.6 - Remove instance_uuid, add instance argument to64 suspend_instance()65 * 1.7 - Remove instance_uuid, add instance argument to66 get_console_output()67 * 1.8 - Remove instance_uuid, add instance argument to68 add_fixed_ip_to_instance()69 * 1.9 - Remove instance_uuid, add instance argument to attach_volume()70 * 1.10 - Remove instance_id, add instance argument to71 check_can_live_migrate_destination()72 * 1.11 - Remove instance_id, add instance argument to73 check_can_live_migrate_source()74 * 1.12 - Remove instance_uuid, add instance argument to75 confirm_resize()76 * 1.13 - Remove instance_uuid, add instance argument to detach_volume()77 * 1.14 - Remove instance_uuid, add instance argument to finish_resize()78 * 1.15 - Remove instance_uuid, add instance argument to79 finish_revert_resize()80 * 1.16 - Remove instance_uuid, add instance argument to81 get_diagnostics()82 * 1.17 - Remove instance_uuid, add instance argument to83 get_vnc_console()84 * 1.18 - Remove instance_uuid, add instance argument to inject_file()85 * 1.19 - Remove instance_uuid, add instance argument to86 inject_network_info()87 * 1.20 - Remove instance_id, add instance argument to88 post_live_migration_at_destination()89 * 1.21 - Remove instance_uuid, add instance argument to90 power_off_instance() and stop_instance()91 * 1.22 - Remove instance_uuid, add instance argument to92 power_on_instance() and start_instance()93 * 1.23 - Remove instance_id, add instance argument to94 pre_live_migration()95 * 1.24 - Remove instance_uuid, add instance argument to96 rebuild_instance()97 * 1.25 - Remove instance_uuid, add instance argument to98 remove_fixed_ip_from_instance()99 * 1.26 - Remove instance_id, add instance argument to100 remove_volume_connection()101 * 1.27 - Remove instance_uuid, add instance argument to102 rescue_instance()103 * 1.28 - Remove instance_uuid, add instance argument to reset_network()104 * 1.29 - Remove instance_uuid, add instance argument to105 resize_instance()106 * 1.30 - Remove instance_uuid, add instance argument to107 resume_instance()108 * 1.31 - Remove instance_uuid, add instance argument to revert_resize()109 * 1.32 - Remove instance_id, add instance argument to110 rollback_live_migration_at_destination()111 * 1.33 - Remove instance_uuid, add instance argument to112 set_admin_password()113 * 1.34 - Remove instance_uuid, add instance argument to114 snapshot_instance()115 * 1.35 - Remove instance_uuid, add instance argument to116 unrescue_instance()117 * 1.36 - Remove instance_uuid, add instance argument to118 change_instance_metadata()119 * 1.37 - Remove instance_uuid, add instance argument to120 terminate_instance()121 * 1.38 - Changes to prep_resize():122 * remove instance_uuid, add instance123 * remove instance_type_id, add instance_type124 * remove topic, it was unused125 * 1.39 - Remove instance_uuid, add instance argument to run_instance()126 * 1.40 - Remove instance_id, add instance argument to live_migration()127 * 1.41 - Adds refresh_instance_security_rules()128 * 1.42 - Add reservations arg to prep_resize(), resize_instance(),129 finish_resize(), confirm_resize(), revert_resize() and130 finish_revert_resize()131 * 1.43 - Add migrate_data to live_migration()132 * 1.44 - Adds 
reserve_block_device_name()133 * 2.0 - Remove 1.x backwards compat134 * 2.1 - Adds orig_sys_metadata to rebuild_instance()135 * 2.2 - Adds slave_info parameter to add_aggregate_host() and136 remove_aggregate_host()137 * 2.3 - Adds volume_id to reserve_block_device_name()138 * 2.4 - Add bdms to terminate_instance139 * 2.5 - Add block device and network info to reboot_instance140 * 2.6 - Remove migration_id, add migration to resize_instance141 * 2.7 - Remove migration_id, add migration to confirm_resize142 * 2.8 - Remove migration_id, add migration to finish_resize143 * 2.9 - Add publish_service_capabilities()144 * 2.10 - Adds filter_properties and request_spec to prep_resize()145 * 2.11 - Adds soft_delete_instance() and restore_instance()146 * 2.12 - Remove migration_id, add migration to revert_resize147 * 2.13 - Remove migration_id, add migration to finish_revert_resize148 * 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host149 * 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host150 * 2.16 - Add instance_type to resize_instance151 * 2.17 - Add get_backdoor_port()152 * 2.18 - Add bdms to rebuild_instance153 * 2.19 - Add node to run_instance154 * 2.20 - Add node to prep_resize155 * 2.21 - Add migrate_data dict param to pre_live_migration()156 * 2.22 - Add recreate, on_shared_storage and host arguments to157 rebuild_instance()158 * 2.23 - Remove network_info from reboot_instance159 * 2.24 - Added get_spice_console method160 * 2.25 - Add attach_interface() and detach_interface()161 * 2.26 - Add validate_console_port to ensure the service connects to162 vnc on the correct port163 * 2.27 - Adds 'reservations' to terminate_instance() and164 soft_delete_instance()165 ... Grizzly supports message version 2.27. So, any changes to existing166 methods in 2.x after that point should be done such that they can167 handle the version_cap being set to 2.27.168 * 2.28 - Adds check_instance_shared_storage()169 * 2.29 - Made start_instance() and stop_instance() take new-world170 instance objects171 * 2.30 - Adds live_snapshot_instance()172 * 2.31 - Adds shelve_instance(), shelve_offload_instance, and173 unshelve_instance()174 * 2.32 - Make reboot_instance take a new world instance object175 * 2.33 - Made suspend_instance() and resume_instance() take new-world176 instance objects177 * 2.34 - Added swap_volume()178 * 2.35 - Made terminate_instance() and soft_delete_instance() take179 new-world instance objects180 * 2.36 - Made pause_instance() and unpause_instance() take new-world181 instance objects182 * 2.37 - Added the legacy_bdm_in_spec parameter to run_instance183 * 2.38 - Made check_can_live_migrate_[destination|source] take184 new-world instance objects185 * 2.39 - Made revert_resize() and confirm_resize() take new-world186 instance objects187 * 2.40 - Made reset_network() take new-world instance object188 * 2.41 - Make inject_network_info take new-world instance object189 * 2.42 - Splits snapshot_instance() into snapshot_instance() and190 backup_instance() and makes them take new-world instance191 objects.192 * 2.43 - Made prep_resize() take new-world instance object193 * 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()194 * 2.45 - Made resize_instance() take new-world objects195 * 2.46 - Made finish_resize() take new-world objects196 * 2.47 - Made finish_revert_resize() take new-world objects197 ... Havana supports message version 2.47. 
So, any changes to existing198 methods in 2.x after that point should be done such that they can199 handle the version_cap being set to 2.47.200 * 2.48 - Make add_aggregate_host() and remove_aggregate_host() take201 new-world objects202 * ... - Remove live_snapshot() that was never actually used203 * 3.0 - Remove 2.x compatibility204 * 3.1 - Update get_spice_console() to take an instance object205 * 3.2 - Update get_vnc_console() to take an instance object206 * 3.3 - Update validate_console_port() to take an instance object207 * 3.4 - Update rebuild_instance() to take an instance object208 * 3.5 - Pass preserve_ephemeral flag to rebuild_instance()209 * 3.6 - Make volume_snapshot_{create,delete} use new-world objects210 * 3.7 - Update change_instance_metadata() to take an instance object211 * 3.8 - Update set_admin_password() to take an instance object212 * 3.9 - Update rescue_instance() to take an instance object213 * 3.10 - Added get_rdp_console method214 * 3.11 - Update unrescue_instance() to take an object215 * 3.12 - Update add_fixed_ip_to_instance() to take an object216 * 3.13 - Update remove_fixed_ip_from_instance() to take an object217 * 3.14 - Update post_live_migration_at_destination() to take an object218 * 3.15 - Adds filter_properties and node to unshelve_instance()219 * 3.16 - Make reserve_block_device_name and attach_volume use new-world220 objects, and add disk_bus and device_type params to221 reserve_block_device_name, and bdm param to attach_volume222 * 3.17 - Update attach_interface and detach_interface to take an object223 * 3.18 - Update get_diagnostics() to take an instance object224 * Removed inject_file(), as it was unused.225 * 3.19 - Update pre_live_migration to take instance object226 * 3.20 - Make restore_instance take an instance object227 * 3.21 - Made rebuild take new-world BDM objects228 * 3.22 - Made terminate_instance take new-world BDM objects229 * 3.23 - Added external_instance_event()230 * build_and_run_instance was added in Havana and not used or231 documented.232 ... Icehouse supports message version 3.23. So, any changes to233 existing methods in 3.x after that point should be done such that they234 can handle the version_cap being set to 3.23.235 * 3.24 - Update rescue_instance() to take optional rescue_image_ref236 * 3.25 - Make detach_volume take an object237 * 3.26 - Make live_migration() and238 rollback_live_migration_at_destination() take an object239 * ... 
Removed run_instance()240 * 3.27 - Make run_instance() accept a new-world object241 * 3.28 - Update get_console_output() to accept a new-world object242 * 3.29 - Make check_instance_shared_storage accept a new-world object243 * 3.30 - Make remove_volume_connection() accept a new-world object244 * 3.31 - Add get_instance_diagnostics245 * 3.32 - Add destroy_disks and migrate_data optional parameters to246 rollback_live_migration_at_destination()247 * 3.33 - Make build_and_run_instance() take a NetworkRequestList object248 * 3.34 - Add get_serial_console method249 '''250 VERSION_ALIASES = {251 'icehouse': '3.23',252 }253 def __init__(self):254 super(ComputeAPI, self).__init__()255 target = messaging.Target(topic=CONF.compute_topic, version='3.0')256 version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,257 CONF.upgrade_levels.compute)258 serializer = objects_base.NovaObjectSerializer()259 self.client = self.get_client(target, version_cap, serializer)260 # Cells overrides this261 def get_client(self, target, version_cap, serializer):262 return rpc.get_client(target,263 version_cap=version_cap,264 serializer=serializer)265 def _check_live_migration_api_version(self, server):266 # NOTE(angdraug): live migration involving a compute host running Nova267 # API older than v3.32 as either source or destination can cause268 # instance disks to be deleted from shared storage269 if not self.client.can_send_version('3.32'):270 raise exception.LiveMigrationWithOldNovaNotSafe(server=server)271 def add_aggregate_host(self, ctxt, aggregate, host_param, host,272 slave_info=None):273 '''Add aggregate host.274 :param ctxt: request context275 :param aggregate_id:276 :param host_param: This value is placed in the message to be the 'host'277 parameter for the remote method.278 :param host: This is the host to send the message to.279 '''280 version = '3.0'281 cctxt = self.client.prepare(server=host, version=version)282 cctxt.cast(ctxt, 'add_aggregate_host',283 aggregate=aggregate, host=host_param,284 slave_info=slave_info)285 def add_fixed_ip_to_instance(self, ctxt, instance, network_id):286 version = '3.12'287 cctxt = self.client.prepare(server=_compute_host(None, instance),288 version=version)289 cctxt.cast(ctxt, 'add_fixed_ip_to_instance',290 instance=instance, network_id=network_id)291 def attach_interface(self, ctxt, instance, network_id, port_id,292 requested_ip):293 version = '3.17'294 cctxt = self.client.prepare(server=_compute_host(None, instance),295 version=version)296 return cctxt.call(ctxt, 'attach_interface',297 instance=instance, network_id=network_id,298 port_id=port_id, requested_ip=requested_ip)299 def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None):300 # NOTE(ndipanov): Remove volume_id and mountpoint on the next major301 # version bump - they are not needed when using bdm objects.302 version = '3.16'303 kw = {'instance': instance, 'volume_id': volume_id,304 'mountpoint': mountpoint, 'bdm': bdm}305 cctxt = self.client.prepare(server=_compute_host(None, instance),306 version=version)307 cctxt.cast(ctxt, 'attach_volume', **kw)308 def change_instance_metadata(self, ctxt, instance, diff):309 version = '3.7'310 cctxt = self.client.prepare(server=_compute_host(None, instance),311 version=version)312 cctxt.cast(ctxt, 'change_instance_metadata',313 instance=instance, diff=diff)314 def check_can_live_migrate_destination(self, ctxt, instance, destination,315 block_migration, disk_over_commit):316 self._check_live_migration_api_version(destination)317 cctxt = 
self.client.prepare(server=destination, version='3.32')318 return cctxt.call(ctxt, 'check_can_live_migrate_destination',319 instance=instance,320 block_migration=block_migration,321 disk_over_commit=disk_over_commit)322 def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):323 source = _compute_host(None, instance)324 self._check_live_migration_api_version(source)325 cctxt = self.client.prepare(server=source, version='3.32')326 return cctxt.call(ctxt, 'check_can_live_migrate_source',327 instance=instance,328 dest_check_data=dest_check_data)329 def check_instance_shared_storage(self, ctxt, instance, data):330 if self.client.can_send_version('3.29'):331 version = '3.29'332 else:333 version = '3.0'334 instance = jsonutils.to_primitive(instance)335 cctxt = self.client.prepare(server=_compute_host(None, instance),336 version=version)337 return cctxt.call(ctxt, 'check_instance_shared_storage',338 instance=instance,339 data=data)340 def confirm_resize(self, ctxt, instance, migration, host,341 reservations=None, cast=True):342 version = '3.0'343 cctxt = self.client.prepare(server=_compute_host(host, instance),344 version=version)345 rpc_method = cctxt.cast if cast else cctxt.call346 return rpc_method(ctxt, 'confirm_resize',347 instance=instance, migration=migration,348 reservations=reservations)349 def detach_interface(self, ctxt, instance, port_id):350 version = '3.17'351 cctxt = self.client.prepare(server=_compute_host(None, instance),352 version=version)353 cctxt.cast(ctxt, 'detach_interface',354 instance=instance, port_id=port_id)355 def detach_volume(self, ctxt, instance, volume_id):356 if self.client.can_send_version('3.25'):357 version = '3.25'358 else:359 version = '3.0'360 instance = jsonutils.to_primitive(instance)361 cctxt = self.client.prepare(server=_compute_host(None, instance),362 version=version)363 cctxt.cast(ctxt, 'detach_volume',364 instance=instance, volume_id=volume_id)365 def finish_resize(self, ctxt, instance, migration, image, disk_info,366 host, reservations=None):367 version = '3.0'368 cctxt = self.client.prepare(server=host, version=version)369 cctxt.cast(ctxt, 'finish_resize',370 instance=instance, migration=migration,371 image=image, disk_info=disk_info, reservations=reservations)372 def finish_revert_resize(self, ctxt, instance, migration, host,373 reservations=None):374 version = '3.0'375 cctxt = self.client.prepare(server=host, version=version)376 cctxt.cast(ctxt, 'finish_revert_resize',377 instance=instance, migration=migration,378 reservations=reservations)379 def get_console_output(self, ctxt, instance, tail_length):380 if self.client.can_send_version('3.28'):381 version = '3.28'382 else:383 version = '3.0'384 instance = jsonutils.to_primitive(instance)385 cctxt = self.client.prepare(server=_compute_host(None, instance),386 version=version)387 return cctxt.call(ctxt, 'get_console_output',388 instance=instance, tail_length=tail_length)389 def get_console_pool_info(self, ctxt, console_type, host):390 version = '3.0'391 cctxt = self.client.prepare(server=host, version=version)392 return cctxt.call(ctxt, 'get_console_pool_info',393 console_type=console_type)394 def get_console_topic(self, ctxt, host):395 version = '3.0'396 cctxt = self.client.prepare(server=host, version=version)397 return cctxt.call(ctxt, 'get_console_topic')398 def get_diagnostics(self, ctxt, instance):399 version = '3.18'400 cctxt = self.client.prepare(server=_compute_host(None, instance),401 version=version)402 return cctxt.call(ctxt, 'get_diagnostics', instance=instance)403 
def get_instance_diagnostics(self, ctxt, instance):404 instance_p = jsonutils.to_primitive(instance)405 kwargs = {'instance': instance_p}406 version = '3.31'407 cctxt = self.client.prepare(server=_compute_host(None, instance),408 version=version)409 return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs)410 def get_vnc_console(self, ctxt, instance, console_type):411 version = '3.2'412 cctxt = self.client.prepare(server=_compute_host(None, instance),413 version=version)414 return cctxt.call(ctxt, 'get_vnc_console',415 instance=instance, console_type=console_type)416 def get_spice_console(self, ctxt, instance, console_type):417 version = '3.1'418 cctxt = self.client.prepare(server=_compute_host(None, instance),419 version=version)420 return cctxt.call(ctxt, 'get_spice_console',421 instance=instance, console_type=console_type)422 def get_rdp_console(self, ctxt, instance, console_type):423 version = '3.10'424 cctxt = self.client.prepare(server=_compute_host(None, instance),425 version=version)426 return cctxt.call(ctxt, 'get_rdp_console',427 instance=instance, console_type=console_type)428 def get_serial_console(self, ctxt, instance, console_type):429 version = '3.34'430 cctxt = self.client.prepare(server=_compute_host(None, instance),431 version=version)432 return cctxt.call(ctxt, 'get_serial_console',433 instance=instance, console_type=console_type)434 def validate_console_port(self, ctxt, instance, port, console_type):435 version = '3.3'436 cctxt = self.client.prepare(server=_compute_host(None, instance),437 version=version)438 return cctxt.call(ctxt, 'validate_console_port',439 instance=instance, port=port,440 console_type=console_type)441 def host_maintenance_mode(self, ctxt, host_param, mode, host):442 '''Set host maintenance mode443 :param ctxt: request context444 :param host_param: This value is placed in the message to be the 'host'445 parameter for the remote method.446 :param mode:447 :param host: This is the host to send the message to.448 '''449 version = '3.0'450 cctxt = self.client.prepare(server=host, version=version)451 return cctxt.call(ctxt, 'host_maintenance_mode',452 host=host_param, mode=mode)453 def host_power_action(self, ctxt, action, host):454 version = '3.0'455 cctxt = self.client.prepare(server=host, version=version)456 return cctxt.call(ctxt, 'host_power_action', action=action)457 def inject_network_info(self, ctxt, instance):458 version = '3.0'459 cctxt = self.client.prepare(server=_compute_host(None, instance),460 version=version)461 cctxt.cast(ctxt, 'inject_network_info', instance=instance)462 def live_migration(self, ctxt, instance, dest, block_migration, host,463 migrate_data=None):464 if self.client.can_send_version('3.26'):465 version = '3.26'466 else:467 version = '3.0'468 instance = jsonutils.to_primitive(instance)469 cctxt = self.client.prepare(server=host, version=version)470 cctxt.cast(ctxt, 'live_migration', instance=instance,471 dest=dest, block_migration=block_migration,472 migrate_data=migrate_data)473 def pause_instance(self, ctxt, instance):474 version = '3.0'475 cctxt = self.client.prepare(server=_compute_host(None, instance),476 version=version)477 cctxt.cast(ctxt, 'pause_instance', instance=instance)478 def post_live_migration_at_destination(self, ctxt, instance,479 block_migration, host):480 version = '3.14'481 cctxt = self.client.prepare(server=host, version=version)482 cctxt.cast(ctxt, 'post_live_migration_at_destination',483 instance=instance, block_migration=block_migration)484 def pre_live_migration(self, ctxt, instance, 
block_migration, disk,485 host, migrate_data=None):486 version = '3.19'487 cctxt = self.client.prepare(server=host, version=version)488 return cctxt.call(ctxt, 'pre_live_migration',489 instance=instance,490 block_migration=block_migration,491 disk=disk, migrate_data=migrate_data)492 def prep_resize(self, ctxt, image, instance, instance_type, host,493 reservations=None, request_spec=None,494 filter_properties=None, node=None):495 version = '3.0'496 instance_type_p = jsonutils.to_primitive(instance_type)497 image_p = jsonutils.to_primitive(image)498 cctxt = self.client.prepare(server=host, version=version)499 cctxt.cast(ctxt, 'prep_resize',500 instance=instance,501 instance_type=instance_type_p,502 image=image_p, reservations=reservations,503 request_spec=request_spec,504 filter_properties=filter_properties,505 node=node)506 def reboot_instance(self, ctxt, instance, block_device_info,507 reboot_type):508 version = '3.0'509 cctxt = self.client.prepare(server=_compute_host(None, instance),510 version=version)511 cctxt.cast(ctxt, 'reboot_instance',512 instance=instance,513 block_device_info=block_device_info,514 reboot_type=reboot_type)515 def rebuild_instance(self, ctxt, instance, new_pass, injected_files,516 image_ref, orig_image_ref, orig_sys_metadata, bdms,517 recreate=False, on_shared_storage=False, host=None,518 preserve_ephemeral=False, kwargs=None):519 # NOTE(danms): kwargs is only here for cells compatibility, don't520 # actually send it to compute521 extra = {'preserve_ephemeral': preserve_ephemeral}522 version = '3.21'523 cctxt = self.client.prepare(server=_compute_host(host, instance),524 version=version)525 cctxt.cast(ctxt, 'rebuild_instance',526 instance=instance, new_pass=new_pass,527 injected_files=injected_files, image_ref=image_ref,528 orig_image_ref=orig_image_ref,529 orig_sys_metadata=orig_sys_metadata, bdms=bdms,530 recreate=recreate, on_shared_storage=on_shared_storage,531 **extra)532 def refresh_provider_fw_rules(self, ctxt, host):533 version = '3.0'534 cctxt = self.client.prepare(server=host, version=version)535 cctxt.cast(ctxt, 'refresh_provider_fw_rules')536 def remove_aggregate_host(self, ctxt, aggregate, host_param, host,537 slave_info=None):538 '''Remove aggregate host.539 :param ctxt: request context540 :param aggregate_id:541 :param host_param: This value is placed in the message to be the 'host'542 parameter for the remote method.543 :param host: This is the host to send the message to.544 '''545 version = '3.0'546 cctxt = self.client.prepare(server=host, version=version)547 cctxt.cast(ctxt, 'remove_aggregate_host',548 aggregate=aggregate, host=host_param,549 slave_info=slave_info)550 def remove_fixed_ip_from_instance(self, ctxt, instance, address):551 version = '3.13'552 cctxt = self.client.prepare(server=_compute_host(None, instance),553 version=version)554 cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',555 instance=instance, address=address)556 def remove_volume_connection(self, ctxt, instance, volume_id, host):557 if self.client.can_send_version('3.30'):558 version = '3.30'559 else:560 version = '3.0'561 instance = jsonutils.to_primitive(instance)562 cctxt = self.client.prepare(server=host, version=version)563 return cctxt.call(ctxt, 'remove_volume_connection',564 instance=instance, volume_id=volume_id)565 def rescue_instance(self, ctxt, instance, rescue_password,566 rescue_image_ref=None):567 msg_args = {'rescue_password': rescue_password}568 if self.client.can_send_version('3.24'):569 version = '3.24'570 msg_args['rescue_image_ref'] = rescue_image_ref571 
else:572 version = '3.9'573 msg_args['instance'] = instance574 cctxt = self.client.prepare(server=_compute_host(None, instance),575 version=version)576 cctxt.cast(ctxt, 'rescue_instance', **msg_args)577 def reset_network(self, ctxt, instance):578 version = '3.0'579 cctxt = self.client.prepare(server=_compute_host(None, instance),580 version=version)581 cctxt.cast(ctxt, 'reset_network', instance=instance)582 def resize_instance(self, ctxt, instance, migration, image, instance_type,583 reservations=None):584 version = '3.0'585 instance_type_p = jsonutils.to_primitive(instance_type)586 cctxt = self.client.prepare(server=_compute_host(None, instance),587 version=version)588 cctxt.cast(ctxt, 'resize_instance',589 instance=instance, migration=migration,590 image=image, reservations=reservations,591 instance_type=instance_type_p)592 def resume_instance(self, ctxt, instance):593 version = '3.0'594 cctxt = self.client.prepare(server=_compute_host(None, instance),595 version=version)596 cctxt.cast(ctxt, 'resume_instance', instance=instance)597 def revert_resize(self, ctxt, instance, migration, host,598 reservations=None):599 version = '3.0'600 cctxt = self.client.prepare(server=_compute_host(host, instance),601 version=version)602 cctxt.cast(ctxt, 'revert_resize',603 instance=instance, migration=migration,604 reservations=reservations)605 def rollback_live_migration_at_destination(self, ctxt, instance, host,606 destroy_disks=True,607 migrate_data=None):608 self._check_live_migration_api_version(host)609 cctxt = self.client.prepare(server=host, version='3.32')610 cctxt.cast(ctxt, 'rollback_live_migration_at_destination',611 instance=instance,612 destroy_disks=destroy_disks, migrate_data=migrate_data)613 # NOTE(alaski): Remove this method when the scheduler rpc interface is614 # bumped to 4.x as the only callers of this method will be removed.615 def run_instance(self, ctxt, instance, host, request_spec,616 filter_properties, requested_networks,617 injected_files, admin_password,618 is_first_time, node=None, legacy_bdm_in_spec=True):619 if self.client.can_send_version('3.27'):620 version = '3.27'621 else:622 version = '3.0'623 instance = jsonutils.to_primitive(instance)624 msg_kwargs = {'instance': instance, 'request_spec': request_spec,625 'filter_properties': filter_properties,626 'requested_networks': requested_networks,627 'injected_files': injected_files,628 'admin_password': admin_password,629 'is_first_time': is_first_time, 'node': node,630 'legacy_bdm_in_spec': legacy_bdm_in_spec}631 cctxt = self.client.prepare(server=host, version=version)632 cctxt.cast(ctxt, 'run_instance', **msg_kwargs)633 def set_admin_password(self, ctxt, instance, new_pass):634 version = '3.8'635 cctxt = self.client.prepare(server=_compute_host(None, instance),636 version=version)637 return cctxt.call(ctxt, 'set_admin_password',638 instance=instance, new_pass=new_pass)639 def set_host_enabled(self, ctxt, enabled, host):640 version = '3.0'641 cctxt = self.client.prepare(server=host, version=version)642 return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)643 def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):644 version = '3.0'645 cctxt = self.client.prepare(server=_compute_host(None, instance),646 version=version)647 cctxt.cast(ctxt, 'swap_volume',648 instance=instance, old_volume_id=old_volume_id,649 new_volume_id=new_volume_id)650 def get_host_uptime(self, ctxt, host):651 version = '3.0'652 cctxt = self.client.prepare(server=host, version=version)653 return cctxt.call(ctxt, 'get_host_uptime')654 
def reserve_block_device_name(self, ctxt, instance, device, volume_id,655 disk_bus=None, device_type=None):656 version = '3.16'657 kw = {'instance': instance, 'device': device,658 'volume_id': volume_id, 'disk_bus': disk_bus,659 'device_type': device_type}660 cctxt = self.client.prepare(server=_compute_host(None, instance),661 version=version)662 return cctxt.call(ctxt, 'reserve_block_device_name', **kw)663 def backup_instance(self, ctxt, instance, image_id, backup_type,664 rotation):665 version = '3.0'666 cctxt = self.client.prepare(server=_compute_host(None, instance),667 version=version)668 cctxt.cast(ctxt, 'backup_instance',669 instance=instance,670 image_id=image_id,671 backup_type=backup_type,672 rotation=rotation)673 def snapshot_instance(self, ctxt, instance, image_id):674 version = '3.0'675 cctxt = self.client.prepare(server=_compute_host(None, instance),676 version=version)677 cctxt.cast(ctxt, 'snapshot_instance',678 instance=instance,679 image_id=image_id)680 def start_instance(self, ctxt, instance):681 version = '3.0'682 cctxt = self.client.prepare(server=_compute_host(None, instance),683 version=version)684 cctxt.cast(ctxt, 'start_instance', instance=instance)685 def stop_instance(self, ctxt, instance, do_cast=True):686 version = '3.0'687 cctxt = self.client.prepare(server=_compute_host(None, instance),688 version=version)689 rpc_method = cctxt.cast if do_cast else cctxt.call690 return rpc_method(ctxt, 'stop_instance', instance=instance)691 def suspend_instance(self, ctxt, instance):692 version = '3.0'693 cctxt = self.client.prepare(server=_compute_host(None, instance),694 version=version)695 cctxt.cast(ctxt, 'suspend_instance', instance=instance)696 def terminate_instance(self, ctxt, instance, bdms, reservations=None):697 version = '3.22'698 cctxt = self.client.prepare(server=_compute_host(None, instance),699 version=version)700 cctxt.cast(ctxt, 'terminate_instance',701 instance=instance, bdms=bdms,702 reservations=reservations)703 def unpause_instance(self, ctxt, instance):704 version = '3.0'705 cctxt = self.client.prepare(server=_compute_host(None, instance),706 version=version)707 cctxt.cast(ctxt, 'unpause_instance', instance=instance)708 def unrescue_instance(self, ctxt, instance):709 version = '3.11'710 cctxt = self.client.prepare(server=_compute_host(None, instance),711 version=version)712 cctxt.cast(ctxt, 'unrescue_instance', instance=instance)713 def soft_delete_instance(self, ctxt, instance, reservations=None):714 version = '3.0'715 cctxt = self.client.prepare(server=_compute_host(None, instance),716 version=version)717 cctxt.cast(ctxt, 'soft_delete_instance',718 instance=instance, reservations=reservations)719 def restore_instance(self, ctxt, instance):720 version = '3.20'721 cctxt = self.client.prepare(server=_compute_host(None, instance),722 version=version)723 cctxt.cast(ctxt, 'restore_instance', instance=instance)724 def shelve_instance(self, ctxt, instance, image_id=None):725 version = '3.0'726 cctxt = self.client.prepare(server=_compute_host(None, instance),727 version=version)728 cctxt.cast(ctxt, 'shelve_instance',729 instance=instance, image_id=image_id)730 def shelve_offload_instance(self, ctxt, instance):731 version = '3.0'732 cctxt = self.client.prepare(server=_compute_host(None, instance),733 version=version)734 cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance)735 def unshelve_instance(self, ctxt, instance, host, image=None,736 filter_properties=None, node=None):737 version = '3.15'738 msg_kwargs = {739 'instance': instance,740 'image': 
image,741 'filter_properties': filter_properties,742 'node': node,743 }744 cctxt = self.client.prepare(server=host, version=version)745 cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs)746 def volume_snapshot_create(self, ctxt, instance, volume_id,747 create_info):748 version = '3.6'749 cctxt = self.client.prepare(server=_compute_host(None, instance),750 version=version)751 cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance,752 volume_id=volume_id, create_info=create_info)753 def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,754 delete_info):755 version = '3.6'756 cctxt = self.client.prepare(server=_compute_host(None, instance),757 version=version)758 cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance,759 volume_id=volume_id, snapshot_id=snapshot_id,760 delete_info=delete_info)761 def external_instance_event(self, ctxt, instances, events):762 cctxt = self.client.prepare(763 server=_compute_host(None, instances[0]),764 version='3.23')765 cctxt.cast(ctxt, 'external_instance_event', instances=instances,766 events=events)767 def build_and_run_instance(self, ctxt, instance, host, image, request_spec,768 filter_properties, admin_password=None, injected_files=None,769 requested_networks=None, security_groups=None,770 block_device_mapping=None, node=None, limits=None):771 version = '3.33'772 if not self.client.can_send_version(version):773 version = '3.23'774 if requested_networks is not None:775 requested_networks = requested_networks.as_tuples()776 cctxt = self.client.prepare(server=host, version=version)777 cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,778 image=image, request_spec=request_spec,779 filter_properties=filter_properties,780 admin_password=admin_password,781 injected_files=injected_files,...


navtreeindex2.js

Source:navtreeindex2.js Github


1var NAVTREEINDEX2 =2{3"structarm__biquad__cascade__df2T__instance__f64.html":[4,6],4"structarm__biquad__cascade__df2T__instance__f64.html#a0bde57b618e3f9059b23b0de64e12ce3":[4,6,2],5"structarm__biquad__cascade__df2T__instance__f64.html#ad55380ff835b533aa5168f836db8a4de":[4,6,0],6"structarm__biquad__cascade__df2T__instance__f64.html#ae2f0180f9038c0393e1d6921bb3b878b":[4,6,1],7"structarm__biquad__cascade__stereo__df2T__instance__f32.html":[4,7],8"structarm__biquad__cascade__stereo__df2T__instance__f32.html#a2cb00048bb1fe957a03c1ff56dfaf8f0":[4,7,2],9"structarm__biquad__cascade__stereo__df2T__instance__f32.html#a5655328252da5c2c2425ceed253bc4f1":[4,7,0],10"structarm__biquad__cascade__stereo__df2T__instance__f32.html#a68a22132ff134715ed2cc8d0f8225bc0":[4,7,1],11"structarm__biquad__casd__df1__inst__f32.html":[4,8],12"structarm__biquad__casd__df1__inst__f32.html#a4e873ff8bb6273858481cfd4c7f8f6d5":[4,8,1],13"structarm__biquad__casd__df1__inst__f32.html#a8c245d79e0d8cfabc82409d4b54fb682":[4,8,2],14"structarm__biquad__casd__df1__inst__f32.html#af69820c37a87252c46453e4cfe120585":[4,8,0],15"structarm__biquad__casd__df1__inst__q15.html":[4,9],16"structarm__biquad__casd__df1__inst__q15.html#a5481104ef2f8f81360b80b47d69ae932":[4,9,3],17"structarm__biquad__casd__df1__inst__q15.html#a9219b50b6ca227c92da85f2151ea362e":[4,9,1],18"structarm__biquad__casd__df1__inst__q15.html#ad6d95e70abcf4ff1300181415ad92153":[4,9,0],19"structarm__biquad__casd__df1__inst__q15.html#ada7e9d6269e6ed4eacf8f68729e9832d":[4,9,2],20"structarm__biquad__casd__df1__inst__q31.html":[4,10],21"structarm__biquad__casd__df1__inst__q31.html#a2c2b579f1df1d8273a5d9d945c27e1b2":[4,10,0],22"structarm__biquad__casd__df1__inst__q31.html#a5dcf4727f58eb4e8e8b392508d8657bb":[4,10,3],23"structarm__biquad__casd__df1__inst__q31.html#a636c7fbe09ec4bef0bc0a4b4e2151cbe":[4,10,2],24"structarm__biquad__casd__df1__inst__q31.html#aee29599a6db153ff0e9a5de18b6af73d":[4,10,1],25"structarm__cfft__instance__f32.html":[4,11],26"structarm__cfft__instance__f32.html#a21ceaf59a1bb8440af57c28d2dd9bbab":[4,11,2],27"structarm__cfft__instance__f32.html#a3ba329ed153d182746376208e773d648":[4,11,0],28"structarm__cfft__instance__f32.html#a59cc6f753f1498716e1444ac054c06de":[4,11,3],29"structarm__cfft__instance__f32.html#acd8f9e9540e3dd348212726e5d6aaa95":[4,11,1],30"structarm__cfft__instance__q15.html":[4,12],31"structarm__cfft__instance__q15.html#a5f9e1d3a8c127ee323b5e6929aeb90df":[4,12,1],32"structarm__cfft__instance__q15.html#a738907cf34bdbbaf724414ac2decbc3c":[4,12,0],33"structarm__cfft__instance__q15.html#ac9160b80243b99a0b6e2f75ddb5cf0ae":[4,12,2],34"structarm__cfft__instance__q15.html#afdaf12ce4687cec021c5ae73d0987a3f":[4,12,3],35"structarm__cfft__instance__q31.html":[4,13],36"structarm__cfft__instance__q31.html#a2250fa6b8fe73292c5418c50c0549f87":[4,13,0],37"structarm__cfft__instance__q31.html#a4406f23e8fd0bff8d555225612e2a2a8":[4,13,1],38"structarm__cfft__instance__q31.html#a8a464461649f023325ced1e10470f5d0":[4,13,2],39"structarm__cfft__instance__q31.html#af751114feb91de3ace8600e91bdd0872":[4,13,3],40"structarm__cfft__radix2__instance__f32.html":[4,14],41"structarm__cfft__radix2__instance__f32.html#a18f2a5bea3038dfe36b8ee0b2e655403":[4,14,5],42"structarm__cfft__radix2__instance__f32.html#a1d3d289d47443e597d88a40effd14b8f":[4,14,4],43"structarm__cfft__radix2__instance__f32.html#a1d7f92796c5ffd02a7373f48313c26b5":[4,14,6],44"structarm__cfft__radix2__instance__f32.html#a2f915a1c29635c1623086aaaa726be8f":[4,14,2],45"structarm__cfft__radix2__instance__f32.html#a411f75b6ed016
90293f4f5988030ea42":[4,14,7],46"structarm__cfft__radix2__instance__f32.html#a8dbe98d2c924e35e0a3fed2fe948176f":[4,14,3],47"structarm__cfft__radix2__instance__f32.html#ac1688dafa5177f6b1505abbfd0cf8b21":[4,14,1],48"structarm__cfft__radix2__instance__f32.html#af713b4ac5256a19bc965c89fe3005fa3":[4,14,0],49"structarm__cfft__radix2__instance__q15.html":[4,15],50"structarm__cfft__radix2__instance__q15.html#a1bfc91c423e8a1fa6b71f9091df54f39":[4,15,4],51"structarm__cfft__radix2__instance__q15.html#a68eaf57e58bbc42f7e1dfa8de8145647":[4,15,5],52"structarm__cfft__radix2__instance__q15.html#a6f2ab87fb4c568656e1f92f687b5c850":[4,15,6],53"structarm__cfft__radix2__instance__q15.html#a8722720c542cabd41df83fe88ef4f4cb":[4,15,1],54"structarm__cfft__radix2__instance__q15.html#a874085647351dcf3f0de39d2b1d49744":[4,15,2],55"structarm__cfft__radix2__instance__q15.html#ab5c073286bdd2f6e2bf783ced36bf1de":[4,15,3],56"structarm__cfft__radix2__instance__q15.html#af8300c1f60caa21e6b44b9240ab5af19":[4,15,0],57"structarm__cfft__radix2__instance__q31.html":[4,16],58"structarm__cfft__radix2__instance__q31.html#a2607378ce64be16698bb8a3b1af8d3c8":[4,16,3],59"structarm__cfft__radix2__instance__q31.html#a6239b8d268285334e88c008c07d68616":[4,16,0],60"structarm__cfft__radix2__instance__q31.html#a6a965fcf4ba88bd2661a263a5f18a6b1":[4,16,5],61"structarm__cfft__radix2__instance__q31.html#a960199f1373a192366878ef279eab00f":[4,16,2],62"structarm__cfft__radix2__instance__q31.html#a9d17a87263953fe3559a007512c9f3a4":[4,16,1],63"structarm__cfft__radix2__instance__q31.html#adf510de23a78c9119e0442f7f35efcd1":[4,16,4],64"structarm__cfft__radix2__instance__q31.html#ae63ca9193322cd477970c1d2086407d1":[4,16,6],65"structarm__cfft__radix4__instance__f32.html":[4,17],66"structarm__cfft__radix4__instance__f32.html#a25d1da64dd6487c291f04d226f9acc66":[4,17,3],67"structarm__cfft__radix4__instance__f32.html#a7e6a6d290ce158ce9a15a45e364b021a":[4,17,2],68"structarm__cfft__radix4__instance__f32.html#ab6e84ac830b6d413768dcafe271514a1":[4,17,5],69"structarm__cfft__radix4__instance__f32.html#ab9eed39e40b8d7c16381fbccf84467cd":[4,17,4],70"structarm__cfft__radix4__instance__f32.html#abe31ea2157dfa233e389cdfd3b9993ee":[4,17,7],71"structarm__cfft__radix4__instance__f32.html#ac10927a1620195a88649ce63dab66120":[4,17,0],72"structarm__cfft__radix4__instance__f32.html#ac1455c9894e7fc53c41f3806b13bd274":[4,17,6],73"structarm__cfft__radix4__instance__f32.html#acc8cb18a8b901b8321ab9d86491e41a3":[4,17,1],74"structarm__cfft__radix4__instance__q15.html":[4,18],75"structarm__cfft__radix4__instance__q15.html#a101e3f7b0bd6b5b14cd5214f23df4133":[4,18,0],76"structarm__cfft__radix4__instance__q15.html#a2ecff6ea735cb4d22e922d0fd5736655":[4,18,3],77"structarm__cfft__radix4__instance__q15.html#a5fc543e7d84ca8cb7cf6648970f21ca6":[4,18,2],78"structarm__cfft__radix4__instance__q15.html#a69a91f93a3cae666b56c42d47beab1b4":[4,18,5],79"structarm__cfft__radix4__instance__q15.html#a6b010e5f02d1130c621e3d2e26b95df1":[4,18,1],80"structarm__cfft__radix4__instance__q15.html#a821544995ab3a0fd8f33bef3110cce0e":[4,18,4],81"structarm__cfft__radix4__instance__q15.html#af32fdc78bcc27ca385f9b76a0a1f71c3":[4,18,6],82"structarm__cfft__radix4__instance__q31.html":[4,19],83"structarm__cfft__radix4__instance__q31.html#a499e97268c131e20faea4b8565d4204c":[4,19,5],84"structarm__cfft__radix4__instance__q31.html#a5a7c4f4c7b3fb655cbb2bc11ef160a2a":[4,19,0],85"structarm__cfft__radix4__instance__q31.html#a64b58f7643b0e27c7f2d66ec0ef2e3dc":[4,19,4],86"structarm__cfft__radix4__instance__q31.html#a8cf8187b8232815cf
17ee82bf572ecf9":[4,19,6],87"structarm__cfft__radix4__instance__q31.html#a94d2fead4efa4d5eaae142bbe30b0e15":[4,19,1],88"structarm__cfft__radix4__instance__q31.html#ab413d2a5d3f45fa187d93813bf3bf81b":[4,19,2],89"structarm__cfft__radix4__instance__q31.html#adc0a62ba669ad2282ecbe43d5d96abab":[4,19,3],90"structarm__dct4__instance__f32.html":[4,20],91"structarm__dct4__instance__f32.html#a018f7860b6e070af533fb7d76c7cdc32":[4,20,3],92"structarm__dct4__instance__f32.html#a01fe45dbe46c782245832f2169622c29":[4,20,6],93"structarm__dct4__instance__f32.html#a262b29a51c371b46efc89120e31ccf37":[4,20,0],94"structarm__dct4__instance__f32.html#a61ce8c967b2e998a9c0041cca73cdef8":[4,20,2],95"structarm__dct4__instance__f32.html#a7762296d8871c58f2ef0b6ce327b8f4e":[4,20,4],96"structarm__dct4__instance__f32.html#a978f37fc19add31af243ab5c63ae502f":[4,20,5],97"structarm__dct4__instance__f32.html#adb1ef2739ddbe62e5cdadc47455a4147":[4,20,1],98"structarm__dct4__instance__q15.html":[4,21],99"structarm__dct4__instance__q15.html#a11cf95c1cd9dd2dd5e4b81b8f88dc208":[4,21,5],100"structarm__dct4__instance__q15.html#a197098140d68e89a08f7a249003a0b86":[4,21,2],101"structarm__dct4__instance__q15.html#a53d24009bb9b2e93d0aa07db7f1a6c25":[4,21,0],102"structarm__dct4__instance__q15.html#a7284932ee8c36107c33815eb62eadffc":[4,21,3],103"structarm__dct4__instance__q15.html#aa1a1d7e957ca6af3f341721f5fa3b95b":[4,21,6],104"structarm__dct4__instance__q15.html#aad2097a5525c1a5b3f5cbd7235c06499":[4,21,4],105"structarm__dct4__instance__q15.html#af43dcbbc2fc661ffbc525afe3dcbd7da":[4,21,1],106"structarm__dct4__instance__q31.html":[4,22],107"structarm__dct4__instance__q31.html#a107b210eca05f0bd16246b28fd32c4e7":[4,22,6],108"structarm__dct4__instance__q31.html#a32d3268ba4629908dba056599f0a904d":[4,22,1],109"structarm__dct4__instance__q31.html#a34241ee5930de99d4f22a040d32026e3":[4,22,4],110"structarm__dct4__instance__q31.html#a46a9f136457350676e2bfd3768ff9d6d":[4,22,0],111"structarm__dct4__instance__q31.html#ac80ff7b28fca36aeef74dea12e8312dd":[4,22,2],112"structarm__dct4__instance__q31.html#ac96579cfb28d08bb11dd2fe4c6303833":[4,22,3],113"structarm__dct4__instance__q31.html#af1487dab5e7963b85dc0fdc6bf492542":[4,22,5],114"structarm__fir__decimate__instance__f32.html":[4,23],115"structarm__fir__decimate__instance__f32.html#a1aef1d796b60749d87ff36e282284628":[4,23,2],116"structarm__fir__decimate__instance__f32.html#a2aa2986129db8affef03ede88dd45a03":[4,23,1],117"structarm__fir__decimate__instance__f32.html#a5bddf29aaaf2011d2e3bcec59a83f633":[4,23,3],118"structarm__fir__decimate__instance__f32.html#a76a8b2161731638eb3d67f277919f95d":[4,23,0],119"structarm__fir__decimate__instance__q15.html":[4,24],120"structarm__fir__decimate__instance__q15.html#a3f7b5184bb28853ef401b001df121047":[4,24,3],121"structarm__fir__decimate__instance__q15.html#a44ba13cf8aa4425c34adc82dc468ad54":[4,24,2],122"structarm__fir__decimate__instance__q15.html#aad9320284218b3aa378527ea518cf093":[4,24,0],123"structarm__fir__decimate__instance__q15.html#ac1e9844488ec717da334fbd4c4f41990":[4,24,1],124"structarm__fir__decimate__instance__q31.html":[4,25],125"structarm__fir__decimate__instance__q31.html#a0ef0ef9e265f7ab873cfc6daa7593fdb":[4,25,3],126"structarm__fir__decimate__instance__q31.html#a37915d42b0dc5e3057ebe83110798482":[4,25,1],127"structarm__fir__decimate__instance__q31.html#a530ab6a5d1a39367b281206cd7201dae":[4,25,2],128"structarm__fir__decimate__instance__q31.html#ad3d6936c36303b30dd38f1eddf248ae5":[4,25,0],129"structarm__fir__instance__f32.html":[4,26],130"structarm__fir__inst
ance__f32.html#a20cf98c92b5323799b7881c9ff4d2f7c":[4,26,0],131"structarm__fir__instance__f32.html#a7afcf4022e8560db9b8fd28b0d090a15":[4,26,2],132"structarm__fir__instance__f32.html#aa590e6c845fcb19e027d77307c8ef016":[4,26,1],133"structarm__fir__instance__q15.html":[4,27],134"structarm__fir__instance__q15.html#a0e46f93cf51bfb18b1be808be9c5bfc9":[4,27,0],135"structarm__fir__instance__q15.html#aa8d25f44f45b6a6c4cf38c31569b8a01":[4,27,2],136"structarm__fir__instance__q15.html#ac7f6e5f1b0be8d07c4c35b4be07157b0":[4,27,1],137"structarm__fir__instance__q31.html":[4,28],138"structarm__fir__instance__q31.html#a409f39c93b744784648bdc365541444d":[4,28,2],139"structarm__fir__instance__q31.html#a918fadd775b7a0482b21bf34dae2f094":[4,28,0],140"structarm__fir__instance__q31.html#ae8bb1e9787be4578a23671bf53d3b7cb":[4,28,1],141"structarm__fir__instance__q7.html":[4,29],142"structarm__fir__instance__q7.html#a6947bd695db2d5f8e198e68ca0c72026":[4,29,1],143"structarm__fir__instance__q7.html#a9b50840e2c5ef5b17e1a584fb4cf0d06":[4,29,0],144"structarm__fir__instance__q7.html#aaddea3b9c7e16ddfd9428b7bf9f9c200":[4,29,2],145"structarm__fir__interpolate__instance__f32.html":[4,30],146"structarm__fir__interpolate__instance__f32.html#a389e669e13ec56292a70db8e92194b12":[4,30,2],147"structarm__fir__interpolate__instance__f32.html#a42a8ba1bda85fa86d7b6c84d3da4c75b":[4,30,3],148"structarm__fir__interpolate__instance__f32.html#a9e1258a3dbe84fde28d5ee67311337f4":[4,30,1],149"structarm__fir__interpolate__instance__f32.html#ae6f94dcc0ccd8aa4bc699b20985d9df5":[4,30,0],150"structarm__fir__interpolate__instance__q15.html":[4,31],151"structarm__fir__interpolate__instance__q15.html#a239cee528ee8f79188fc34a94c86673b":[4,31,1],152"structarm__fir__interpolate__instance__q15.html#a26b864363fa47954248f2590e3a82a3c":[4,31,3],153"structarm__fir__interpolate__instance__q15.html#a5431bdc079e72a973b51d359f7f13603":[4,31,0],154"structarm__fir__interpolate__instance__q15.html#ad5178a02a697a77e0d0e60705d9f0a19":[4,31,2],155"structarm__fir__interpolate__instance__q31.html":[4,32],156"structarm__fir__interpolate__instance__q31.html#a5cdf0a631cb74e0e9588c388abe5235c":[4,32,0],157"structarm__fir__interpolate__instance__q31.html#a5d243796584afc7cd6c557f00b7acca5":[4,32,2],158"structarm__fir__interpolate__instance__q31.html#a89c871637896a08168e87ec13691f020":[4,32,1],159"structarm__fir__interpolate__instance__q31.html#addde04514b6e6ac72be3d609f0398b1a":[4,32,3],160"structarm__fir__lattice__instance__f32.html":[4,33],161"structarm__fir__lattice__instance__f32.html#ad369bd9997a250f195254df37408a38f":[4,33,0],162"structarm__fir__lattice__instance__f32.html#ae348884a1ba9b83fadccd5da640cbcaf":[4,33,2],163"structarm__fir__lattice__instance__f32.html#aec9f6580180489d073bb5be700d72bbc":[4,33,1],164"structarm__fir__lattice__instance__q15.html":[4,34],165"structarm__fir__lattice__instance__q15.html#a0248d4f9908c38f9c7865ea5261f6856":[4,34,1],166"structarm__fir__lattice__instance__q15.html#a37b90dea2bc3ee7c9951a9fe74db0cbb":[4,34,2],167"structarm__fir__lattice__instance__q15.html#a38b179138d6a6c9cac4f8f79b6fd5357":[4,34,0],168"structarm__fir__lattice__instance__q31.html":[4,35],169"structarm__fir__lattice__instance__q31.html#a08fe9494ab7cd336b791e9657adadcf6":[4,35,2],170"structarm__fir__lattice__instance__q31.html#a9f3773bbb76bc5a8a5ee9d37786bf478":[4,35,0],171"structarm__fir__lattice__instance__q31.html#ac9e648f1e5bc7d5ee6d5f7d8d22b1e74":[4,35,1],172"structarm__fir__sparse__instance__f32.html":[4,36],173"structarm__fir__sparse__instance__f32.html#a4bc78b1761b6349
e6842241e813d6446":[4,36,2],174"structarm__fir__sparse__instance__f32.html#a57585aeca9dc8686e08df2865375a86d":[4,36,5],175"structarm__fir__sparse__instance__f32.html#a5e19e7f234ac30a3db843352bf2a8515":[4,36,1],176"structarm__fir__sparse__instance__f32.html#a794af0916666d11cc564d6df08553555":[4,36,3],177"structarm__fir__sparse__instance__f32.html#aaa54ae67e5d10c6dd0d697945c638d31":[4,36,4],178"structarm__fir__sparse__instance__f32.html#af8b8c775f4084c36774f06c082b4c078":[4,36,0],179"structarm__fir__sparse__instance__q15.html":[4,37],180"structarm__fir__sparse__instance__q15.html#a0f66b126dd8b85f7467cfb01b7bc4d77":[4,37,1],181"structarm__fir__sparse__instance__q15.html#a4180a6d16a979b59b707a3e7b8cc274b":[4,37,2],182"structarm__fir__sparse__instance__q15.html#a89487f28cab52637426024005e478985":[4,37,5],183"structarm__fir__sparse__instance__q15.html#a98b92b0f5208110129b9a67b1db90408":[4,37,3],184"structarm__fir__sparse__instance__q15.html#ad14cc1070eecf7e1926d8f67a8273182":[4,37,0],185"structarm__fir__sparse__instance__q15.html#aeab2855176c6efdb231a73a3672837d5":[4,37,4],186"structarm__fir__sparse__instance__q31.html":[4,38],187"structarm__fir__sparse__instance__q31.html#a07b6c01e58ec6dde384719130d36b0dc":[4,38,1],188"structarm__fir__sparse__instance__q31.html#a557ed9d477e76e4ad2019344f19f568a":[4,38,5],189"structarm__fir__sparse__instance__q31.html#a830be89daa5a393b225048889aa045d1":[4,38,3],190"structarm__fir__sparse__instance__q31.html#ab87ae457adec8f727afefaa2599fc983":[4,38,4],191"structarm__fir__sparse__instance__q31.html#af714d56f8fec4f93ad557abce6994d45":[4,38,2],192"structarm__fir__sparse__instance__q31.html#afdd3a1dc72132c854dc379154b68b674":[4,38,0],193"structarm__fir__sparse__instance__q7.html":[4,39],194"structarm__fir__sparse__instance__q7.html#a18072cf3ef3666d588f0d49512f2b28f":[4,39,3],195"structarm__fir__sparse__instance__q7.html#a2d2e65473fe3a3f2b953b4e0b60824df":[4,39,5],196"structarm__fir__sparse__instance__q7.html#a54cdd27ca1c672b126c38763ce678b1c":[4,39,1],197"structarm__fir__sparse__instance__q7.html#a7f0989ef34fdb5d08ce5a5abf917218f":[4,39,2],198"structarm__fir__sparse__instance__q7.html#ac625393c84bc0342ffdf26fc4eba1ac1":[4,39,4],199"structarm__fir__sparse__instance__q7.html#af74dacc1d34c078283e50f2530eb91df":[4,39,0],200"structarm__iir__lattice__instance__f32.html":[4,40],201"structarm__iir__lattice__instance__f32.html#a30babe7815510219e6e3d28e6e4a5969":[4,40,2],202"structarm__iir__lattice__instance__f32.html#aa69fcdd3775e828d450ce1bbd978fa31":[4,40,1],203"structarm__iir__lattice__instance__f32.html#af8de449af5efe1f30be82f9ba35587ee":[4,40,0],204"structarm__iir__lattice__instance__f32.html#afc7c8f577e6f27d097fe55f57e707f72":[4,40,3],205"structarm__iir__lattice__instance__q15.html":[4,41],206"structarm__iir__lattice__instance__q15.html#a41c214a1ec38d4a82fae8899d715dd29":[4,41,1],207"structarm__iir__lattice__instance__q15.html#a4c4f57f45b223abbe2a9fb727bd2cad9":[4,41,3],208"structarm__iir__lattice__instance__q15.html#a96fbed313bef01070409fa182d26ba3f":[4,41,0],209"structarm__iir__lattice__instance__q15.html#afd0136ab917b529554d93f41a5e04618":[4,41,2],210"structarm__iir__lattice__instance__q31.html":[4,42],211"structarm__iir__lattice__instance__q31.html#a04507e2b982b1dfa97b7b55752dea6b9":[4,42,3],212"structarm__iir__lattice__instance__q31.html#a1d30aa16aac7722936ea9dee59211863":[4,42,1],213"structarm__iir__lattice__instance__q31.html#a941282745effd26a889fbfadf4b95e6a":[4,42,2],214"structarm__iir__lattice__instance__q31.html#a9df4570ed28c50fd9193ab654ff236ad":[4,42,0],215"
structarm__linear__interp__instance__f32.html":[4,43],216"structarm__linear__interp__instance__f32.html#a08352dc6ea82fbc0827408e018535481":[4,43,2],217"structarm__linear__interp__instance__f32.html#a95f02a926b16d35359aca5b31e813b11":[4,43,0],218"structarm__linear__interp__instance__f32.html#aa8e2d686b5434a406d390b347b183511":[4,43,3],219"structarm__linear__interp__instance__f32.html#ab373001f6afad0850359c344a4d7eee4":[4,43,1],220"structarm__lms__instance__f32.html":[4,44],221"structarm__lms__instance__f32.html#a4795c6f7d3f17cec15c2fd09f66edd1a":[4,44,2],222"structarm__lms__instance__f32.html#aaf94285be2f99b5b9af40bea8dcb14b9":[4,44,3],223"structarm__lms__instance__f32.html#ae2af43d74c93dba16b876e10c97a5b99":[4,44,0],224"structarm__lms__instance__f32.html#af73880d9009982f5d14529869494ec3d":[4,44,1],225"structarm__lms__instance__q15.html":[4,45],226"structarm__lms__instance__q15.html#a0078e894f805af1b360369e619fb57b3":[4,45,1],227"structarm__lms__instance__q15.html#a42f95368b94898eb82608e1113d18cab":[4,45,2],228"structarm__lms__instance__q15.html#a9a575ff82c1e68cbb583083439260d08":[4,45,4],229"structarm__lms__instance__q15.html#aae46129d7cfd7f1c162cc502ed0a9d49":[4,45,0],230"structarm__lms__instance__q15.html#acca5fbaef4a52ae411de24c9a0b929cf":[4,45,3],231"structarm__lms__instance__q31.html":[4,46],232"structarm__lms__instance__q31.html#a206d47b49de6f357f933ebe61520753c":[4,46,4],233"structarm__lms__instance__q31.html#a4705a8f0011bb9166e09bf5bd51e595e":[4,46,3],234"structarm__lms__instance__q31.html#a4afe56e991a5416adfd462aa88bda500":[4,46,2],235"structarm__lms__instance__q31.html#ac0d84f7d054555931ef8a62511fbcb8a":[4,46,1],236"structarm__lms__instance__q31.html#acb6ca9996b3c5f740d5d6c8e9f4f1d46":[4,46,0],237"structarm__lms__norm__instance__f32.html":[4,47],238"structarm__lms__norm__instance__f32.html#a0bc03338687002ed5f2e4a363eb095ec":[4,47,4],239"structarm__lms__norm__instance__f32.html#a1ba688d90aba7de003ed4ad8e2e7ddda":[4,47,3],240"structarm__lms__norm__instance__f32.html#a6a4119e4f39447bbee31b066deafa16f":[4,47,0],241"structarm__lms__norm__instance__f32.html#a84401d3cfc6c40f69c08223cf341b886":[4,47,1],242"structarm__lms__norm__instance__f32.html#ac95f8ca3d816524c2070643852fac5e8":[4,47,2],243"structarm__lms__norm__instance__f32.html#aec958fe89b164a30f38bcca9f5d96218":[4,47,5],244"structarm__lms__norm__instance__q15.html":[4,48],245"structarm__lms__norm__instance__q15.html#a07d272351a3291ed09e3f42447609df0":[4,48,6],246"structarm__lms__norm__instance__q15.html#a1c81ded399919d8181026bc1c8602e7b":[4,48,0],247"structarm__lms__norm__instance__q15.html#a3fc1d6f97d2c6d5324871de6895cb7e9":[4,48,7],248"structarm__lms__norm__instance__q15.html#a7ce00f21d11cfda6d963240641deea8c":[4,48,1],249"structarm__lms__norm__instance__q15.html#a9ee7a45f4f315d7996a969e25fdc7146":[4,48,2],250"structarm__lms__norm__instance__q15.html#aa0d435fbcf7dedb7179d4467e9b79e9f":[4,48,4],251"structarm__lms__norm__instance__q15.html#aa4de490b3bdbd03561b76ee07901c8e3":[4,48,5],252"structarm__lms__norm__instance__q15.html#ae7bca648c75a2ffa02d87852bb78bc8a":[4,48,3]...


main.js

Source:main.js Github


1AUI.add(2 'liferay-plugin-mail',3 function(A) {4 Liferay.Mail = {5 init: function(params) {6 var instance = this;7 instance.namespace = params.namespace;8 instance.accountId = params.initialAccountId;9 instance.folderId = params.initialFolderId;10 instance.accountsContainer = A.one('#accountsContainer');11 instance.composeContainer = A.one('#composeContainer');12 instance.controlContainer = A.one('#controlContainer');13 instance.contentContainer = A.one('#contentContainer');14 instance.foldersContainer = A.one('#foldersContainer');15 instance.mailContainer = A.one('#mailContainer');16 instance.manageFoldersContainer = A.one('#manageFoldersContainer');17 instance.messageContainer = A.one('#messageContainer');18 instance.messagesContainer = A.one('#messagesContainer');19 instance._assignEvents();20 if (instance.accountId == 0) {21 return;22 }23 instance.loadAccount(instance.accountId, instance.folderId);24 Liferay.on('closePortlet', instance._pollStopMessages, instance);25 },26 addAccount: function() {27 var instance = this;28 instance.dialog = Liferay.Util.Window.getWindow(29 {30 dialog: {31 centered: true,32 cssClass: 'mail-dialog',33 destroyOnClose: true,34 modal: true,35 width: 60036 },37 title: Liferay.Language.get('add-account')38 }39 ).plug(40 A.Plugin.IO,41 {42 uri: themeDisplay.getLayoutURL() + '/-/mail/add_account'43 }44 ).render();45 },46 checkMessages: function(folderId) {47 var instance = this;48 if (instance.accountId <= 0) {49 return;50 }51 A.io.request(52 themeDisplay.getLayoutURL() + '/-/mail/check_messages',53 {54 data: Liferay.Util.ns(55 instance.namespace,56 {57 accountId: instance.accountId,58 folderId: folderId59 }60 ),61 dataType: 'JSON',62 method: 'POST',63 on: {64 success: function(event, id, obj) {65 var responseData = this.get('responseData');66 if (responseData.status != 'success' || responseData.value == 'false') {67 return;68 }69 instance.loadFolders(instance.accountId);70 if (instance.folderId == folderId) {71 instance.loadMessages(folderId, instance.pageNumber, instance.orderByField, instance.orderByType, instance.keywords, true);72 }73 }74 }75 }76 );77 },78 deleteMessages: function(messageIds) {79 var instance = this;80 instance.setStatus('info', Liferay.Language.get('deleting-messages'));81 A.io.request(82 themeDisplay.getLayoutURL() + '/-/mail/delete_messages',83 {84 data: Liferay.Util.ns(85 instance.namespace,86 {87 messageIds: messageIds88 }89 ),90 dataType: 'JSON',91 method: 'POST',92 on: {93 failure: function(event, id, obj) {94 instance.setStatus('error', Liferay.Language.get('unable-to-connect-with-mail-server'));95 },96 success: function(event, id, obj) {97 var responseData = this.get('responseData');98 instance.setStatus(responseData.status, responseData.message);99 if (responseData.status == 'success') {100 instance.loadMessages(instance.folderId, instance.pageNumber, instance.orderByField, instance.orderByType, instance.keywords);101 }102 }103 }104 }105 );106 },107 editAccount: function(accountId) {108 var instance = this;109 instance.dialog = Liferay.Util.Window.getWindow(110 {111 dialog: {112 centered: true,113 cssClass: 'mail-dialog',114 destroyOnClose: true,115 modal: true,116 width: 600117 },118 title: Liferay.Language.get('edit-account')119 }120 ).plug(121 A.Plugin.IO,122 {123 data: Liferay.Util.ns(124 instance.namespace,125 {126 accountId: accountId127 }128 ),129 uri: themeDisplay.getLayoutURL() + '/-/mail/edit_account'130 }131 ).render();132 },133 flagMessages: function(flag, value, messageIds) {134 var instance = this;135 
instance.setStatus('info', Liferay.Language.get('flagging-messages'));136 A.io.request(137 themeDisplay.getLayoutURL() + '/-/mail/flag_messages',138 {139 data: Liferay.Util.ns(140 instance.namespace,141 {142 flag: flag,143 messageIds: messageIds,144 value: value145 }146 ),147 dataType: 'JSON',148 method: 'POST',149 on: {150 failure: function(event, id, obj) {151 instance.setStatus('error', Liferay.Language.get('unable-to-connect-with-mail-server'));152 },153 success: function(event, id, obj) {154 var responseData = this.get('responseData');155 instance.setStatus(responseData.status, responseData.message);156 instance.loadMessages(instance.folderId, instance.pageNumber, instance.orderByField, instance.orderByType, instance.keywords);157 }158 }159 }160 );161 },162 loadAccount: function(accountId, inboxFolderId) {163 var instance = this;164 instance._displayContainer(instance.messagesContainer);165 A.io.request(166 themeDisplay.getLayoutURL() + '/-/mail/password_saved',167 {168 data: Liferay.Util.ns(169 instance.namespace,170 {171 accountId: accountId,172 inboxFolderId: inboxFolderId173 }174 ),175 method: 'POST',176 on: {177 failure: function(event, id, obj) {178 instance.setStatus('error', Liferay.Language.get('unable-to-connect-with-mail-server'));179 },180 success: function(event, id, obj) {181 var responseData = this.get('responseData');182 if (A.DataType.Boolean.parse(responseData)) {183 instance.accountId = accountId;184 instance.inboxFolderId = inboxFolderId;185 instance.loadFolders(accountId);186 instance.loadMessages(inboxFolderId, 1, 'sentDate', 'desc', '');187 }188 else {189 instance.passwordPrompt(accountId, inboxFolderId);190 }191 }192 }193 }194 );195 instance._hideWindow();196 },197 loadAccounts: function(accountId) {198 var instance = this;199 instance.accountsContainer.io.set(200 'data',201 Liferay.Util.ns(202 instance.namespace,203 {204 accountId: accountId205 }206 )207 );208 instance.accountsContainer.io.start();209 },210 loadCompose: function(accountId, messageId, messageType, replyMessageId) {211 var instance = this;212 instance._displayContainer(instance.composeContainer);213 instance.composeContainer.io.set(214 'data',215 Liferay.Util.ns(216 instance.namespace,217 {218 accountId: accountId,219 messageId: messageId,220 messageType: messageType,221 replyMessageId: replyMessageId222 }223 )224 );225 instance.composeContainer.io.start();226 },227 loadFolders: function(accountId) {228 var instance = this;229 if (accountId > 0) {230 instance.controlContainer.show();231 instance.foldersContainer.io.set(232 'data',233 Liferay.Util.ns(234 instance.namespace,235 {236 accountId: accountId237 }238 )239 );240 instance.foldersContainer.io.start();241 }242 instance._hideWindow();243 },244 loadManageFolders: function(accountId) {245 var instance = this;246 instance._displayContainer(instance.manageFoldersContainer);247 instance.manageFoldersContainer.io.set(248 'data',249 Liferay.Util.ns(250 instance.namespace,251 {252 accountId: accountId253 }254 )255 );256 instance.manageFoldersContainer.io.start();257 },258 loadMessage: function(folderId, messageNumber, orderByField, orderByType, keywords) {259 var instance = this;260 instance.folderId = folderId;261 instance.orderByField = orderByField;262 instance.orderByType = orderByType;263 instance.keywords = keywords;264 instance._displayContainer(instance.messageContainer);265 instance.messageContainer.io.set(266 'data',267 Liferay.Util.ns(268 instance.namespace,269 {270 folderId: folderId,271 keywords: keywords,272 messageNumber: 
messageNumber,273 orderByField: orderByField,274 orderByType: orderByType275 }276 )277 );278 instance.messageContainer.io.start();279 },280 loadMessages: function(folderId, pageNumber, orderByField, orderByType, keywords, autoLoad) {281 var instance = this;282 instance.folderId = folderId;283 instance.pageNumber = pageNumber;284 instance.orderByField = orderByField;285 instance.orderByType = orderByType;286 instance.keywords = keywords;287 if (autoLoad) {288 instance.messagesContainer.io.set('showLoading', false);289 }290 else {291 instance._displayContainer(instance.messagesContainer);292 if (pageNumber == 1) {293 instance.checkMessages(folderId);294 }295 }296 instance.messagesContainer.io.set(297 'data',298 Liferay.Util.ns(299 instance.namespace,300 {301 folderId: folderId,302 keywords: keywords,303 orderByField: orderByField,304 orderByType: orderByType,305 pageNumber: pageNumber306 }307 )308 );309 instance.messagesContainer.io.start();310 instance.messagesContainer.io.set('showLoading', true);311 },312 moveMessages: function(folderId, messageIds) {313 var instance = this;314 instance.setStatus('info', Liferay.Language.get('moving-messages'));315 A.io.request(316 themeDisplay.getLayoutURL() + '/-/mail/move_messages',317 {318 data: Liferay.Util.ns(319 instance.namespace,320 {321 folderId: folderId,322 messageIds: messageIds323 }324 ),325 dataType: 'JSON',326 method: 'POST',327 on: {328 failure: function(event, id, obj) {329 instance.setStatus('error', Liferay.Language.get('unable-to-connect-with-mail-server'));330 },331 success: function(event, id, obj) {332 var responseData = this.get('responseData');333 instance.setStatus(responseData.status, responseData.message);334 instance.loadFolders(instance.accountId);335 instance.loadMessages(instance.folderId, instance.pageNumber, instance.orderByField, instance.orderByType, instance.keywords);336 }337 }338 }339 );340 },341 passwordPrompt: function(accountId, inboxFolderId) {342 var instance = this;343 instance.dialog = Liferay.Util.Window.getWindow(344 {345 dialog: {346 centered: true,347 cssClass: 'mail-dialog',348 destroyOnClose: true,349 modal: true,350 width: 600351 },352 title: Liferay.Language.get('password')353 }354 ).plug(355 A.Plugin.IO,356 {357 data: Liferay.Util.ns(358 instance.namespace,359 {360 accountId: accountId,361 inboxFolderId: inboxFolderId362 }363 ),364 uri: themeDisplay.getLayoutURL() + '/-/mail/password_prompt'365 }366 ).render();367 },368 reset: function() {369 var instance = this;370 instance.accountId = null;371 instance.folderId = null;372 instance.inboxFolderId = null;373 instance.keywords = '';374 instance.orderByField = 'sentDate';375 instance.orderByType = 'desc';376 instance.pageNumber = 1;377 instance.composeContainer.html('');378 instance.foldersContainer.html('');379 instance.manageFoldersContainer.html('');380 instance.messageContainer.html('');381 instance.messagesContainer.html('');382 instance.controlContainer.hide();383 instance.loadAccounts();384 instance._pollStopMessages();385 instance._hideWindow();386 },387 setStatus: function(type, message, indefinite) {388 var instance = this;389 var statusContainers = A.all('.mail-status').html('<table style="margin: 0 auto;"><tr><td>&nbsp;</td><td><span class="alert message alert-' + type + '">' + message + '</span></td><td>&nbsp;</td></tr></table>');390 var status = statusContainers.all('table');391 if (!indefinite) {392 setTimeout(393 function() {394 status.remove();395 },396 5000397 );398 }399 },400 _assignEvents: function() {401 var instance = this;402 
instance.accountsContainer.plug(403 A.Plugin.IO,404 {405 autoLoad: true,406 data: Liferay.Util.ns(407 instance.namespace,408 {409 accountId: instance.accountId410 }411 ),412 method: 'POST',413 showLoading: false,414 uri: themeDisplay.getLayoutURL() + '/-/mail/view_accounts'415 }416 );417 instance.composeContainer.plug(418 A.Plugin.IO,419 {420 autoLoad: false,421 method: 'POST',422 on: {423 success: function(event) {424 try {425 window[instance.namespace + 'editor'] = null;426 delete window[instance.namespace + 'editor'];427 }428 catch (e) {429 }430 }431 },432 uri: themeDisplay.getLayoutURL() + '/-/mail/compose'433 }434 );435 instance.foldersContainer.plug(436 A.Plugin.IO,437 {438 autoLoad: false,439 method: 'POST',440 showLoading: false,441 uri: themeDisplay.getLayoutURL() + '/-/mail/view_folders'442 }443 );444 instance.messageContainer.plug(445 A.Plugin.IO,446 {447 after: {448 success: function() {449 instance.loadFolders(instance.accountId);450 }451 },452 autoLoad: false,453 method: 'POST',454 uri: themeDisplay.getLayoutURL() + '/-/mail/view_message'455 }456 );457 instance.messagesContainer.plug(458 A.Plugin.IO,459 {460 after: {461 success: function() {462 instance.messagesContainer.all('.flag-messages').on(463 'click',464 function(event) {465 var messageIds = instance._getSelectedMessageIds();466 var currentTarget = event.currentTarget;467 var flagToggle = currentTarget.getData('flagToggle');468 var flagType = currentTarget.getData('flagType');469 instance.flagMessages(flagType, flagToggle, messageIds);470 }471 );472 instance.messagesContainer.all('.move-messages').on(473 'click',474 function(event) {475 var folderId = event.currentTarget.text().trim();476 var messageIds = instance._getSelectedMessageIds();477 instance.moveMessages(folderId, messageIds);478 }479 );480 }481 },482 autoLoad: false,483 method: 'POST',484 uri: themeDisplay.getLayoutURL() + '/-/mail/view_messages'485 }486 );487 instance.accountsContainer.delegate(488 'click',489 function(event) {490 var link = event.currentTarget;491 var accountId = link.getData('accountId');492 var inboxFolderId = link.getData('inboxFolderId');493 instance.loadAccounts(accountId);494 instance.loadAccount(accountId, inboxFolderId);495 },496 '.folders-link'497 );498 instance.composeContainer.delegate(499 'click',500 function(event) {501 var messageId = event.currentTarget.getData('messageId');502 instance.deleteMessages([messageId]);503 },504 '.discard-draft'505 );506 instance.contentContainer.delegate(507 'click',508 function(event) {509 var link = event.currentTarget;510 var li = link.ancestor('li');511 if (!li || !li.hasClass('disabled')) {512 var folderId = link.getData('folderId');513 var keywords = link.getData('keywords');514 var messageNumber = link.getData('messageNumber');515 var orderByField = link.getData('orderByField');516 var orderByType = link.getData('orderByType');517 instance.loadMessage(folderId, messageNumber, orderByField, orderByType, keywords);518 }519 },520 '.message-link'521 );522 instance.contentContainer.delegate(523 'click',524 function(event) {525 var link = event.currentTarget;526 var messageId = link.getData('messageId');527 instance.loadCompose(instance.accountId, messageId, 'edit', 0);528 },529 '.draft-link'530 );531 instance.foldersContainer.delegate(532 'click',533 function(event) {534 instance.editAccount(instance.accountId);535 },536 '.edit-account'537 );538 instance.foldersContainer.delegate(539 'click',540 function(event) {541 instance.loadManageFolders(instance.accountId);542 },543 '.manage-folders'544 
);545 instance.mailContainer.delegate(546 'click',547 function(event) {548 var link = event.currentTarget;549 var li = link.ancestor('li');550 if (!li || !li.hasClass('disabled')) {551 var folderId = link.getData('folderId');552 var keywords = link.getData('keywords');553 var orderByField = link.getData('orderByField');554 var orderByType = link.getData('orderByType');555 var pageNumber = link.getData('pageNumber');556 instance.loadMessages(folderId, pageNumber, orderByField, orderByType, keywords);557 }558 },559 '.messages-link'560 );561 instance.mailContainer.delegate(562 'click',563 function(event) {564 var link = event.currentTarget;565 if (!link.hasAttribute('data-messageType')) {566 link = link.one('input[type="button"]');567 }568 var messageId = link.getData('messageId');569 var messageType = link.getData('messageType');570 var replyMessageId = link.getData('replyMessageId');571 instance.loadCompose(instance.accountId, messageId, messageType, replyMessageId);572 },573 '.compose-message'574 );575 instance.manageFoldersContainer.plug(576 A.Plugin.IO,577 {578 autoLoad: false,579 method: 'POST',580 uri: themeDisplay.getLayoutURL() + '/-/mail/manage_folders'581 }582 );583 instance.messageContainer.delegate(584 'click',585 function(event) {586 var messageId = event.currentTarget.getData('messageId');587 instance.deleteMessages([messageId]);588 },589 '.delete-message'590 );591 instance.messagesContainer.delegate(592 'click',593 function(event) {594 var messageIds = instance._getSelectedMessageIds();595 instance.deleteMessages(messageIds);596 },597 '.delete-messages'598 );599 instance.messagesContainer.delegate(600 'click',601 function(event) {602 var keywords = event.currentTarget.ancestor('.search-messages').one('input').val();603 instance.loadMessages(instance.folderId, 1, instance.orderByField, instance.orderByType, keywords);604 },605 '.search-messages button'606 );607 instance.messagesContainer.delegate(608 'keyup',609 function(event) {610 if (event.keyCode != 13) {611 return;612 }613 var keywords = event.currentTarget.val();614 instance.loadMessages(instance.folderId, 1, instance.orderByField, instance.orderByType, keywords);615 },616 '.search-messages input'617 );618 instance.messagesContainer.delegate(619 'click',620 function(event) {621 instance.messagesContainer.all('input[type=checkbox]').each(622 function(item, index, collection) {623 item.set('checked', true);624 }625 );626 },627 '.select-all'628 );629 instance.messagesContainer.delegate(630 'click',631 function(event) {632 instance.messagesContainer.all('input[type=checkbox]').each(633 function(item, index, collection) {634 item.set('checked', false);635 }636 );637 },638 '.select-none'639 );640 instance._pollCheckMessages();641 },642 _displayContainer: function(container) {643 var instance = this;644 instance.composeContainer.hide();645 instance.manageFoldersContainer.hide();646 instance.messagesContainer.hide();647 instance.messageContainer.hide();648 container.show();649 },650 _getSelectedMessageIds: function() {651 var instance = this;652 var messageIds = [];653 instance.messagesContainer.all('input[type=checkbox]').each(654 function(item, index, collection) {655 var messageId = item.getAttribute('messageId');656 if (messageId && item.get('checked')) {657 messageIds.push(messageId);658 }659 }660 );661 return messageIds;662 },663 _hideWindow: function() {664 var instance = this;665 var dialog = instance.dialog;666 if (dialog) {667 dialog.hide();668 }669 },670 _pollCheckMessages: function() {671 var instance = this;672 if 
(instance.inboxFolderId) {673 instance.checkMessages(instance.inboxFolderId);674 }675 if (instance.timeoutMessages) {676 clearTimeout(instance.timeoutMessages);677 }678 instance.timeoutMessages = setTimeout(A.bind('_pollCheckMessages', instance), instance._pollInterval);679 },680 _pollStopMessages: function() {681 var instance = this;682 clearTimeout(instance.timeoutMessages);683 },684 accountId: null,685 folderId: null,686 inboxFolderId: null,687 keywords: '',688 orderByField: 'sentDate',689 orderByType: 'desc',690 pageNumber: 1,691 _pollInterval: 60000692 };693 },694 '',695 {696 requires: ['aui-base', 'aui-datatype', 'aui-io-deprecated', 'liferay-util-window']697 }...


instancegroupsutils.py

Source:instancegroupsutils.py Github


# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.emr import constants
from awscli.customizations.emr import exceptions


def build_instance_groups(parsed_instance_groups):
    """
    Helper method that converts --instance-groups option value in
    create-cluster and add-instance-groups to
    Amazon Elastic MapReduce InstanceGroupConfig data type.
    """
    instance_groups = []
    for instance_group in parsed_instance_groups:
        ig_config = {}
        keys = instance_group.keys()
        if 'Name' in keys:
            ig_config['Name'] = instance_group['Name']
        else:
            ig_config['Name'] = instance_group['InstanceGroupType']
        ig_config['InstanceType'] = instance_group['InstanceType']
        ig_config['InstanceCount'] = instance_group['InstanceCount']
        ig_config['InstanceRole'] = instance_group['InstanceGroupType'].upper()
        if 'BidPrice' in keys:
            ig_config['BidPrice'] = instance_group['BidPrice']
            ig_config['Market'] = constants.SPOT
        else:
            ig_config['Market'] = constants.ON_DEMAND
        if 'EbsConfiguration' in keys:
            ig_config['EbsConfiguration'] = instance_group['EbsConfiguration']
        if 'AutoScalingPolicy' in keys:
            ig_config['AutoScalingPolicy'] = instance_group['AutoScalingPolicy']
        instance_groups.append(ig_config)
    return instance_groups


def _build_instance_group(
        instance_type, instance_count, instance_group_type):
    ig_config = {}
    ig_config['InstanceType'] = instance_type
    ig_config['InstanceCount'] = instance_count
    ig_config['InstanceRole'] = instance_group_type.upper()
    ig_config['Name'] = ig_config['InstanceRole']
    ig_config['Market'] = constants.ON_DEMAND
    return ig_config


def validate_and_build_instance_groups(
        instance_groups, instance_type, instance_count):
    if (instance_groups is None and instance_type is None):
        raise exceptions.MissingRequiredInstanceGroupsError
    if (instance_groups is not None and
            (instance_type is not None or
             instance_count is not None)):
        raise exceptions.InstanceGroupsValidationError
    if instance_groups is not None:
        return build_instance_groups(instance_groups)
    else:
        instance_groups = []
        master_ig = _build_instance_group(
            instance_type=instance_type,
            instance_count=1,
            instance_group_type="MASTER")
        instance_groups.append(master_ig)
        if instance_count is not None and int(instance_count) > 1:
            core_ig = _build_instance_group(
                instance_type=instance_type,
                instance_count=int(instance_count) - 1,
                instance_group_type="CORE")
            instance_groups.append(core_ig)
...


Using AI Code Generation


const {instance} = require('fast-check-monorepo');
instance();

const {static} = require('fast-check-monorepo');
static();
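For comparison, here is a minimal runnable sketch against the documented API of fast-check itself (the npm package published from the fast-check-monorepo repository is `fast-check`; the `instance` and `static` exports used above are assumptions of the generated snippet). In fast-check, "instance methods" usually means the methods available on an Arbitrary instance, such as map and filter:

// Minimal sketch, assuming only the documented fast-check API.
const fc = require('fast-check');

// .filter and .map are instance methods on an Arbitrary: they derive new
// arbitraries from an existing instance.
const evenInt = fc.integer({ min: 0, max: 1000 }).filter((n) => n % 2 === 0);
const labelled = evenInt.map((n) => `even:${n}`);

// Draw a few sample values from the derived arbitrary.
console.log(fc.sample(labelled, 5));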


Using AI Code Generation


const test2 = require('fast-check-monorepo-test2');
const test2Instance = new test2();
console.log(test2Instance.test2Method());

Using AI Code Generation

// `check` is a real fast-check export; `instanceMethod` is assumed to come from
// 'fast-check-monorepo' and to wrap the predicate into a runnable property.
// `describe`/`it` assume a test runner such as Jest or Mocha is available.
const { check } = require('fast-check');
const { instanceMethod } = require('fast-check-monorepo');

const isEven = (n) => n % 2 === 0;

describe('instanceMethod', () => {
  it('should generate even numbers', () => {
    check(instanceMethod(isEven), {});
  });
});
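For comparison, the same even-number property can be written against documented fast-check exports alone (fc.assert, fc.property, fc.nat); doubling a generated natural is just one convenient way to produce even inputs, not something prescribed by the snippet above:

const fc = require('fast-check');

const isEven = (n) => n % 2 === 0;

// Generate even numbers by doubling arbitrary naturals, then check the predicate.
fc.assert(
  fc.property(fc.nat().map((n) => n * 2), (value) => isEven(value))
);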

Using AI Code Generation

// `check` is a real fast-check export; `myInstanceMethod` is assumed to be a
// 'fast-check-monorepo' export that returns a property, and `numRuns: 10`
// limits execution to ten generated cases.
const { check } = require('fast-check');
const { myInstanceMethod } = require('fast-check-monorepo');

check(myInstanceMethod(), { numRuns: 10 });
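The numRuns option itself is part of the real fast-check API. Below is a self-contained sketch that exercises it on an instance method; the Stack class is hypothetical and only provides something to test:

const fc = require('fast-check');

// Hypothetical class under test.
class Stack {
  constructor() {
    this.items = [];
  }
  push(x) {
    this.items.push(x);
    return this.items.length;
  }
}

// Property: pushing n values leaves exactly n items on the stack.
fc.assert(
  fc.property(fc.array(fc.integer()), (values) => {
    const stack = new Stack();
    values.forEach((v) => stack.push(v));
    return stack.items.length === values.length;
  }),
  { numRuns: 10 } // run only 10 generated cases instead of the default 100
);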

Using AI Code Generation

// The generated snippet mixed an ES module import with a CommonJS require in
// the same file, which cannot run: `import` is a syntax error in CommonJS and
// `require` is not defined in an ES module. Shown here with consistent ESM
// imports; `instanceMethod` and `staticMethod` are assumed exports of
// 'fast-check-monorepo', and `fc` was imported but never used in the original.
import fc from "fast-check";
import { instanceMethod, staticMethod } from "fast-check-monorepo";

const instanceResult = instanceMethod();
console.log(instanceResult);

const staticResult = staticMethod();
console.log(staticResult);

Using AI Code Generation

// `check` and `property` are real fast-check exports; `instanceMethod` is
// assumed to turn the object's `foo` method into an arbitrary of its results.
const { check, property } = require('fast-check');
const { instanceMethod } = require('fast-check-monorepo');

check(
  property(instanceMethod({ foo: () => 42 }), (value) => value === 42),
  {}
);
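A comparable check using only documented fast-check exports would wrap the object in fc.constant and assert on what the instance method returns; the subject object and its foo method are taken from the snippet above and are otherwise arbitrary placeholders:

const fc = require('fast-check');

const subject = { foo: () => 42 };

// Wrap the object in a constant arbitrary and assert on the instance method's result.
const report = fc.check(
  fc.property(fc.constant(subject), (obj) => obj.foo() === 42)
);
console.log(report.failed); // false when the property holds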

Using AI Code Generation

const fc = require('fast-check');
console.log(fc.sample(fc.nat(), 10));

import fc from 'fast-check';

SyntaxError: Cannot use import statement outside a module
    at Module._compile (internal/modules/cjs/loader.js:721:23)
    at Object.Module._extensions..js (internal/modules/cjs/loader.js:789:10)
    at Module.load (internal/modules/cjs/loader.js:653:32)
    at tryModuleLoad (internal/modules/cjs/loader.js:593:12)
    at Function.Module._load (internal/modules/cjs/loader.js:585:3)
    at Function.Module.runMain (internal/modules/cjs/loader.js:829:12)

If I use require instead of import, I get this error:

const fc = require('fast-check');

SyntaxError: Cannot use import statement outside a module
    at Module._compile (internal/modules/cjs/loader.js:721:23)
    at Object.Module._extensions..js (internal/modules/cjs/loader.js:789:10)
    at Module.load (internal/modules/cjs/loader.js:653:32)
    at tryModuleLoad (internal/modules/cjs/loader.js:593:12)
    at Function.Module._load (internal/modules/cjs/loader.js:585:3)
    at Function.Module.runMain (internal/modules/cjs/loader.js:829:12)

import fc
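For reference, here is a minimal CommonJS version of the same sampling call that avoids the error, assuming a plain .js file in a package without "type": "module"; fc.sample and fc.nat are documented fast-check exports:

// sample.js (run with: node sample.js)
const fc = require('fast-check');

// Draw 10 values from the natural-number arbitrary and print them.
console.log(fc.sample(fc.nat(), 10));

The ESM form, import fc from 'fast-check', works instead once the file uses the .mjs extension or the package declares "type": "module"; keeping an import statement in a CommonJS file is what triggers the SyntaxError shown above.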

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, which covers everything from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fast-check-monorepo automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

