How to use the _get_host_capabilities method in lisa

Best Python code snippet using lisa_python

driver.py

Source: driver.py (GitHub)
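The excerpt below is from an OpenStack Nova-style libvirt compute driver. It opens inside what appears to be the driver's quality-warning helper (init_host() later calls self._do_quality_warnings()): _get_host_capabilities() returns a parsed libvirt host-capabilities object, and caps.host.cpu.arch gives the host CPU architecture, which the driver checks against its tested virt_type/arch combinations. As a minimal, hypothetical sketch of that call pattern (the Fake* classes below are stand-ins for the parsed capabilities object and are not part of the snippet; the snippet itself compares against arch.I686/arch.X86_64 constants rather than bare strings):

# Hypothetical stand-ins modelling only the attributes the snippet reads.
class FakeCPU(object):
    arch = 'x86_64'

class FakeHost(object):
    cpu = FakeCPU()

class FakeCaps(object):
    host = FakeHost()

def is_tested_config(caps, virt_type):
    # Mirrors the check at the top of the excerpt: only qemu/kvm on
    # intel 32- or 64-bit hosts is considered tested upstream.
    hostarch = caps.host.cpu.arch
    return virt_type in ('qemu', 'kvm') and hostarch in ('i686', 'x86_64')

print(is_tested_config(FakeCaps(), 'kvm'))  # -> True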


...592 configurations to indicate to administrators that the quality is593 unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems594 is tested upstream.595 """596 caps = self._get_host_capabilities()597 hostarch = caps.host.cpu.arch598 if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or599 hostarch not in (arch.I686, arch.X86_64)):600 LOG.warn(_LW('The libvirt driver is not tested on '601 '%(type)s/%(arch)s by the OpenStack project and '602 'thus its quality can not be ensured. For more '603 'information, see: https://wiki.openstack.org/wiki/'604 'HypervisorSupportMatrix'),605 {'type': CONF.libvirt.virt_type, 'arch': hostarch})606 def init_host(self, host):607 # NOTE(dkliban): Error handler needs to be registered before libvirt608 # connection is used for the first time. Otherwise, the609 # handler does not get registered.610 libvirt.registerErrorHandler(libvirt_error_handler, None)611 libvirt.virEventRegisterDefaultImpl()612 self._do_quality_warnings()613 if (CONF.libvirt.virt_type == 'lxc' and614 not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):615 LOG.warn(_LW("Running libvirt-lxc without user namespaces is "616 "dangerous. Containers spawned by Nova will be run "617 "as the host's root user. It is highly suggested "618 "that user namespaces be used in a public or "619 "multi-tenant environment."))620 # Stop libguestfs using KVM unless we're also configured621 # to use this. This solves problem where people need to622 # stop Nova use of KVM because nested-virt is broken623 if CONF.libvirt.virt_type != "kvm":624 guestfs.force_tcg()625 if not self._has_min_version(MIN_LIBVIRT_VERSION):626 major = MIN_LIBVIRT_VERSION[0]627 minor = MIN_LIBVIRT_VERSION[1]628 micro = MIN_LIBVIRT_VERSION[2]629 LOG.error(_LE('Nova requires libvirt version '630 '%(major)i.%(minor)i.%(micro)i or greater.'),631 {'major': major, 'minor': minor, 'micro': micro})632 self._init_events()633 def _get_new_connection(self):634 # call with _wrapped_conn_lock held635 LOG.debug('Connecting to libvirt: %s', self.uri())636 wrapped_conn = None637 try:638 wrapped_conn = self._connect(self.uri(), self.read_only)639 finally:640 # Enabling the compute service, in case it was disabled641 # since the connection was successful.642 disable_reason = DISABLE_REASON_UNDEFINED643 if not wrapped_conn:644 disable_reason = 'Failed to connect to libvirt'645 self._set_host_enabled(bool(wrapped_conn), disable_reason)646 self._wrapped_conn = wrapped_conn647 self._skip_list_all_domains = False648 try:649 LOG.debug("Registering for lifecycle events %s", self)650 wrapped_conn.domainEventRegisterAny(651 None,652 libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,653 self._event_lifecycle_callback,654 self)655 except Exception as e:656 LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),657 {'uri': self.uri(), 'error': e})658 try:659 LOG.debug("Registering for connection events: %s", str(self))660 wrapped_conn.registerCloseCallback(self._close_callback, None)661 except (TypeError, AttributeError) as e:662 # NOTE: The registerCloseCallback of python-libvirt 1.0.1+663 # is defined with 3 arguments, and the above registerClose-664 # Callback succeeds. 
However, the one of python-libvirt 1.0.0665 # is defined with 4 arguments and TypeError happens here.666 # Then python-libvirt 0.9 does not define a method register-667 # CloseCallback.668 LOG.debug("The version of python-libvirt does not support "669 "registerCloseCallback or is too old: %s", e)670 except libvirt.libvirtError as e:671 LOG.warn(_LW("URI %(uri)s does not support connection"672 " events: %(error)s"),673 {'uri': self.uri(), 'error': e})674 return wrapped_conn675 def _get_connection(self):676 # multiple concurrent connections are protected by _wrapped_conn_lock677 with self._wrapped_conn_lock:678 wrapped_conn = self._wrapped_conn679 if not wrapped_conn or not self._test_connection(wrapped_conn):680 wrapped_conn = self._get_new_connection()681 return wrapped_conn682 _conn = property(_get_connection)683 def _close_callback(self, conn, reason, opaque):684 close_info = {'conn': conn, 'reason': reason}685 self._queue_event(close_info)686 @staticmethod687 def _test_connection(conn):688 try:689 conn.getLibVersion()690 return True691 except libvirt.libvirtError as e:692 if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,693 libvirt.VIR_ERR_INTERNAL_ERROR) and694 e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,695 libvirt.VIR_FROM_RPC)):696 LOG.debug('Connection to libvirt broke')697 return False698 raise699 @staticmethod700 def uri():701 if CONF.libvirt.virt_type == 'uml':702 uri = CONF.libvirt.connection_uri or 'uml:///system'703 elif CONF.libvirt.virt_type == 'xen':704 uri = CONF.libvirt.connection_uri or 'xen:///'705 elif CONF.libvirt.virt_type == 'lxc':706 uri = CONF.libvirt.connection_uri or 'lxc:///'707 else:708 uri = CONF.libvirt.connection_uri or 'qemu:///system'709 return uri710 @staticmethod711 def _connect_auth_cb(creds, opaque):712 if len(creds) == 0:713 return 0714 raise exception.NovaException(715 _("Can not handle authentication request for %d credentials")716 % len(creds))717 @staticmethod718 def _connect(uri, read_only):719 auth = [[libvirt.VIR_CRED_AUTHNAME,720 libvirt.VIR_CRED_ECHOPROMPT,721 libvirt.VIR_CRED_REALM,722 libvirt.VIR_CRED_PASSPHRASE,723 libvirt.VIR_CRED_NOECHOPROMPT,724 libvirt.VIR_CRED_EXTERNAL],725 LibvirtDriver._connect_auth_cb,726 None]727 try:728 flags = 0729 if read_only:730 flags = libvirt.VIR_CONNECT_RO731 # tpool.proxy_call creates a native thread. 
Due to limitations732 # with eventlet locking we cannot use the logging API inside733 # the called function.734 return tpool.proxy_call(735 (libvirt.virDomain, libvirt.virConnect),736 libvirt.openAuth, uri, auth, flags)737 except libvirt.libvirtError as ex:738 LOG.exception(_LE("Connection to libvirt failed: %s"), ex)739 payload = dict(ip=LibvirtDriver.get_host_ip_addr(),740 method='_connect',741 reason=ex)742 rpc.get_notifier('compute').error(nova_context.get_admin_context(),743 'compute.libvirt.error',744 payload)745 raise exception.HypervisorUnavailable(host=CONF.host)746 def instance_exists(self, instance):747 """Efficient override of base instance_exists method."""748 try:749 self._lookup_by_name(instance.name)750 return True751 except exception.NovaException:752 return False753 def _list_instance_domains_fast(self, only_running=True):754 # The modern (>= 0.9.13) fast way - 1 single API call for all domains755 flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE756 if not only_running:757 flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE758 return self._conn.listAllDomains(flags)759 def _list_instance_domains_slow(self, only_running=True):760 # The legacy (< 0.9.13) slow way - O(n) API call for n domains761 uuids = []762 doms = []763 # Redundant numOfDomains check is for libvirt bz #836647764 if self._conn.numOfDomains() > 0:765 for id in self._conn.listDomainsID():766 try:767 dom = self._lookup_by_id(id)768 doms.append(dom)769 uuids.append(dom.UUIDString())770 except exception.InstanceNotFound:771 continue772 if only_running:773 return doms774 for name in self._conn.listDefinedDomains():775 try:776 dom = self._lookup_by_name(name)777 if dom.UUIDString() not in uuids:778 doms.append(dom)779 except exception.InstanceNotFound:780 continue781 return doms782 def _list_instance_domains(self, only_running=True, only_guests=True):783 """Get a list of libvirt.Domain objects for nova instances784 :param only_running: True to only return running instances785 :param only_guests: True to filter out any host domain (eg Dom-0)786 Query libvirt to a get a list of all libvirt.Domain objects787 that correspond to nova instances. If the only_running parameter788 is true this list will only include active domains, otherwise789 inactive domains will be included too. 
If the only_guests parameter790 is true the list will have any "host" domain (aka Xen Domain-0)791 filtered out.792 :returns: list of libvirt.Domain objects793 """794 if not self._skip_list_all_domains:795 try:796 alldoms = self._list_instance_domains_fast(only_running)797 except (libvirt.libvirtError, AttributeError) as ex:798 LOG.info(_LI("Unable to use bulk domain list APIs, "799 "falling back to slow code path: %(ex)s"),800 {'ex': ex})801 self._skip_list_all_domains = True802 if self._skip_list_all_domains:803 # Old libvirt, or a libvirt driver which doesn't804 # implement the new API805 alldoms = self._list_instance_domains_slow(only_running)806 doms = []807 for dom in alldoms:808 if only_guests and dom.ID() == 0:809 continue810 doms.append(dom)811 return doms812 def list_instances(self):813 names = []814 for dom in self._list_instance_domains(only_running=False):815 names.append(dom.name())816 return names817 def list_instance_uuids(self):818 uuids = []819 for dom in self._list_instance_domains(only_running=False):820 uuids.append(dom.UUIDString())821 return uuids822 def plug_vifs(self, instance, network_info):823 """Plug VIFs into networks."""824 for vif in network_info:825 self.vif_driver.plug(instance, vif)826 def _unplug_vifs(self, instance, network_info, ignore_errors):827 """Unplug VIFs from networks."""828 for vif in network_info:829 try:830 self.vif_driver.unplug(instance, vif)831 except exception.NovaException:832 if not ignore_errors:833 raise834 def unplug_vifs(self, instance, network_info):835 self._unplug_vifs(instance, network_info, False)836 def _teardown_container(self, instance):837 inst_path = libvirt_utils.get_instance_path(instance)838 container_dir = os.path.join(inst_path, 'rootfs')839 rootfs_dev = instance.system_metadata.get('rootfs_device_name')840 disk.teardown_container(container_dir, rootfs_dev)841 def _destroy(self, instance):842 try:843 virt_dom = self._lookup_by_name(instance['name'])844 except exception.InstanceNotFound:845 virt_dom = None846 # If the instance is already terminated, we're still happy847 # Otherwise, destroy it848 old_domid = -1849 if virt_dom is not None:850 try:851 old_domid = virt_dom.ID()852 virt_dom.destroy()853 # NOTE(GuanQiang): teardown container to avoid resource leak854 if CONF.libvirt.virt_type == 'lxc':855 self._teardown_container(instance)856 except libvirt.libvirtError as e:857 is_okay = False858 errcode = e.get_error_code()859 if errcode == libvirt.VIR_ERR_NO_DOMAIN:860 # Domain already gone. This can safely be ignored.861 is_okay = True862 elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:863 # If the instance is already shut off, we get this:864 # Code=55 Error=Requested operation is not valid:865 # domain is not running866 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]867 if state == power_state.SHUTDOWN:868 is_okay = True869 elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:870 LOG.warn(_LW("Cannot destroy instance, operation time "871 "out"),872 instance=instance)873 reason = _("operation time out")874 raise exception.InstancePowerOffFailure(reason=reason)875 if not is_okay:876 with excutils.save_and_reraise_exception():877 LOG.error(_LE('Error from libvirt during destroy. 
'878 'Code=%(errcode)s Error=%(e)s'),879 {'errcode': errcode, 'e': e},880 instance=instance)881 def _wait_for_destroy(expected_domid):882 """Called at an interval until the VM is gone."""883 # NOTE(vish): If the instance disappears during the destroy884 # we ignore it so the cleanup can still be885 # attempted because we would prefer destroy to886 # never fail.887 try:888 dom_info = self.get_info(instance)889 state = dom_info['state']890 new_domid = dom_info['id']891 except exception.InstanceNotFound:892 LOG.warning(_LW("During wait destroy, instance disappeared."),893 instance=instance)894 raise loopingcall.LoopingCallDone()895 if state == power_state.SHUTDOWN:896 LOG.info(_LI("Instance destroyed successfully."),897 instance=instance)898 raise loopingcall.LoopingCallDone()899 # NOTE(wangpan): If the instance was booted again after destroy,900 # this may be a endless loop, so check the id of901 # domain here, if it changed and the instance is902 # still running, we should destroy it again.903 # see https://bugs.launchpad.net/nova/+bug/1111213 for more details904 if new_domid != expected_domid:905 LOG.info(_LI("Instance may be started again."),906 instance=instance)907 kwargs['is_running'] = True908 raise loopingcall.LoopingCallDone()909 kwargs = {'is_running': False}910 timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,911 old_domid)912 timer.start(interval=0.5).wait()913 if kwargs['is_running']:914 LOG.info(_LI("Going to destroy instance again."),915 instance=instance)916 self._destroy(instance)917 def destroy(self, context, instance, network_info, block_device_info=None,918 destroy_disks=True, migrate_data=None):919 self._destroy(instance)920 self.cleanup(context, instance, network_info, block_device_info,921 destroy_disks, migrate_data)922 def _undefine_domain(self, instance):923 try:924 virt_dom = self._lookup_by_name(instance['name'])925 except exception.InstanceNotFound:926 virt_dom = None927 if virt_dom:928 try:929 try:930 virt_dom.undefineFlags(931 libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)932 except libvirt.libvirtError:933 LOG.debug("Error from libvirt during undefineFlags."934 " Retrying with undefine", instance=instance)935 virt_dom.undefine()936 except AttributeError:937 # NOTE(vish): Older versions of libvirt don't support938 # undefine flags, so attempt to do the939 # right thing.940 try:941 if virt_dom.hasManagedSaveImage(0):942 virt_dom.managedSaveRemove(0)943 except AttributeError:944 pass945 virt_dom.undefine()946 except libvirt.libvirtError as e:947 with excutils.save_and_reraise_exception():948 errcode = e.get_error_code()949 LOG.error(_LE('Error from libvirt during undefine. '950 'Code=%(errcode)s Error=%(e)s'),951 {'errcode': errcode, 'e': e}, instance=instance)952 def cleanup(self, context, instance, network_info, block_device_info=None,953 destroy_disks=True, migrate_data=None, destroy_vifs=True):954 if destroy_vifs:955 self._unplug_vifs(instance, network_info, True)956 retry = True957 while retry:958 try:959 self.firewall_driver.unfilter_instance(instance,960 network_info=network_info)961 except libvirt.libvirtError as e:962 try:963 state = self.get_info(instance)['state']964 except exception.InstanceNotFound:965 state = power_state.SHUTDOWN966 if state != power_state.SHUTDOWN:967 LOG.warn(_LW("Instance may be still running, destroy "968 "it again."), instance=instance)969 self._destroy(instance)970 else:971 retry = False972 errcode = e.get_error_code()973 LOG.exception(_LE('Error from libvirt during unfilter. 
'974 'Code=%(errcode)s Error=%(e)s'),975 {'errcode': errcode, 'e': e},976 instance=instance)977 reason = "Error unfiltering instance."978 raise exception.InstanceTerminationFailure(reason=reason)979 except Exception:980 retry = False981 raise982 else:983 retry = False984 # FIXME(wangpan): if the instance is booted again here, such as the985 # the soft reboot operation boot it here, it will986 # become "running deleted", should we check and destroy987 # it at the end of this method?988 # NOTE(vish): we disconnect from volumes regardless989 block_device_mapping = driver.block_device_info_get_mapping(990 block_device_info)991 for vol in block_device_mapping:992 connection_info = vol['connection_info']993 disk_dev = vol['mount_device']994 if disk_dev is not None:995 disk_dev = disk_dev.rpartition("/")[2]996 if ('data' in connection_info and997 'volume_id' in connection_info['data']):998 volume_id = connection_info['data']['volume_id']999 encryption = encryptors.get_encryption_metadata(1000 context, self._volume_api, volume_id, connection_info)1001 if encryption:1002 # The volume must be detached from the VM before1003 # disconnecting it from its encryptor. Otherwise, the1004 # encryptor may report that the volume is still in use.1005 encryptor = self._get_volume_encryptor(connection_info,1006 encryption)1007 encryptor.detach_volume(**encryption)1008 try:1009 self._disconnect_volume(connection_info, disk_dev)1010 except Exception as exc:1011 with excutils.save_and_reraise_exception() as ctxt:1012 if destroy_disks:1013 # Don't block on Volume errors if we're trying to1014 # delete the instance as we may be partially created1015 # or deleted1016 ctxt.reraise = False1017 LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "1018 "during delete %(exc)s"),1019 {'vol_id': vol.get('volume_id'), 'exc': exc},1020 instance=instance)1021 if destroy_disks:1022 # NOTE(haomai): destroy volumes if needed1023 if CONF.libvirt.images_type == 'lvm':1024 self._cleanup_lvm(instance)1025 if CONF.libvirt.images_type == 'rbd':1026 self._cleanup_rbd(instance)1027 if destroy_disks or (1028 migrate_data and migrate_data.get('is_shared_block_storage',1029 False)):1030 self._delete_instance_files(instance)1031 if CONF.serial_console.enabled:1032 for host, port in self._get_serial_ports_from_instance(instance):1033 serial_console.release_port(host=host, port=port)1034 self._undefine_domain(instance)1035 def _detach_encrypted_volumes(self, instance):1036 """Detaches encrypted volumes attached to instance."""1037 disks = jsonutils.loads(self.get_instance_disk_info(instance['name']))1038 encrypted_volumes = filter(dmcrypt.is_encrypted,1039 [disk['path'] for disk in disks])1040 for path in encrypted_volumes:1041 dmcrypt.delete_volume(path)1042 def _get_serial_ports_from_instance(self, instance, mode=None):1043 """Returns an iterator over serial port(s) configured on instance.1044 :param mode: Should be a value in (None, bind, connect)1045 """1046 virt_dom = self._lookup_by_name(instance['name'])1047 xml = virt_dom.XMLDesc(0)1048 tree = etree.fromstring(xml)1049 for serial in tree.findall("./devices/serial"):1050 if serial.get("type") == "tcp":1051 source = serial.find("./source")1052 if source is not None:1053 if mode and source.get("mode") != mode:1054 continue1055 yield (source.get("host"), int(source.get("service")))1056 @staticmethod1057 def _get_rbd_driver():1058 return rbd_utils.RBDDriver(1059 pool=CONF.libvirt.images_rbd_pool,1060 ceph_conf=CONF.libvirt.images_rbd_ceph_conf,1061 rbd_user=CONF.libvirt.rbd_user)1062 def 
_cleanup_rbd(self, instance):1063 LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)1064 def _cleanup_lvm(self, instance):1065 """Delete all LVM disks for given instance object."""1066 if instance.get('ephemeral_key_uuid') is not None:1067 self._detach_encrypted_volumes(instance)1068 disks = self._lvm_disks(instance)1069 if disks:1070 lvm.remove_volumes(disks)1071 def _lvm_disks(self, instance):1072 """Returns all LVM disks for given instance object."""1073 if CONF.libvirt.images_volume_group:1074 vg = os.path.join('/dev', CONF.libvirt.images_volume_group)1075 if not os.path.exists(vg):1076 return []1077 pattern = '%s_' % instance['uuid']1078 # TODO(sdague): remove in Juno1079 def belongs_to_instance_legacy(disk):1080 # We don't want to leak old disks, but at the same time, we1081 # don't want to do an unsafe thing. So we will only handle1082 # the old filter if it's the system default still.1083 pattern = '%s_' % instance['name']1084 if disk.startswith(pattern):1085 if CONF.instance_name_template == 'instance-%08x':1086 return True1087 else:1088 LOG.warn(_LW('Volume %(disk)s possibly unsafe to '1089 'remove, please clean up manually'),1090 {'disk': disk})1091 return False1092 def belongs_to_instance(disk):1093 return disk.startswith(pattern)1094 def fullpath(name):1095 return os.path.join(vg, name)1096 logical_volumes = lvm.list_volumes(vg)1097 disk_names = filter(belongs_to_instance, logical_volumes)1098 # TODO(sdague): remove in Juno1099 disk_names.extend(1100 filter(belongs_to_instance_legacy, logical_volumes)1101 )1102 disks = map(fullpath, disk_names)1103 return disks1104 return []1105 def get_volume_connector(self, instance):1106 if not self._initiator:1107 self._initiator = libvirt_utils.get_iscsi_initiator()1108 if not self._initiator:1109 LOG.debug('Could not determine iscsi initiator name',1110 instance=instance)1111 if not self._fc_wwnns:1112 self._fc_wwnns = libvirt_utils.get_fc_wwnns()1113 if not self._fc_wwnns or len(self._fc_wwnns) == 0:1114 LOG.debug('Could not determine fibre channel '1115 'world wide node names',1116 instance=instance)1117 if not self._fc_wwpns:1118 self._fc_wwpns = libvirt_utils.get_fc_wwpns()1119 if not self._fc_wwpns or len(self._fc_wwpns) == 0:1120 LOG.debug('Could not determine fibre channel '1121 'world wide port names',1122 instance=instance)1123 connector = {'ip': CONF.my_ip,1124 'host': CONF.host}1125 if self._initiator:1126 connector['initiator'] = self._initiator1127 if self._fc_wwnns and self._fc_wwpns:1128 connector["wwnns"] = self._fc_wwnns1129 connector["wwpns"] = self._fc_wwpns1130 return connector1131 def _cleanup_resize(self, instance, network_info):1132 # NOTE(wangpan): we get the pre-grizzly instance path firstly,1133 # so the backup dir of pre-grizzly instance can1134 # be deleted correctly with grizzly or later nova.1135 pre_grizzly_name = libvirt_utils.get_instance_path(instance,1136 forceold=True)1137 target = pre_grizzly_name + '_resize'1138 if not os.path.exists(target):1139 target = libvirt_utils.get_instance_path(instance) + '_resize'1140 if os.path.exists(target):1141 # Deletion can fail over NFS, so retry the deletion as required.1142 # Set maximum attempt as 5, most test can remove the directory1143 # for the second time.1144 utils.execute('rm', '-rf', target, delay_on_retry=True,1145 attempts=5)1146 if instance['host'] != CONF.host:1147 self._undefine_domain(instance)1148 self.unplug_vifs(instance, network_info)1149 self.firewall_driver.unfilter_instance(instance, network_info)1150 def _connect_volume(self, 
connection_info, disk_info):1151 driver_type = connection_info.get('driver_volume_type')1152 if driver_type not in self.volume_drivers:1153 raise exception.VolumeDriverNotFound(driver_type=driver_type)1154 driver = self.volume_drivers[driver_type]1155 return driver.connect_volume(connection_info, disk_info)1156 def _disconnect_volume(self, connection_info, disk_dev):1157 driver_type = connection_info.get('driver_volume_type')1158 if driver_type not in self.volume_drivers:1159 raise exception.VolumeDriverNotFound(driver_type=driver_type)1160 driver = self.volume_drivers[driver_type]1161 return driver.disconnect_volume(connection_info, disk_dev)1162 def _get_volume_config(self, connection_info, disk_info):1163 driver_type = connection_info.get('driver_volume_type')1164 if driver_type not in self.volume_drivers:1165 raise exception.VolumeDriverNotFound(driver_type=driver_type)1166 driver = self.volume_drivers[driver_type]1167 return driver.get_config(connection_info, disk_info)1168 def _get_volume_encryptor(self, connection_info, encryption):1169 encryptor = encryptors.get_volume_encryptor(connection_info,1170 **encryption)1171 return encryptor1172 def attach_volume(self, context, connection_info, instance, mountpoint,1173 disk_bus=None, device_type=None, encryption=None):1174 instance_name = instance['name']1175 virt_dom = self._lookup_by_name(instance_name)1176 disk_dev = mountpoint.rpartition("/")[2]1177 bdm = {1178 'device_name': disk_dev,1179 'disk_bus': disk_bus,1180 'device_type': device_type}1181 # Note(cfb): If the volume has a custom block size, check that1182 # that we are using QEMU/KVM and libvirt >= 0.10.2. The1183 # presence of a block size is considered mandatory by1184 # cinder so we fail if we can't honor the request.1185 data = {}1186 if ('data' in connection_info):1187 data = connection_info['data']1188 if ('logical_block_size' in data or 'physical_block_size' in data):1189 if ((CONF.libvirt.virt_type != "kvm" and1190 CONF.libvirt.virt_type != "qemu")):1191 msg = _("Volume sets block size, but the current "1192 "libvirt hypervisor '%s' does not support custom "1193 "block size") % CONF.libvirt.virt_type1194 raise exception.InvalidHypervisorType(msg)1195 if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):1196 ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])1197 msg = _("Volume sets block size, but libvirt '%s' or later is "1198 "required.") % ver1199 raise exception.Invalid(msg)1200 disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)1201 conf = self._connect_volume(connection_info, disk_info)1202 self._set_cache_mode(conf)1203 try:1204 # NOTE(vish): We can always affect config because our1205 # domains are persistent, but we should only1206 # affect live if the domain is running.1207 flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG1208 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]1209 if state in (power_state.RUNNING, power_state.PAUSED):1210 flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE1211 # cache device_path in connection_info -- required by encryptors1212 if 'data' in connection_info:1213 connection_info['data']['device_path'] = conf.source_path1214 if encryption:1215 encryptor = self._get_volume_encryptor(connection_info,1216 encryption)1217 encryptor.attach_volume(context, **encryption)1218 virt_dom.attachDeviceFlags(conf.to_xml(), flags)1219 except Exception as ex:1220 if isinstance(ex, libvirt.libvirtError):1221 errcode = ex.get_error_code()1222 if errcode == libvirt.VIR_ERR_OPERATION_FAILED:1223 self._disconnect_volume(connection_info, 
disk_dev)1224 raise exception.DeviceIsBusy(device=disk_dev)1225 with excutils.save_and_reraise_exception():1226 self._disconnect_volume(connection_info, disk_dev)1227 def _swap_volume(self, domain, disk_path, new_path, resize_to):1228 """Swap existing disk with a new block device."""1229 # Save a copy of the domain's persistent XML file1230 xml = domain.XMLDesc(1231 libvirt.VIR_DOMAIN_XML_INACTIVE |1232 libvirt.VIR_DOMAIN_XML_SECURE)1233 # Abort is an idempotent operation, so make sure any block1234 # jobs which may have failed are ended.1235 try:1236 domain.blockJobAbort(disk_path, 0)1237 except Exception:1238 pass1239 try:1240 # NOTE (rmk): blockRebase cannot be executed on persistent1241 # domains, so we need to temporarily undefine it.1242 # If any part of this block fails, the domain is1243 # re-defined regardless.1244 if domain.isPersistent():1245 domain.undefine()1246 # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to1247 # allow writing to existing external volume file1248 domain.blockRebase(disk_path, new_path, 0,1249 libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |1250 libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)1251 while self._wait_for_block_job(domain, disk_path):1252 time.sleep(0.5)1253 domain.blockJobAbort(disk_path,1254 libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)1255 if resize_to:1256 # NOTE(alex_xu): domain.blockJobAbort isn't sync call. This1257 # is bug in libvirt. So we need waiting for the pivot is1258 # finished. libvirt bug #11191731259 while self._wait_for_block_job(domain, disk_path,1260 wait_for_job_clean=True):1261 time.sleep(0.5)1262 domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)1263 finally:1264 self._conn.defineXML(xml)1265 def swap_volume(self, old_connection_info,1266 new_connection_info, instance, mountpoint, resize_to):1267 instance_name = instance['name']1268 virt_dom = self._lookup_by_name(instance_name)1269 disk_dev = mountpoint.rpartition("/")[2]1270 xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)1271 if not xml:1272 raise exception.DiskNotFound(location=disk_dev)1273 disk_info = {1274 'dev': disk_dev,1275 'bus': blockinfo.get_disk_bus_for_disk_dev(1276 CONF.libvirt.virt_type, disk_dev),1277 'type': 'disk',1278 }1279 conf = self._connect_volume(new_connection_info, disk_info)1280 if not conf.source_path:1281 self._disconnect_volume(new_connection_info, disk_dev)1282 raise NotImplementedError(_("Swap only supports host devices"))1283 self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)1284 self._disconnect_volume(old_connection_info, disk_dev)1285 @staticmethod1286 def _get_disk_xml(xml, device):1287 """Returns the xml for the disk mounted at device."""1288 try:1289 doc = etree.fromstring(xml)1290 except Exception:1291 return None1292 ret = doc.findall('./devices/disk')1293 for node in ret:1294 for child in node.getchildren():1295 if child.tag == 'target':1296 if child.get('dev') == device:1297 return etree.tostring(node)1298 def _get_existing_domain_xml(self, instance, network_info,1299 block_device_info=None):1300 try:1301 virt_dom = self._lookup_by_name(instance['name'])1302 xml = virt_dom.XMLDesc(0)1303 except exception.InstanceNotFound:1304 disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,1305 instance,1306 block_device_info)1307 xml = self._get_guest_xml(nova_context.get_admin_context(),1308 instance, network_info, disk_info,1309 block_device_info=block_device_info)1310 return xml1311 def detach_volume(self, connection_info, instance, mountpoint,1312 encryption=None):1313 instance_name = instance['name']1314 
disk_dev = mountpoint.rpartition("/")[2]1315 try:1316 virt_dom = self._lookup_by_name(instance_name)1317 xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)1318 if not xml:1319 raise exception.DiskNotFound(location=disk_dev)1320 else:1321 # NOTE(vish): We can always affect config because our1322 # domains are persistent, but we should only1323 # affect live if the domain is running.1324 flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG1325 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]1326 if state in (power_state.RUNNING, power_state.PAUSED):1327 flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE1328 virt_dom.detachDeviceFlags(xml, flags)1329 if encryption:1330 # The volume must be detached from the VM before1331 # disconnecting it from its encryptor. Otherwise, the1332 # encryptor may report that the volume is still in use.1333 encryptor = self._get_volume_encryptor(connection_info,1334 encryption)1335 encryptor.detach_volume(**encryption)1336 except exception.InstanceNotFound:1337 # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()1338 # will throw InstanceNotFound exception. Need to1339 # disconnect volume under this circumstance.1340 LOG.warn(_LW("During detach_volume, instance disappeared."))1341 except libvirt.libvirtError as ex:1342 # NOTE(vish): This is called to cleanup volumes after live1343 # migration, so we should still disconnect even if1344 # the instance doesn't exist here anymore.1345 error_code = ex.get_error_code()1346 if error_code == libvirt.VIR_ERR_NO_DOMAIN:1347 # NOTE(vish):1348 LOG.warn(_LW("During detach_volume, instance disappeared."))1349 else:1350 raise1351 self._disconnect_volume(connection_info, disk_dev)1352 def attach_interface(self, instance, image_meta, vif):1353 virt_dom = self._lookup_by_name(instance['name'])1354 flavor = objects.Flavor.get_by_id(1355 nova_context.get_admin_context(read_deleted='yes'),1356 instance['instance_type_id'])1357 self.vif_driver.plug(instance, vif)1358 self.firewall_driver.setup_basic_filtering(instance, [vif])1359 cfg = self.vif_driver.get_config(instance, vif, image_meta,1360 flavor, CONF.libvirt.virt_type)1361 try:1362 flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG1363 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]1364 if state == power_state.RUNNING or state == power_state.PAUSED:1365 flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE1366 virt_dom.attachDeviceFlags(cfg.to_xml(), flags)1367 except libvirt.libvirtError:1368 LOG.error(_LE('attaching network adapter failed.'),1369 instance=instance)1370 self.vif_driver.unplug(instance, vif)1371 raise exception.InterfaceAttachFailed(1372 instance_uuid=instance['uuid'])1373 def detach_interface(self, instance, vif):1374 virt_dom = self._lookup_by_name(instance['name'])1375 flavor = objects.Flavor.get_by_id(1376 nova_context.get_admin_context(read_deleted='yes'),1377 instance['instance_type_id'])1378 cfg = self.vif_driver.get_config(instance, vif, None, flavor,1379 CONF.libvirt.virt_type)1380 try:1381 self.vif_driver.unplug(instance, vif)1382 flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG1383 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]1384 if state == power_state.RUNNING or state == power_state.PAUSED:1385 flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE1386 virt_dom.detachDeviceFlags(cfg.to_xml(), flags)1387 except libvirt.libvirtError as ex:1388 error_code = ex.get_error_code()1389 if error_code == libvirt.VIR_ERR_NO_DOMAIN:1390 LOG.warn(_LW("During detach_interface, "1391 "instance disappeared."),1392 instance=instance)1393 else:1394 LOG.error(_LE('detaching network adapter failed.'),1395 
instance=instance)1396 raise exception.InterfaceDetachFailed(1397 instance_uuid=instance['uuid'])1398 def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):1399 metadata = {'is_public': False,1400 'status': 'active',1401 'name': snp_name,1402 'properties': {1403 'kernel_id': instance['kernel_id'],1404 'image_location': 'snapshot',1405 'image_state': 'available',1406 'owner_id': instance['project_id'],1407 'ramdisk_id': instance['ramdisk_id'],1408 }1409 }1410 if instance['os_type']:1411 metadata['properties']['os_type'] = instance['os_type']1412 # NOTE(vish): glance forces ami disk format to be ami1413 if base.get('disk_format') == 'ami':1414 metadata['disk_format'] = 'ami'1415 else:1416 metadata['disk_format'] = img_fmt1417 metadata['container_format'] = base.get('container_format', 'bare')1418 return metadata1419 def snapshot(self, context, instance, image_id, update_task_state):1420 """Create snapshot from a running VM instance.1421 This command only works with qemu 0.14+1422 """1423 try:1424 virt_dom = self._lookup_by_name(instance['name'])1425 except exception.InstanceNotFound:1426 raise exception.InstanceNotRunning(instance_id=instance['uuid'])1427 base_image_ref = instance['image_ref']1428 base = compute_utils.get_image_metadata(1429 context, self._image_api, base_image_ref, instance)1430 snapshot = self._image_api.get(context, image_id)1431 disk_path = libvirt_utils.find_disk(virt_dom)1432 source_format = libvirt_utils.get_disk_type(disk_path)1433 image_format = CONF.libvirt.snapshot_image_format or source_format1434 # NOTE(bfilippov): save lvm and rbd as raw1435 if image_format == 'lvm' or image_format == 'rbd':1436 image_format = 'raw'1437 metadata = self._create_snapshot_metadata(base,1438 instance,1439 image_format,1440 snapshot['name'])1441 snapshot_name = uuid.uuid4().hex1442 state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]1443 # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.1444 # These restrictions can be relaxed as other configurations1445 # can be validated.1446 # NOTE(dgenin): Instances with LVM encrypted ephemeral storage require1447 # cold snapshots. Currently, checking for encryption is1448 # redundant because LVM supports only cold snapshots.1449 # It is necessary in case this situation changes in the1450 # future.1451 if (self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,1452 MIN_QEMU_LIVESNAPSHOT_VERSION,1453 REQ_HYPERVISOR_LIVESNAPSHOT)1454 and source_format not in ('lvm', 'rbd')1455 and not CONF.ephemeral_storage_encryption.enabled):1456 live_snapshot = True1457 # Abort is an idempotent operation, so make sure any block1458 # jobs which may have failed are ended. 
This operation also1459 # confirms the running instance, as opposed to the system as a1460 # whole, has a new enough version of the hypervisor (bug 1193146).1461 try:1462 virt_dom.blockJobAbort(disk_path, 0)1463 except libvirt.libvirtError as ex:1464 error_code = ex.get_error_code()1465 if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:1466 live_snapshot = False1467 else:1468 pass1469 else:1470 live_snapshot = False1471 # NOTE(rmk): We cannot perform live snapshots when a managedSave1472 # file is present, so we will use the cold/legacy method1473 # for instances which are shutdown.1474 if state == power_state.SHUTDOWN:1475 live_snapshot = False1476 # NOTE(dkang): managedSave does not work for LXC1477 if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:1478 if state == power_state.RUNNING or state == power_state.PAUSED:1479 self._detach_pci_devices(virt_dom,1480 pci_manager.get_instance_pci_devs(instance))1481 self._detach_sriov_ports(instance, virt_dom)1482 virt_dom.managedSave(0)1483 snapshot_backend = self.image_backend.snapshot(instance,1484 disk_path,1485 image_type=source_format)1486 if live_snapshot:1487 LOG.info(_LI("Beginning live snapshot process"),1488 instance=instance)1489 else:1490 LOG.info(_LI("Beginning cold snapshot process"),1491 instance=instance)1492 update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)1493 snapshot_directory = CONF.libvirt.snapshots_directory1494 fileutils.ensure_tree(snapshot_directory)1495 with utils.tempdir(dir=snapshot_directory) as tmpdir:1496 try:1497 out_path = os.path.join(tmpdir, snapshot_name)1498 if live_snapshot:1499 # NOTE(xqueralt): libvirt needs o+x in the temp directory1500 os.chmod(tmpdir, 0o701)1501 self._live_snapshot(virt_dom, disk_path, out_path,1502 image_format)1503 else:1504 snapshot_backend.snapshot_extract(out_path, image_format)1505 finally:1506 new_dom = None1507 # NOTE(dkang): because previous managedSave is not called1508 # for LXC, _create_domain must not be called.1509 if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:1510 if state == power_state.RUNNING:1511 new_dom = self._create_domain(domain=virt_dom)1512 elif state == power_state.PAUSED:1513 new_dom = self._create_domain(domain=virt_dom,1514 launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)1515 if new_dom is not None:1516 self._attach_pci_devices(new_dom,1517 pci_manager.get_instance_pci_devs(instance))1518 self._attach_sriov_ports(context, instance, new_dom)1519 LOG.info(_LI("Snapshot extracted, beginning image upload"),1520 instance=instance)1521 # Upload that image to the image service1522 update_task_state(task_state=task_states.IMAGE_UPLOADING,1523 expected_state=task_states.IMAGE_PENDING_UPLOAD)1524 with libvirt_utils.file_open(out_path) as image_file:1525 self._image_api.update(context,1526 image_id,1527 metadata,1528 image_file)1529 LOG.info(_LI("Snapshot image upload complete"),1530 instance=instance)1531 @staticmethod1532 def _wait_for_block_job(domain, disk_path, abort_on_error=False,1533 wait_for_job_clean=False):1534 """Wait for libvirt block job to complete.1535 Libvirt may return either cur==end or an empty dict when1536 the job is complete, depending on whether the job has been1537 cleaned up by libvirt yet, or not.1538 :returns: True if still in progress1539 False if completed1540 """1541 status = domain.blockJobInfo(disk_path, 0)1542 if status == -1 and abort_on_error:1543 msg = _('libvirt error while requesting blockjob info.')1544 raise exception.NovaException(msg)1545 try:1546 cur = status.get('cur', 0)1547 end = 
status.get('end', 0)1548 except Exception:1549 return False1550 if wait_for_job_clean:1551 job_ended = not status1552 else:1553 job_ended = cur == end1554 return not job_ended1555 def _live_snapshot(self, domain, disk_path, out_path, image_format):1556 """Snapshot an instance without downtime."""1557 # Save a copy of the domain's persistent XML file1558 xml = domain.XMLDesc(1559 libvirt.VIR_DOMAIN_XML_INACTIVE |1560 libvirt.VIR_DOMAIN_XML_SECURE)1561 # Abort is an idempotent operation, so make sure any block1562 # jobs which may have failed are ended.1563 try:1564 domain.blockJobAbort(disk_path, 0)1565 except Exception:1566 pass1567 # NOTE (rmk): We are using shallow rebases as a workaround to a bug1568 # in QEMU 1.3. In order to do this, we need to create1569 # a destination image with the original backing file1570 # and matching size of the instance root disk.1571 src_disk_size = libvirt_utils.get_disk_size(disk_path)1572 src_back_path = libvirt_utils.get_disk_backing_file(disk_path,1573 basename=False)1574 disk_delta = out_path + '.delta'1575 libvirt_utils.create_cow_image(src_back_path, disk_delta,1576 src_disk_size)1577 try:1578 # NOTE (rmk): blockRebase cannot be executed on persistent1579 # domains, so we need to temporarily undefine it.1580 # If any part of this block fails, the domain is1581 # re-defined regardless.1582 if domain.isPersistent():1583 domain.undefine()1584 # NOTE (rmk): Establish a temporary mirror of our root disk and1585 # issue an abort once we have a complete copy.1586 domain.blockRebase(disk_path, disk_delta, 0,1587 libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |1588 libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |1589 libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)1590 while self._wait_for_block_job(domain, disk_path):1591 time.sleep(0.5)1592 domain.blockJobAbort(disk_path, 0)1593 libvirt_utils.chown(disk_delta, os.getuid())1594 finally:1595 self._conn.defineXML(xml)1596 # Convert the delta (CoW) image with a backing file to a flat1597 # image with no backing file.1598 libvirt_utils.extract_snapshot(disk_delta, 'qcow2',1599 out_path, image_format)1600 def _volume_snapshot_update_status(self, context, snapshot_id, status):1601 """Send a snapshot status update to Cinder.1602 This method captures and logs exceptions that occur1603 since callers cannot do anything useful with these exceptions.1604 Operations on the Cinder side waiting for this will time out if1605 a failure occurs sending the update.1606 :param context: security context1607 :param snapshot_id: id of snapshot being updated1608 :param status: new status value1609 """1610 try:1611 self._volume_api.update_snapshot_status(context,1612 snapshot_id,1613 status)1614 except Exception:1615 LOG.exception(_LE('Failed to send updated snapshot status '1616 'to volume service.'))1617 def _volume_snapshot_create(self, context, instance, domain,1618 volume_id, new_file):1619 """Perform volume snapshot.1620 :param domain: VM that volume is attached to1621 :param volume_id: volume UUID to snapshot1622 :param new_file: relative path to new qcow2 file present on share1623 """1624 xml = domain.XMLDesc(0)1625 xml_doc = etree.fromstring(xml)1626 device_info = vconfig.LibvirtConfigGuest()1627 device_info.parse_dom(xml_doc)1628 disks_to_snap = [] # to be snapshotted by libvirt1629 network_disks_to_snap = [] # network disks (netfs, gluster, etc.)1630 disks_to_skip = [] # local disks not snapshotted1631 for guest_disk in device_info.devices:1632 if (guest_disk.root_name != 'disk'):1633 continue1634 if (guest_disk.target_dev is None):1635 
continue1636 if (guest_disk.serial is None or guest_disk.serial != volume_id):1637 disks_to_skip.append(guest_disk.target_dev)1638 continue1639 # disk is a Cinder volume with the correct volume_id1640 disk_info = {1641 'dev': guest_disk.target_dev,1642 'serial': guest_disk.serial,1643 'current_file': guest_disk.source_path,1644 'source_protocol': guest_disk.source_protocol,1645 'source_name': guest_disk.source_name,1646 'source_hosts': guest_disk.source_hosts,1647 'source_ports': guest_disk.source_ports1648 }1649 # Determine path for new_file based on current path1650 if disk_info['current_file'] is not None:1651 current_file = disk_info['current_file']1652 new_file_path = os.path.join(os.path.dirname(current_file),1653 new_file)1654 disks_to_snap.append((current_file, new_file_path))1655 elif disk_info['source_protocol'] in ('gluster', 'netfs'):1656 network_disks_to_snap.append((disk_info, new_file))1657 if not disks_to_snap and not network_disks_to_snap:1658 msg = _('Found no disk to snapshot.')1659 raise exception.NovaException(msg)1660 snapshot = vconfig.LibvirtConfigGuestSnapshot()1661 for current_name, new_filename in disks_to_snap:1662 snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()1663 snap_disk.name = current_name1664 snap_disk.source_path = new_filename1665 snap_disk.source_type = 'file'1666 snap_disk.snapshot = 'external'1667 snap_disk.driver_name = 'qcow2'1668 snapshot.add_disk(snap_disk)1669 for disk_info, new_filename in network_disks_to_snap:1670 snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()1671 snap_disk.name = disk_info['dev']1672 snap_disk.source_type = 'network'1673 snap_disk.source_protocol = disk_info['source_protocol']1674 snap_disk.snapshot = 'external'1675 snap_disk.source_path = new_filename1676 old_dir = disk_info['source_name'].split('/')[0]1677 snap_disk.source_name = '%s/%s' % (old_dir, new_filename)1678 snap_disk.source_hosts = disk_info['source_hosts']1679 snap_disk.source_ports = disk_info['source_ports']1680 snapshot.add_disk(snap_disk)1681 for dev in disks_to_skip:1682 snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()1683 snap_disk.name = dev1684 snap_disk.snapshot = 'no'1685 snapshot.add_disk(snap_disk)1686 snapshot_xml = snapshot.to_xml()1687 LOG.debug("snap xml: %s", snapshot_xml)1688 snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |1689 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |1690 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)1691 QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE1692 try:1693 domain.snapshotCreateXML(snapshot_xml,1694 snap_flags | QUIESCE)1695 return1696 except libvirt.libvirtError:1697 LOG.exception(_LE('Unable to create quiesced VM snapshot, '1698 'attempting again with quiescing disabled.'))1699 try:1700 domain.snapshotCreateXML(snapshot_xml, snap_flags)1701 except libvirt.libvirtError:1702 LOG.exception(_LE('Unable to create VM snapshot, '1703 'failing volume_snapshot operation.'))1704 raise1705 def _volume_refresh_connection_info(self, context, instance, volume_id):1706 bdm = objects.BlockDeviceMapping.get_by_volume_id(context,1707 volume_id)1708 driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)1709 driver_bdm.refresh_connection_info(context, instance,1710 self._volume_api, self)1711 def volume_snapshot_create(self, context, instance, volume_id,1712 create_info):1713 """Create snapshots of a Cinder volume via libvirt.1714 :param instance: VM instance object reference1715 :param volume_id: id of volume being snapshotted1716 :param create_info: dict of information used to create 
snapshots1717 - snapshot_id : ID of snapshot1718 - type : qcow2 / <other>1719 - new_file : qcow2 file created by Cinder which1720 becomes the VM's active image after1721 the snapshot is complete1722 """1723 LOG.debug("volume_snapshot_create: create_info: %(c_info)s",1724 {'c_info': create_info}, instance=instance)1725 try:1726 virt_dom = self._lookup_by_name(instance.name)1727 except exception.InstanceNotFound:1728 raise exception.InstanceNotRunning(instance_id=instance.uuid)1729 if create_info['type'] != 'qcow2':1730 raise exception.NovaException(_('Unknown type: %s') %1731 create_info['type'])1732 snapshot_id = create_info.get('snapshot_id', None)1733 if snapshot_id is None:1734 raise exception.NovaException(_('snapshot_id required '1735 'in create_info'))1736 try:1737 self._volume_snapshot_create(context, instance, virt_dom,1738 volume_id, create_info['new_file'])1739 except Exception:1740 with excutils.save_and_reraise_exception():1741 LOG.exception(_LE('Error occurred during '1742 'volume_snapshot_create, '1743 'sending error status to Cinder.'))1744 self._volume_snapshot_update_status(1745 context, snapshot_id, 'error')1746 self._volume_snapshot_update_status(1747 context, snapshot_id, 'creating')1748 def _wait_for_snapshot():1749 snapshot = self._volume_api.get_snapshot(context, snapshot_id)1750 if snapshot.get('status') != 'creating':1751 self._volume_refresh_connection_info(context, instance,1752 volume_id)1753 raise loopingcall.LoopingCallDone()1754 timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)1755 timer.start(interval=0.5).wait()1756 def _volume_snapshot_delete(self, context, instance, volume_id,1757 snapshot_id, delete_info=None):1758 """Note:1759 if file being merged into == active image:1760 do a blockRebase (pull) operation1761 else:1762 do a blockCommit operation1763 Files must be adjacent in snap chain.1764 :param instance: instance object reference1765 :param volume_id: volume UUID1766 :param snapshot_id: snapshot UUID (unused currently)1767 :param delete_info: {1768 'type': 'qcow2',1769 'file_to_merge': 'a.img',1770 'merge_target_file': 'b.img' or None (if merging file_to_merge into1771 active image)1772 }1773 Libvirt blockjob handling required for this method is broken1774 in versions of libvirt that do not contain:1775 http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)1776 (Patch is pending in 1.0.5-maint branch as well, but we cannot detect1777 libvirt 1.0.5.5 vs. 
1.0.5.6 here.)1778 """1779 if not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):1780 ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])1781 msg = _("Libvirt '%s' or later is required for online deletion "1782 "of volume snapshots.") % ver1783 raise exception.Invalid(msg)1784 LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)1785 if delete_info['type'] != 'qcow2':1786 msg = _('Unknown delete_info type %s') % delete_info['type']1787 raise exception.NovaException(msg)1788 try:1789 virt_dom = self._lookup_by_name(instance.name)1790 except exception.InstanceNotFound:1791 raise exception.InstanceNotRunning(instance_id=instance.uuid)1792 # Find dev name1793 my_dev = None1794 active_disk = None1795 xml = virt_dom.XMLDesc(0)1796 xml_doc = etree.fromstring(xml)1797 device_info = vconfig.LibvirtConfigGuest()1798 device_info.parse_dom(xml_doc)1799 active_disk_object = None1800 for guest_disk in device_info.devices:1801 if (guest_disk.root_name != 'disk'):1802 continue1803 if (guest_disk.target_dev is None or guest_disk.serial is None):1804 continue1805 if guest_disk.serial == volume_id:1806 my_dev = guest_disk.target_dev1807 active_disk = guest_disk.source_path1808 active_protocol = guest_disk.source_protocol1809 active_disk_object = guest_disk1810 break1811 if my_dev is None or (active_disk is None and active_protocol is None):1812 msg = _('Disk with id: %s '1813 'not found attached to instance.') % volume_id1814 LOG.debug('Domain XML: %s', xml)1815 raise exception.NovaException(msg)1816 LOG.debug("found device at %s", my_dev)1817 def _get_snap_dev(filename, backing_store):1818 if filename is None:1819 msg = _('filename cannot be None')1820 raise exception.NovaException(msg)1821 # libgfapi delete1822 LOG.debug("XML: %s" % xml)1823 LOG.debug("active disk object: %s" % active_disk_object)1824 # determine reference within backing store for desired image1825 filename_to_merge = filename1826 matched_name = None1827 b = backing_store1828 index = None1829 current_filename = active_disk_object.source_name.split('/')[1]1830 if current_filename == filename_to_merge:1831 return my_dev + '[0]'1832 while b is not None:1833 source_filename = b.source_name.split('/')[1]1834 if source_filename == filename_to_merge:1835 LOG.debug('found match: %s' % b.source_name)1836 matched_name = b.source_name1837 index = b.index1838 break1839 b = b.backing_store1840 if matched_name is None:1841 msg = _('no match found for %s') % (filename_to_merge)1842 raise exception.NovaException(msg)1843 LOG.debug('index of match (%s) is %s' % (b.source_name, index))1844 my_snap_dev = '%s[%s]' % (my_dev, index)1845 return my_snap_dev1846 if delete_info['merge_target_file'] is None:1847 # pull via blockRebase()1848 # Merge the most recent snapshot into the active image1849 rebase_disk = my_dev1850 rebase_flags = 01851 rebase_base = delete_info['file_to_merge'] # often None1852 if active_protocol is not None:1853 rebase_base = _get_snap_dev(delete_info['file_to_merge'],1854 active_disk_object.backing_store)1855 rebase_bw = 01856 LOG.debug('disk: %(disk)s, base: %(base)s, '1857 'bw: %(bw)s, flags: %(flags)s',1858 {'disk': rebase_disk,1859 'base': rebase_base,1860 'bw': rebase_bw,1861 'flags': rebase_flags})1862 result = virt_dom.blockRebase(rebase_disk, rebase_base,1863 rebase_bw, rebase_flags)1864 if result == 0:1865 LOG.debug('blockRebase started successfully')1866 while self._wait_for_block_job(virt_dom, my_dev,1867 abort_on_error=True):1868 LOG.debug('waiting for blockRebase job completion')1869 
time.sleep(0.5)1870 else:1871 # commit with blockCommit()1872 my_snap_base = None1873 my_snap_top = None1874 commit_disk = my_dev1875 commit_flags = 01876 if active_protocol is not None:1877 my_snap_base = _get_snap_dev(delete_info['merge_target_file'],1878 active_disk_object.backing_store)1879 my_snap_top = _get_snap_dev(delete_info['file_to_merge'],1880 active_disk_object.backing_store)1881 try:1882 commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE1883 except AttributeError:1884 ver = '.'.join(1885 [str(x) for x in1886 MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])1887 msg = _("Relative blockcommit support was not detected. "1888 "Libvirt '%s' or later is required for online "1889 "deletion of network storage-backed volume "1890 "snapshots.") % ver1891 raise exception.Invalid(msg)1892 commit_base = my_snap_base or delete_info['merge_target_file']1893 commit_top = my_snap_top or delete_info['file_to_merge']1894 bandwidth = 01895 LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '1896 'commit_base=%(commit_base)s '1897 'commit_top=%(commit_top)s '1898 % {'commit_disk': commit_disk,1899 'commit_base': commit_base,1900 'commit_top': commit_top})1901 result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,1902 bandwidth, commit_flags)1903 if result == 0:1904 LOG.debug('blockCommit started successfully')1905 while self._wait_for_block_job(virt_dom, my_dev,1906 abort_on_error=True):1907 LOG.debug('waiting for blockCommit job completion')1908 time.sleep(0.5)1909 def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,1910 delete_info):1911 try:1912 self._volume_snapshot_delete(context, instance, volume_id,1913 snapshot_id, delete_info=delete_info)1914 except Exception:1915 with excutils.save_and_reraise_exception():1916 LOG.exception(_LE('Error occurred during '1917 'volume_snapshot_delete, '1918 'sending error status to Cinder.'))1919 self._volume_snapshot_update_status(1920 context, snapshot_id, 'error_deleting')1921 self._volume_snapshot_update_status(context, snapshot_id, 'deleting')1922 self._volume_refresh_connection_info(context, instance, volume_id)1923 def reboot(self, context, instance, network_info, reboot_type,1924 block_device_info=None, bad_volumes_callback=None):1925 """Reboot a virtual machine, given an instance reference."""1926 if reboot_type == 'SOFT':1927 # NOTE(vish): This will attempt to do a graceful shutdown/restart.1928 try:1929 soft_reboot_success = self._soft_reboot(instance)1930 except libvirt.libvirtError as e:1931 LOG.debug("Instance soft reboot failed: %s", e)1932 soft_reboot_success = False1933 if soft_reboot_success:1934 LOG.info(_LI("Instance soft rebooted successfully."),1935 instance=instance)1936 return1937 else:1938 LOG.warn(_LW("Failed to soft reboot instance. "1939 "Trying hard reboot."),1940 instance=instance)1941 return self._hard_reboot(context, instance, network_info,1942 block_device_info)1943 def _soft_reboot(self, instance):1944 """Attempt to shutdown and restart the instance gracefully.1945 We use shutdown and create here so we can return if the guest1946 responded and actually rebooted. Note that this method only1947 succeeds if the guest responds to acpi. 
    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to acpi. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :returns: True if the reboot succeeded
        """
        dom = self._lookup_by_name(instance["name"])
        state = LIBVIRT_POWER_STATE[dom.info()[0]]
        old_domid = dom.ID()
        # NOTE(vish): This check allows us to reboot an instance that
        #             is already shutdown.
        if state == power_state.RUNNING:
            dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
        #             FLAG defines depending on how long the get_info
        #             call takes to return.
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))
        for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
            dom = self._lookup_by_name(instance["name"])
            state = LIBVIRT_POWER_STATE[dom.info()[0]]
            new_domid = dom.ID()
            # NOTE(ivoks): By checking domain IDs, we make sure we are
            #              not recreating domain that's already running.
            if old_domid != new_domid:
                if state in [power_state.SHUTDOWN,
                             power_state.CRASHED]:
                    LOG.info(_LI("Instance shutdown successfully."),
                             instance=instance)
                    self._create_domain(domain=dom)
                    timer = loopingcall.FixedIntervalLoopingCall(
                        self._wait_for_running, instance)
                    timer.start(interval=0.5).wait()
                    return True
                else:
                    LOG.info(_LI("Instance may have been rebooted during soft "
                                 "reboot, so return now."), instance=instance)
                    return True
            greenthread.sleep(1)
        return False

    def _hard_reboot(self, context, instance, network_info,
                     block_device_info=None):
        """Reboot a virtual machine, given an instance reference.

        Performs a Libvirt reset (if supported) on the domain.

        If Libvirt reset is unavailable this method actually destroys and
        re-creates the domain to ensure the reboot happens, as the guest
        OS cannot ignore this action.

        If xml is set, it uses the passed in xml in place of the xml from the
        existing domain.
        """
        self._destroy(instance)

        # Get the system metadata from the instance
        system_meta = utils.instance_sys_meta(instance)

        # Convert the system metadata to image metadata
        image_meta = utils.get_image_from_system_metadata(system_meta)
        if not image_meta:
            image_ref = instance.get('image_ref')
            image_meta = compute_utils.get_image_metadata(context,
                                                          self._image_api,
                                                          image_ref,
                                                          instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # NOTE(vish): This could generate the wrong device_format if we are
        #             using the raw backend and the images don't exist yet.
        #             The create_images_and_backing below doesn't properly
        #             regenerate raw backend images, however, so when it
        #             does we need to (re)generate the xml after the images
        #             are in place.
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  image_meta=image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)

        # NOTE (rmk): Re-populate any missing backing files.
        disk_info_json = self._get_instance_disk_info(instance['name'], xml,
                                                      block_device_info)
        instance_dir = libvirt_utils.get_instance_path(instance)
        self._create_images_and_backing(context, instance, instance_dir,
                                        disk_info_json)

        # Initialize all the necessary networking, block devices and
        # start the instance.
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, reboot=True,
                                        vifs_already_plugged=True)
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))

        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            state = self.get_info(instance)['state']

            if state == power_state.RUNNING:
                LOG.info(_LI("Instance rebooted successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
        timer.start(interval=0.5).wait()

    def pause(self, instance):
        """Pause VM instance."""
        dom = self._lookup_by_name(instance['name'])
        dom.suspend()

    def unpause(self, instance):
        """Unpause paused VM instance."""
        dom = self._lookup_by_name(instance['name'])
        dom.resume()

    def _clean_shutdown(self, instance, timeout, retry_interval):
        """Attempt to shutdown the instance gracefully.

        :param instance: The instance to be shutdown
        :param timeout: How long to wait in seconds for the instance to
                        shutdown
        :param retry_interval: How often in seconds to signal the instance
                               to shutdown while waiting

        :returns: True if the shutdown succeeded
        """
        # List of states that represent a shutdown instance
        SHUTDOWN_STATES = [power_state.SHUTDOWN,
                           power_state.CRASHED]

        try:
            dom = self._lookup_by_name(instance["name"])
        except exception.InstanceNotFound:
            # If the instance has gone then we don't need to
            # wait for it to shutdown
            return True

        (state, _max_mem, _mem, _cpus, _t) = dom.info()
        state = LIBVIRT_POWER_STATE[state]
        if state in SHUTDOWN_STATES:
            LOG.info(_LI("Instance already shutdown."),
                     instance=instance)
            return True

        LOG.debug("Shutting down instance from state %s", state,
                  instance=instance)
        dom.shutdown()
        retry_countdown = retry_interval

        for sec in six.moves.range(timeout):
            dom = self._lookup_by_name(instance["name"])
            (state, _max_mem, _mem, _cpus, _t) = dom.info()
            state = LIBVIRT_POWER_STATE[state]

            if state in SHUTDOWN_STATES:
                LOG.info(_LI("Instance shutdown successfully after %d "
                             "seconds."), sec, instance=instance)
                return True

            # Note(PhilD): We can't assume that the Guest was able to process
            #              any previous shutdown signal (for example it may
            #              have still been starting up), so within the overall
            #              timeout we re-trigger the shutdown every
            #              retry_interval
            if retry_countdown == 0:
                retry_countdown = retry_interval
                # Instance could shutdown at any time, in which case we
                # will get an exception when we call shutdown
                try:
                    LOG.debug("Instance in state %s after %d seconds - "
                              "resending shutdown", state, sec,
                              instance=instance)
                    dom.shutdown()
                except libvirt.libvirtError:
                    # Assume this is because it's now shutdown, so loop
                    # one more time to clean up.
                    LOG.debug("Ignoring libvirt exception from shutdown "
                              "request.", instance=instance)
                    continue
            else:
                retry_countdown -= 1

            time.sleep(1)

        LOG.info(_LI("Instance failed to shutdown in %d seconds."),
                 timeout, instance=instance)
        return False

    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        if timeout:
            self._clean_shutdown(instance, timeout, retry_interval)
        self._destroy(instance)
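# --- Example (not part of driver.py): a standalone sketch of the
# _clean_shutdown() pattern above, assuming a plain python-libvirt domain
# object. The timeout/retry_interval semantics mirror the method: keep
# polling, and re-send the shutdown signal every retry_interval seconds.
import time
import libvirt

def graceful_shutdown(dom, timeout=60, retry_interval=10):
    dom.shutdown()                      # initial ACPI shutdown request
    retry_countdown = retry_interval
    for _sec in range(timeout):
        if not dom.isActive():          # guest is gone, we are done
            return True
        if retry_countdown == 0:
            retry_countdown = retry_interval
            try:
                dom.shutdown()          # re-signal a slow guest
            except libvirt.libvirtError:
                continue                # probably shut down already
        else:
            retry_countdown -= 1
        time.sleep(1)
    return False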
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        # We use _hard_reboot here to ensure that all backing files,
        # network, and block device connections, etc. are established
        # and available before we attempt to start the instance.
        self._hard_reboot(context, instance, network_info, block_device_info)

    def suspend(self, instance):
        """Suspend the specified instance."""
        dom = self._lookup_by_name(instance['name'])
        self._detach_pci_devices(dom,
                                 pci_manager.get_instance_pci_devs(instance))
        self._detach_sriov_ports(instance, dom)
        dom.managedSave(0)

    def resume(self, context, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        xml = self._get_existing_domain_xml(instance, network_info,
                                            block_device_info)
        dom = self._create_domain_and_network(
            context, xml, instance, network_info,
            block_device_info=block_device_info,
            vifs_already_plugged=True)
        self._attach_pci_devices(dom,
                                 pci_manager.get_instance_pci_devs(instance))
        self._attach_sriov_ports(context, instance, dom, network_info)

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        try:
            domain = self._lookup_by_name(instance['name'])
            state = LIBVIRT_POWER_STATE[domain.info()[0]]

            ignored_states = (power_state.RUNNING,
                              power_state.SUSPENDED,
                              power_state.NOSTATE,
                              power_state.PAUSED)

            if state in ignored_states:
                return
        except exception.NovaException:
            pass

        # Instance is not up and could be in an unknown state.
        # Be as absolute as possible about getting it back into
        # a known and running state.
        self._hard_reboot(context, instance, network_info, block_device_info)

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Loads a VM using rescue images.

        A rescue is normally performed when something goes wrong with the
        primary images and data needs to be corrected/recovered. Rescuing
        should not edit or over-ride the original image, only allow for
        data recovery.
        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        unrescue_xml = self._get_existing_domain_xml(instance, network_info)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)

        if image_meta is not None:
            rescue_image_id = image_meta.get('id')
        else:
            rescue_image_id = None

        rescue_images = {
            'image_id': (rescue_image_id or
                         CONF.libvirt.rescue_image_id or instance.image_ref),
            'kernel_id': (CONF.libvirt.rescue_kernel_id or
                          instance.kernel_id),
            'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
                           instance.ramdisk_id),
        }
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            None,
                                            image_meta,
                                            rescue=True)
        self._create_image(context, instance,
                           disk_info['mapping'],
                           '.rescue', rescue_images,
                           network_info=network_info,
                           admin_pass=rescue_password)
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  image_meta, rescue=rescue_images,
                                  write_to_disk=True)
        self._destroy(instance)
        self._create_domain(xml)

    def unrescue(self, instance, network_info):
        """Reboot the VM which is being rescued back into primary images.
        """
        instance_dir = libvirt_utils.get_instance_path(instance)
        unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
        xml = libvirt_utils.load_file(unrescue_xml_path)
        virt_dom = self._lookup_by_name(instance.name)
        self._destroy(instance)
        self._create_domain(xml, virt_dom)
        libvirt_utils.file_delete(unrescue_xml_path)
        rescue_files = os.path.join(instance_dir, "*.rescue")
        for rescue_file in glob.iglob(rescue_files):
            libvirt_utils.file_delete(rescue_file)

    def poll_rebooting_instances(self, timeout, instances):
        pass

    def _enable_hairpin(self, xml):
        interfaces = self._get_interfaces(xml)
        for interface in interfaces:
            utils.execute('tee',
                          '/sys/class/net/%s/brport/hairpin_mode' % interface,
                          process_input='1',
                          run_as_root=True,
                          check_exit_code=[0, 1])

    # NOTE(ilyaalekseyev): Implementation like in multinics
    #                      for xenapi(tr3buchet)
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        self._create_image(context, instance,
                           disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=block_device_info,
                           files=injected_files,
                           admin_pass=admin_password)
        xml = self._get_guest_xml(context, instance, network_info,
                                  disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info,
                                        disk_info=disk_info)
        LOG.debug("Instance is running", instance=instance)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            state = self.get_info(instance)['state']

            if state == power_state.RUNNING:
                LOG.info(_LI("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.5).wait()
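# --- Example (not part of driver.py): spawn() and _hard_reboot() both wait
# for the guest with a FixedIntervalLoopingCall. A dependency-free sketch of
# the same "poll until RUNNING or give up" idea, where get_state is a
# hypothetical callable standing in for self.get_info(instance)['state']:
import time

def wait_for_running(get_state, running_state, interval=0.5, max_wait=120.0):
    waited = 0.0
    while waited < max_wait:
        if get_state() == running_state:
            return True       # the guest reached RUNNING
        time.sleep(interval)
        waited += interval
    return False              # caller decides how to handle the timeout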
    def _flush_libvirt_console(self, pty):
        out, err = utils.execute('dd',
                                 'if=%s' % pty,
                                 'iflag=nonblock',
                                 run_as_root=True,
                                 check_exit_code=False)
        return out

    def _append_to_file(self, data, fpath):
        LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
                 {'data': data, 'fpath': fpath})

        with open(fpath, 'a+') as fp:
            fp.write(data)

        return fpath

    def get_console_output(self, context, instance):
        virt_dom = self._lookup_by_name(instance.name)
        xml = virt_dom.XMLDesc(0)
        tree = etree.fromstring(xml)

        console_types = {}

        # NOTE(comstud): We want to try 'file' types first, then try 'pty'
        #                types. We can't use Python 2.7 syntax of:
        #                tree.find("./devices/console[@type='file']/source")
        #                because we need to support 2.6.
        console_nodes = tree.findall('./devices/console')
        for console_node in console_nodes:
            console_type = console_node.get('type')
            console_types.setdefault(console_type, [])
            console_types[console_type].append(console_node)

        # If the guest has a console logging to a file prefer to use that
        if console_types.get('file'):
            for file_console in console_types.get('file'):
                source_node = file_console.find('./source')
                if source_node is None:
                    continue
                path = source_node.get("path")
                if not path:
                    continue
                libvirt_utils.chown(path, os.getuid())

                with libvirt_utils.file_open(path, 'rb') as fp:
                    log_data, remaining = utils.last_bytes(fp,
                                                           MAX_CONSOLE_BYTES)
                    if remaining > 0:
                        LOG.info(_LI('Truncated console log returned, '
                                     '%d bytes ignored'), remaining,
                                 instance=instance)
                    return log_data

        # Try 'pty' types
        if console_types.get('pty'):
            for pty_console in console_types.get('pty'):
                source_node = pty_console.find('./source')
                if source_node is None:
                    continue
                pty = source_node.get("path")
                if not pty:
                    continue
                break
        else:
            msg = _("Guest does not have a console available")
            raise exception.NovaException(msg)

        self._chown_console_log_for_instance(instance)
        data = self._flush_libvirt_console(pty)
        console_log = self._get_console_log_path(instance)
        fpath = self._append_to_file(data, console_log)

        with libvirt_utils.file_open(fpath, 'rb') as fp:
            log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
            if remaining > 0:
                LOG.info(_LI('Truncated console log returned, '
                             '%d bytes ignored'),
                         remaining, instance=instance)
            return log_data

    @staticmethod
    def get_host_ip_addr():
        return CONF.my_ip

    def get_vnc_console(self, context, instance):
        def get_vnc_port_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            dom = xmlutils.safe_minidom_parse_string(xml)

            for graphic in dom.getElementsByTagName('graphics'):
                if graphic.getAttribute('type') == 'vnc':
                    return graphic.getAttribute('port')
            # NOTE(rmk): We had VNC consoles enabled but the instance in
            # question is not actually listening for connections.
            raise exception.ConsoleTypeUnavailable(console_type='vnc')

        port = get_vnc_port_for_instance(instance.name)
        host = CONF.vncserver_proxyclient_address

        return ctype.ConsoleVNC(host=host, port=port)
    def get_spice_console(self, context, instance):
        def get_spice_ports_for_instance(instance_name):
            virt_dom = self._lookup_by_name(instance_name)
            xml = virt_dom.XMLDesc(0)
            # TODO(sleepsonthefloor): use etree instead of minidom
            dom = xmlutils.safe_minidom_parse_string(xml)

            for graphic in dom.getElementsByTagName('graphics'):
                if graphic.getAttribute('type') == 'spice':
                    return (graphic.getAttribute('port'),
                            graphic.getAttribute('tlsPort'))
            # NOTE(rmk): We had Spice consoles enabled but the instance in
            # question is not actually listening for connections.
            raise exception.ConsoleTypeUnavailable(console_type='spice')

        ports = get_spice_ports_for_instance(instance['name'])
        host = CONF.spice.server_proxyclient_address

        return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])

    def get_serial_console(self, context, instance):
        for host, port in self._get_serial_ports_from_instance(
                instance, mode='bind'):
            return ctype.ConsoleSerial(host=host, port=port)
        raise exception.ConsoleTypeUnavailable(console_type='serial')

    @staticmethod
    def _supports_direct_io(dirpath):

        if not hasattr(os, 'O_DIRECT'):
            LOG.debug("This python runtime does not support direct I/O")
            return False

        testfile = os.path.join(dirpath, ".directio.test")

        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check if the write is allowed with 512 byte alignment
            align_size = 512
            m = mmap.mmap(-1, align_size)
            m.write(r"x" * align_size)
            os.write(f, m)
            os.close(f)
            LOG.debug("Path '%(path)s' supports direct I/O",
                      {'path': dirpath})
        except OSError as e:
            if e.errno == errno.EINVAL:
                LOG.debug("Path '%(path)s' does not support direct I/O: "
                          "'%(ex)s'", {'path': dirpath, 'ex': e})
                hasDirectIO = False
            else:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Error on '%(path)s' while checking "
                                  "direct I/O: '%(ex)s'"),
                              {'path': dirpath, 'ex': e})
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
                              "'%(ex)s'"), {'path': dirpath, 'ex': e})
        finally:
            try:
                os.unlink(testfile)
            except Exception:
                pass

        return hasDirectIO

    @staticmethod
    def _create_local(target, local_size, unit='G',
                      fs_format=None, label=None):
        """Create a blank image of specified size."""
        libvirt_utils.create_image('raw', target,
                                   '%d%c' % (local_size, unit))

    def _create_ephemeral(self, target, ephemeral_size,
                          fs_label, os_type, is_block_dev=False,
                          max_size=None, context=None, specified_fs=None):
        if not is_block_dev:
            self._create_local(target, ephemeral_size)

        # Run as root only for block devices.
        disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
                  specified_fs=specified_fs)

    @staticmethod
    def _create_swap(target, swap_mb, max_size=None, context=None):
        """Create a swap file of specified size."""
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
        utils.mkfs('swap', target)

    @staticmethod
    def _get_console_log_path(instance):
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'console.log')

    @staticmethod
    def _get_disk_config_path(instance, suffix=''):
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'disk.config' + suffix)

    def _chown_console_log_for_instance(self, instance):
        console_log = self._get_console_log_path(instance)
        if os.path.exists(console_log):
            libvirt_utils.chown(console_log, os.getuid())

    def _chown_disk_config_for_instance(self, instance):
        disk_config = self._get_disk_config_path(instance)
        if os.path.exists(disk_config):
            libvirt_utils.chown(disk_config, os.getuid())
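# --- Example (not part of driver.py): the O_DIRECT probe above is useful on
# its own. A trimmed sketch that just answers "can this directory do direct
# I/O?", using the same O_DIRECT + 512-byte-aligned write trick:
import errno
import mmap
import os

def supports_direct_io(dirpath):
    if not hasattr(os, 'O_DIRECT'):
        return False
    testfile = os.path.join(dirpath, ".directio.test")
    try:
        fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        buf = mmap.mmap(-1, 512)          # page-backed, so properly aligned
        buf.write(b"x" * 512)
        os.write(fd, buf)
        os.close(fd)
        return True
    except OSError as e:
        if e.errno == errno.EINVAL:       # EINVAL: O_DIRECT refused here
            return False
        raise
    finally:
        try:
            os.unlink(testfile)
        except OSError:
            pass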
    @staticmethod
    def _is_booted_from_volume(instance, disk_mapping):
        """Determines whether the VM is booting from volume

        Determines whether the disk mapping indicates that the VM
        is booting from a volume.
        """
        return ((not bool(instance.get('image_ref')))
                or 'disk' not in disk_mapping)

    def _inject_data(self, instance, network_info, admin_pass, files, suffix):
        """Injects data in a disk image

        Helper used for injecting data in a disk image file system.

        Keyword arguments:
          instance -- a dict that refers instance specifications
          network_info -- a dict that refers network specifications
          admin_pass -- a string used to set an admin password
          files -- a list of files needs to be injected
          suffix -- a string used as an image name suffix
        """
        # Handles the partition need to be used.
        target_partition = None
        if not instance['kernel_id']:
            target_partition = CONF.libvirt.inject_partition
            if target_partition == 0:
                target_partition = None
        if CONF.libvirt.virt_type == 'lxc':
            target_partition = None

        # Handles the key injection.
        if CONF.libvirt.inject_key and instance.get('key_data'):
            key = str(instance['key_data'])
        else:
            key = None

        # Handles the admin password injection.
        if not CONF.libvirt.inject_password:
            admin_pass = None

        # Handles the network injection.
        net = netutils.get_injected_network_template(
            network_info, libvirt_virt_type=CONF.libvirt.virt_type)

        # Handles the metadata injection
        metadata = instance.get('metadata')

        image_type = CONF.libvirt.images_type
        if any((key, net, metadata, admin_pass, files)):
            injection_image = self.image_backend.image(
                instance,
                'disk' + suffix,
                image_type)
            img_id = instance['image_ref']

            if not injection_image.check_image_exists():
                LOG.warn(_LW('Image %s not found on disk storage. '
                             'Continue without injecting data'),
                         injection_image.path, instance=instance)
                return
            try:
                disk.inject_data(injection_image.path,
                                 key, net, metadata, admin_pass, files,
                                 partition=target_partition,
                                 use_cow=CONF.use_cow_images,
                                 mandatory=('files',))
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Error injecting data into image '
                                  '%(img_id)s (%(e)s)'),
                              {'img_id': img_id, 'e': e},
                              instance=instance)

    def _create_image(self, context, instance,
                      disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True):
        booted_from_volume = self._is_booted_from_volume(
            instance, disk_mapping)

        def image(fname, image_type=CONF.libvirt.images_type):
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)

        def raw(fname):
            return image(fname, image_type='raw')

        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))

        LOG.info(_LI('Creating image'), instance=instance)

        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance)

        # NOTE(yaguang): For evacuate disk.config already exist in shared
        #                storage, chown it.
        self._chown_disk_config_for_instance(instance)

        # NOTE(vish): No need add the suffix to console.log
        libvirt_utils.write_to_file(
            self._get_console_log_path(instance), '', 7)

        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}

        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])
            if disk_images['ramdisk_id']:
                fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
                raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
                                     context=context,
                                     filename=fname,
                                     image_id=disk_images['ramdisk_id'],
                                     user_id=instance['user_id'],
                                     project_id=instance['project_id'])

        inst_type = flavors.extract_flavor(instance)

        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a base image.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
            size = instance['root_gb'] * units.Gi

            if size == 0 or suffix == '.rescue':
                size = None

            backend = image('disk')
            if backend.SUPPORTS_CLONE:
                def clone_fallback_to_fetch(*args, **kwargs):
                    try:
                        backend.clone(context, disk_images['image_id'])
                    except exception.ImageUnacceptable:
                        libvirt_utils.fetch_image(*args, **kwargs)
                fetch_func = clone_fallback_to_fetch
            else:
                fetch_func = libvirt_utils.fetch_image
            backend.cache(fetch_func=fetch_func,
                          context=context,
                          filename=root_fname,
                          size=size,
                          image_id=disk_images['image_id'],
                          user_id=instance['user_id'],
                          project_id=instance['project_id'])

        # Lookup the filesystem type if required
        os_type_with_default = disk.get_fs_type_for_os_type(
            instance['os_type'])

        ephemeral_gb = instance['ephemeral_gb']
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)

        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))

            specified_fs = eph.get('guest_format')
            if specified_fs and not self.is_supported_fs_format(specified_fs):
                msg = _("%s format is not supported") % specified_fs
                raise exception.InvalidBDMFormat(details=msg)

            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=eph['size'],
                             specified_fs=specified_fs)

        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0

            swap = driver.block_device_info_get_swap(block_device_info)
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                      mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']

            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         context=context,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)

        # Config drive
        if configdrive.required_by(instance):
            LOG.info(_LI('Using config drive'), instance=instance)
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass

            inst_md = instance_metadata.InstanceMetadata(
                instance,
                content=files, extra_md=extra_md, network_info=network_info)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                configdrive_path = self._get_disk_config_path(instance,
                                                              suffix)
                LOG.info(_LI('Creating config drive at %(path)s'),
                         {'path': configdrive_path}, instance=instance)

                try:
                    cdb.make_drive(configdrive_path)
                except processutils.ProcessExecutionError as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Creating config drive failed '
                                      'with error: %s'),
                                  e, instance=instance)

            def dummy_fetch_func(target, *args, **kwargs):
                # NOTE(sileht): this is never called because the
                # target has already been created by the
                # cdb.make_drive call
                pass

            raw('disk.config').cache(fetch_func=dummy_fetch_func,
                                     context=context,
                                     filename='disk.config' + suffix)

        # File injection only if needed
        elif inject_files and CONF.libvirt.inject_partition != -2:
            if booted_from_volume:
                LOG.warn(_LW('File injection into a boot from volume '
                             'instance is not supported'), instance=instance)
            self._inject_data(
                instance, network_info, admin_pass, files, suffix)

        if CONF.libvirt.virt_type == 'uml':
            libvirt_utils.chown(image('disk').path, 'root')
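# --- Example (not part of driver.py): _create_image() funnels every disk
# through backend.cache(fetch_func=..., filename=..., size=...), which
# fetches or generates a file once and then reuses the cached copy. A
# minimal sketch of that contract with plain files (the real backends add
# locking, resizing, and COW handling):
import os

def cache(fetch_func, cache_dir, filename, **kwargs):
    path = os.path.join(cache_dir, filename)
    if not os.path.exists(path):
        fetch_func(target=path, **kwargs)   # e.g. download, mkfs, or mkswap
    return path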
    def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm, qemu support managed mode
        # In managed mode, the configured device will be automatically
        # detached from the host OS drivers when the guest is started,
        # and then re-attached when the guest shuts down.
        if CONF.libvirt.virt_type != 'xen':
            # we do manual detach only for xen
            return
        try:
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                    self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                # Note(yjiang5) Spelling for 'dettach' is correct, see
                # http://libvirt.org/html/libvirt-libvirt.html.
                libvirt_dev.dettach()

            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separated loops
            # to detach and then reset it.
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                    self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                libvirt_dev.reset()

        except libvirt.libvirtError as exc:
            raise exception.PciDevicePrepareFailed(
                id=dev['id'],
                instance_uuid=dev['instance_uuid'],
                reason=six.text_type(exc))

    def _detach_pci_devices(self, dom, pci_devs):

        # for libvirt versions < 1.1.1 this is a race condition,
        # so forbid detach if we don't have at least that version
        if not self._has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
            if pci_devs:
                reason = (_("Detaching PCI devices with libvirt < %(ver)s"
                            " is not permitted") %
                          {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
                raise exception.PciDeviceDetachFailed(reason=reason,
                                                      dev=pci_devs)
        try:
            for dev in pci_devs:
                dom.detachDeviceFlags(self._get_guest_pci_device(dev).to_xml(),
                                      libvirt.VIR_DOMAIN_AFFECT_LIVE)
                # after detachDeviceFlags returned, we should check the dom to
                # ensure the detaching is finished
                xml = dom.XMLDesc(0)
                xml_doc = etree.fromstring(xml)
                guest_config = vconfig.LibvirtConfigGuest()
                guest_config.parse_dom(xml_doc)

                for hdev in [d for d in guest_config.devices
                             if isinstance(
                                 d, vconfig.LibvirtConfigGuestHostdevPCI)]:
                    hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
                    dbsf = pci_utils.parse_address(dev['address'])
                    if [int(x, 16) for x in hdbsf] == \
                            [int(x, 16) for x in dbsf]:
                        raise exception.PciDeviceDetachFailed(
                            reason="timeout", dev=dev)

        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                LOG.warn(_LW("Instance disappeared while detaching "
                             "a PCI device from it."))
            else:
                raise

    def _attach_pci_devices(self, dom, pci_devs):
        try:
            for dev in pci_devs:
                dom.attachDevice(self._get_guest_pci_device(dev).to_xml())

        except libvirt.libvirtError:
            LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
                      {'dev': pci_devs, 'dom': dom.ID()})
            raise

    def _prepare_args_for_get_config(self, context, instance):
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])
        image_ref = instance['image_ref']
        image_meta = compute_utils.get_image_metadata(
            context, self._image_api, image_ref, instance)
        return flavor, image_meta

    @staticmethod
    def _has_sriov_port(network_info):
        for vif in network_info:
            if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
                return True
        return False

    def _attach_sriov_ports(self, context, instance, dom, network_info=None):
        if network_info is None:
            network_info = instance.info_cache.network_info
        if network_info is None:
            return

        if self._has_sriov_port(network_info):
            flavor, image_meta = self._prepare_args_for_get_config(context,
                                                                   instance)
            for vif in network_info:
                if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
                    cfg = self.vif_driver.get_config(instance,
                                                     vif,
                                                     image_meta,
                                                     flavor,
                                                     CONF.libvirt.virt_type)
                    LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
                              {'port': vif, 'dom': dom.ID()})
                    dom.attachDevice(cfg.to_xml())

    def _detach_sriov_ports(self, instance, dom):
        network_info = instance.info_cache.network_info
        if network_info is None:
            return

        context = nova_context.get_admin_context()
        if self._has_sriov_port(network_info):
            # for libvirt versions < 1.1.1 this is a race condition,
            # so forbid detach if it's an older version
            if not self._has_min_version(
                    MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
                reason = (_("Detaching SR-IOV ports with"
                            " libvirt < %(ver)s is not permitted") %
                          {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
                raise exception.PciDeviceDetachFailed(reason=reason,
                                                      dev=network_info)

            flavor, image_meta = self._prepare_args_for_get_config(context,
                                                                   instance)
            for vif in network_info:
                if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
                    cfg = self.vif_driver.get_config(instance,
                                                     vif,
                                                     image_meta,
                                                     flavor,
                                                     CONF.libvirt.virt_type)
                    dom.detachDeviceFlags(cfg.to_xml(),
                                          libvirt.VIR_DOMAIN_AFFECT_LIVE)

    def _set_host_enabled(self, enabled,
                          disable_reason=DISABLE_REASON_UNDEFINED):
        """Enables / Disables the compute service on this host.

        This doesn't override non-automatic disablement with an automatic
        setting; thereby permitting operators to keep otherwise
        healthy hosts out of rotation.
        """
        status_name = {True: 'disabled',
                       False: 'enabled'}

        disable_service = not enabled

        ctx = nova_context.get_admin_context()
        try:
            service = objects.Service.get_by_compute_host(ctx, CONF.host)

            if service.disabled != disable_service:
                # Note(jang): this is a quick fix to stop operator-
                # disabled compute hosts from re-enabling themselves
                # automatically. We prefix any automatic reason code
                # with a fixed string. We only re-enable a host
                # automatically if we find that string in place.
                # This should probably be replaced with a separate flag.
                if not service.disabled or (
                        service.disabled_reason and
                        service.disabled_reason.startswith(DISABLE_PREFIX)):
                    service.disabled = disable_service
                    service.disabled_reason = (
                        DISABLE_PREFIX + disable_reason
                        if disable_service else DISABLE_REASON_UNDEFINED)
                    service.save()
                    LOG.debug('Updating compute service status to %s',
                              status_name[disable_service])
                else:
                    LOG.debug('Not overriding manual compute service '
                              'status with: %s',
                              status_name[disable_service])
        except exception.ComputeHostNotFound:
            LOG.warn(_LW('Cannot update service status on host: %s, '
                         'since it is not registered.'), CONF.host)
        except Exception:
            LOG.warn(_LW('Cannot update service status on host: %s, '
                         'due to an unexpected exception.'), CONF.host,
                     exc_info=True)

    def _get_host_capabilities(self):
        """Returns an instance of config.LibvirtConfigCaps representing
           the capabilities of the host.
        """
        if not self._caps:
            xmlstr = self._conn.getCapabilities()
            self._caps = vconfig.LibvirtConfigCaps()
            self._caps.parse_str(xmlstr)
            if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
                try:
                    features = self._conn.baselineCPU(
                        [self._caps.host.cpu.to_xml()],
                        libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                    # FIXME(wangpan): the return value of baselineCPU should be
                    #                 None or xml string, but libvirt has a bug
                    #                 of it from 1.1.2 which is fixed in 1.2.0,
                    #                 this -1 checking should be removed later.
                    if features and features != -1:
                        cpu = vconfig.LibvirtConfigCPU()
                        cpu.parse_str(features)
                        self._caps.host.cpu.features = cpu.features
                except libvirt.libvirtError as ex:
                    error_code = ex.get_error_code()
                    if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                        LOG.warn(_LW("URI %(uri)s does not support full set"
                                     " of host capabilities: %(error)s"),
                                 {'uri': self.uri(), 'error': ex})
                    else:
                        raise
        return self._caps

    def _get_host_uuid(self):
        """Returns a UUID representing the host."""
        caps = self._get_host_capabilities()
        return caps.host.uuid
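# --- Example (not part of driver.py): using _get_host_capabilities(). The
# method lazily parses the host capabilities XML into a LibvirtConfigCaps
# object and caches it, so callers just read attributes. A hypothetical
# helper in that style (attribute names follow the vconfig classes used
# above):
def describe_host(drvr):
    caps = drvr._get_host_capabilities()      # parsed once, then cached
    return {
        'uuid': caps.host.uuid,               # also used by _get_host_uuid()
        'arch': caps.host.cpu.arch,           # e.g. 'i686' or 'x86_64'
        'features': [f.name for f in caps.host.cpu.features],
    }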
    def _get_guest_cpu_model_config(self):
        mode = CONF.libvirt.cpu_mode
        model = CONF.libvirt.cpu_model

        if (CONF.libvirt.virt_type == "kvm" or
                CONF.libvirt.virt_type == "qemu"):
            if mode is None:
                mode = "host-model"
            if mode == "none":
                return vconfig.LibvirtConfigGuestCPU()
        else:
            if mode is None or mode == "none":
                return None

        if ((CONF.libvirt.virt_type != "kvm" and
             CONF.libvirt.virt_type != "qemu")):
            msg = _("Config requested an explicit CPU model, but "
                    "the current libvirt hypervisor '%s' does not "
                    "support selecting CPU models") % CONF.libvirt.virt_type
            raise exception.Invalid(msg)

        if mode == "custom" and model is None:
            msg = _("Config requested a custom CPU model, but no "
                    "model name was provided")
            raise exception.Invalid(msg)
        elif mode != "custom" and model is not None:
            msg = _("A CPU model name should not be set when a "
                    "host CPU model is requested")
            raise exception.Invalid(msg)

        LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
                  {'mode': mode, 'model': (model or "")})

        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.mode = mode
        cpu.model = model

        return cpu

    def _get_guest_cpu_config(self, flavor, image, guest_cpu_numa):
        cpu = self._get_guest_cpu_model_config()

        if cpu is None:
            return None

        topology = hardware.VirtCPUTopology.get_best_config(flavor,
                                                            image)

        cpu.sockets = topology.sockets
        cpu.cores = topology.cores
        cpu.threads = topology.threads
        cpu.numa = guest_cpu_numa

        return cpu

    def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
                               image_type=None):
        if CONF.libvirt.hw_disk_discard:
            if not self._has_min_version(MIN_LIBVIRT_DISCARD_VERSION,
                                         MIN_QEMU_DISCARD_VERSION,
                                         REQ_HYPERVISOR_DISCARD):
                msg = (_('Volume sets discard option, but libvirt %(libvirt)s'
                         ' or later is required, qemu %(qemu)s'
                         ' or later is required.') %
                       {'libvirt': MIN_LIBVIRT_DISCARD_VERSION,
                        'qemu': MIN_QEMU_DISCARD_VERSION})
                raise exception.Invalid(msg)

        image = self.image_backend.image(instance,
                                         name,
                                         image_type)
        disk_info = disk_mapping[name]
        return image.libvirt_info(disk_info['bus'],
                                  disk_info['dev'],
                                  disk_info['type'],
                                  self.disk_cachemode,
                                  inst_type['extra_specs'],
                                  self._get_hypervisor_version())

    def _get_guest_storage_config(self, instance, image_meta,
                                  disk_info,
                                  rescue, block_device_info,
                                  inst_type):
        devices = []
        disk_mapping = disk_info['mapping']

        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        mount_rootfs = CONF.libvirt.virt_type == "lxc"

        if mount_rootfs:
            fs = vconfig.LibvirtConfigGuestFilesys()
            fs.source_type = "mount"
            fs.source_dir = os.path.join(
                libvirt_utils.get_instance_path(instance), 'rootfs')
            devices.append(fs)
        else:
            if rescue:
                diskrescue = self._get_guest_disk_config(instance,
                                                         'disk.rescue',
                                                         disk_mapping,
                                                         inst_type)
                devices.append(diskrescue)

                diskos = self._get_guest_disk_config(instance,
                                                     'disk',
                                                     disk_mapping,
                                                     inst_type)
                devices.append(diskos)
            else:
                if 'disk' in disk_mapping:
                    diskos = self._get_guest_disk_config(instance,
                                                         'disk',
                                                         disk_mapping,
                                                         inst_type)
                    devices.append(diskos)

                if 'disk.local' in disk_mapping:
                    disklocal = self._get_guest_disk_config(instance,
                                                            'disk.local',
                                                            disk_mapping,
                                                            inst_type)
                    devices.append(disklocal)
                    instance.default_ephemeral_device = (
                        block_device.prepend_dev(disklocal.target_dev))

                for idx, eph in enumerate(
                        driver.block_device_info_get_ephemerals(
                            block_device_info)):
                    diskeph = self._get_guest_disk_config(
                        instance,
                        blockinfo.get_eph_disk(idx),
                        disk_mapping, inst_type)
                    devices.append(diskeph)

                if 'disk.swap' in disk_mapping:
                    diskswap = self._get_guest_disk_config(instance,
                                                           'disk.swap',
                                                           disk_mapping,
                                                           inst_type)
                    devices.append(diskswap)
                    instance.default_swap_device = (
                        block_device.prepend_dev(diskswap.target_dev))

            if 'disk.config' in disk_mapping:
                # NOTE(sileht): a configdrive is a raw image
                # it works well with rbd, lvm and raw images_type
                # but we must force to raw image_type if the desired
                # images_type is qcow2
                if CONF.libvirt.images_type not in ['rbd', 'lvm']:
                    image_type = "raw"
                else:
                    image_type = None
                diskconfig = self._get_guest_disk_config(instance,
                                                         'disk.config',
                                                         disk_mapping,
                                                         inst_type,
                                                         image_type)
                devices.append(diskconfig)

        for vol in block_device.get_bdms_to_connect(block_device_mapping,
                                                    mount_rootfs):
            connection_info = vol['connection_info']
            vol_dev = block_device.prepend_dev(vol['mount_device'])
            info = disk_mapping[vol_dev]
            cfg = self._connect_volume(connection_info, info)
            devices.append(cfg)
            vol['connection_info'] = connection_info
            vol.save(nova_context.get_admin_context())

        for d in devices:
            self._set_cache_mode(d)

        if (image_meta and
                image_meta.get('properties', {}).get('hw_scsi_model')):
            hw_scsi_model = image_meta['properties']['hw_scsi_model']
            scsi_controller = vconfig.LibvirtConfigGuestController()
            scsi_controller.type = 'scsi'
            scsi_controller.model = hw_scsi_model
            devices.append(scsi_controller)

        return devices

    def _get_host_sysinfo_serial_hardware(self):
        """Get a UUID from the host hardware

        Get a UUID for the host hardware reported by libvirt.
        This is typically from the SMBIOS data, unless it has
        been overridden in /etc/libvirt/libvirtd.conf
        """
        return self._get_host_uuid()

    def _get_host_sysinfo_serial_os(self):
        """Get a UUID from the host operating system

        Get a UUID for the host operating system. Modern Linux
        distros based on systemd provide a /etc/machine-id
        file containing a UUID. This is also provided inside
        systemd based containers and can be provided by other
        init systems too, since it is just a plain text file.
        """
        with open("/etc/machine-id") as f:
            # We want to have '-' in the right place
            # so we parse & reformat the value
            return str(uuid.UUID(f.read().split()[0]))

    def _get_host_sysinfo_serial_auto(self):
        if os.path.exists("/etc/machine-id"):
            return self._get_host_sysinfo_serial_os()
        else:
            return self._get_host_sysinfo_serial_hardware()

    def _get_guest_config_sysinfo(self, instance):
        sysinfo = vconfig.LibvirtConfigGuestSysinfo()

        sysinfo.system_manufacturer = version.vendor_string()
        sysinfo.system_product = version.product_string()
        sysinfo.system_version = version.version_string_with_package()

        sysinfo.system_serial = self._sysinfo_serial_func()
        sysinfo.system_uuid = instance['uuid']

        return sysinfo

    def _get_guest_pci_device(self, pci_device):

        dbsf = pci_utils.parse_address(pci_device['address'])
        dev = vconfig.LibvirtConfigGuestHostdevPCI()
        dev.domain, dev.bus, dev.slot, dev.function = dbsf

        # only kvm supports managed mode
        if CONF.libvirt.virt_type in ('xen',):
            dev.managed = 'no'
        if CONF.libvirt.virt_type in ('kvm', 'qemu'):
            dev.managed = 'yes'

        return dev

    def _get_guest_config_meta(self, context, instance, flavor):
        """Get metadata config for guest."""

        meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
        meta.package = version.version_string_with_package()
        meta.name = instance["display_name"]
        meta.creationTime = time.time()

        if instance["image_ref"] not in ("", None):
            meta.roottype = "image"
            meta.rootid = instance["image_ref"]

        if context is not None:
            ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
            ometa.userid = context.user_id
            ometa.username = context.user_name
            ometa.projectid = context.project_id
            ometa.projectname = context.project_name
            meta.owner = ometa

        fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
        fmeta.name = flavor.name
        fmeta.memory = flavor.memory_mb
        fmeta.vcpus = flavor.vcpus
        fmeta.ephemeral = flavor.ephemeral_gb
        fmeta.disk = flavor.root_gb
        fmeta.swap = flavor.swap

        meta.flavor = fmeta

        return meta

    def _machine_type_mappings(self):
        mappings = {}
        for mapping in CONF.libvirt.hw_machine_type:
            host_arch, _, machine_type = mapping.partition('=')
            mappings[host_arch] = machine_type
        return mappings

    def _get_machine_type(self, image_meta, caps):
        # The underlying machine type can be set as an image attribute,
        # or otherwise based on some architecture specific defaults

        mach_type = None

        if (image_meta is not None and image_meta.get('properties') and
                image_meta['properties'].get('hw_machine_type')
                is not None):
            mach_type = image_meta['properties']['hw_machine_type']
        else:
            # For ARM systems we will default to vexpress-a15 for armv7
            # and virt for aarch64
            if caps.host.cpu.arch == arch.ARMV7:
                mach_type = "vexpress-a15"

            if caps.host.cpu.arch == arch.AARCH64:
                mach_type = "virt"

            # If set in the config, use that as the default.
            if CONF.libvirt.hw_machine_type:
                mappings = self._machine_type_mappings()
                mach_type = mappings.get(caps.host.cpu.arch)

        return mach_type
    @staticmethod
    def _create_idmaps(klass, map_strings):
        idmaps = []
        if len(map_strings) > 5:
            map_strings = map_strings[0:5]
            LOG.warn(_LW("Too many id maps, only included first five."))
        for map_string in map_strings:
            try:
                idmap = klass()
                values = [int(i) for i in map_string.split(":")]
                idmap.start = values[0]
                idmap.target = values[1]
                idmap.count = values[2]
                idmaps.append(idmap)
            except (ValueError, IndexError):
                LOG.warn(_LW("Invalid value for id mapping %s"), map_string)
        return idmaps

    def _get_guest_idmaps(self):
        id_maps = []
        if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
            uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
                                           CONF.libvirt.uid_maps)
            id_maps.extend(uid_maps)
        if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
            gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
                                           CONF.libvirt.gid_maps)
            id_maps.extend(gid_maps)
        return id_maps

    def _get_cpu_numa_config_from_instance(self, context, instance):
        # TODO(ndipanov): Remove this check once the tests are fixed, in
        # reality all code paths should be using instance objects now.
        if isinstance(instance, objects.Instance):
            instance_topology = instance.numa_topology
        else:
            try:
                instance_topology = (
                    objects.InstanceNUMATopology.get_by_instance_uuid(
                        context or nova_context.get_admin_context(),
                        instance['uuid']))
            except exception.NumaTopologyNotFound:
                return

        if instance_topology:
            guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
            for instance_cell in instance_topology.cells:
                guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
                guest_cell.id = instance_cell.id
                guest_cell.cpus = instance_cell.cpuset
                guest_cell.memory = instance_cell.memory
                guest_cpu_numa.cells.append(guest_cell)
            return guest_cpu_numa

    def _get_guest_numa_config(self, context, instance, flavor,
                               allowed_cpus=None):
        """Returns the config objects for the guest NUMA specs.

        Determines the CPUs that the guest can be pinned to if the guest
        specifies a cell topology and the host supports it. Constructs the
        libvirt XML config object representing the NUMA topology selected
        for the guest. Returns a tuple of:

            (cpu_set, guest_cpu_tune, guest_cpu_numa)

        With the following caveats:

            a) If there is no specified guest NUMA topology, then
               guest_cpu_tune and guest_cpu_numa shall be None. cpu_set
               will be populated with the chosen CPUs that the guest
               allowed CPUs fit within, which could be the supplied
               allowed_cpus value if the host doesn't support NUMA
               topologies.

            b) If there is a specified guest NUMA topology, then
               cpu_set will be None and guest_cpu_numa will be the
               LibvirtConfigGuestCPUNUMA object representing the guest's
               NUMA topology. If the host supports NUMA, then guest_cpu_tune
               will contain a LibvirtConfigGuestCPUTune object representing
               the optimized chosen cells that match the host capabilities
               with the instance's requested topology. If the host does
               not support NUMA, then guest_cpu_tune will be None.
        """
        caps = self._get_host_capabilities()
        topology = caps.host.topology

        # We have instance NUMA so translate it to the config class
        guest_cpu_numa = self._get_cpu_numa_config_from_instance(
            context, instance)

        if not guest_cpu_numa:
            # No NUMA topology defined for instance
            vcpus = flavor.vcpus
            memory = flavor.memory_mb
            if topology:
                # Host is NUMA capable so try to keep the instance in a cell
                viable_cells = [cell for cell in topology.cells
                                if vcpus <= len(cell.cpus) and
                                memory * 1024 <= cell.memory]
                if not viable_cells:
                    # We can't contain the instance in a cell - do nothing for
                    # now.
                    # TODO(ndipanov): Attempt to spread the instance across
                    # NUMA nodes and expose the topology to the instance as an
                    # optimisation
                    return allowed_cpus, None, None
                else:
                    cell = random.choice(viable_cells)
                    pin_cpuset = set(cpu.id for cpu in cell.cpus)
                    if allowed_cpus:
                        pin_cpuset &= allowed_cpus
                    return pin_cpuset, None, None
            else:
                # We have no NUMA topology in the host either
                return allowed_cpus, None, None
        else:
            if topology:
                # Now get the CpuTune configuration from the numa_topology
                guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
                for host_cell in topology.cells:
                    for guest_cell in guest_cpu_numa.cells:
                        if guest_cell.id == host_cell.id:
                            host_cpuset = set(cpu.id for cpu in
                                              host_cell.cpus)
                            host_cpuset = (host_cpuset & allowed_cpus
                                           if allowed_cpus else host_cpuset)
                            for cpu in guest_cell.cpus:
                                pin_cpuset = (
                                    vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
                                pin_cpuset.id = cpu
                                pin_cpuset.cpuset = host_cpuset
                                guest_cpu_tune.vcpupin.append(pin_cpuset)
                return None, guest_cpu_tune, guest_cpu_numa
            else:
                return allowed_cpus, None, guest_cpu_numa
    def _get_guest_config(self, instance, network_info, image_meta,
                          disk_info, rescue=None, block_device_info=None,
                          context=None):
        """Get config data for parameters.

        :param rescue: optional dictionary that should contain the key
            'ramdisk_id' if a ramdisk is needed for the rescue image and
            'kernel_id' if a kernel is needed for the rescue image.
        """
        flavor = objects.Flavor.get_by_id(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        inst_path = libvirt_utils.get_instance_path(instance)
        disk_mapping = disk_info['mapping']
        img_meta_prop = image_meta.get('properties', {}) if image_meta else {}

        CONSOLE = "console=tty0 console=ttyS0"

        guest = vconfig.LibvirtConfigGuest()
        guest.virt_type = CONF.libvirt.virt_type
        guest.name = instance['name']
        guest.uuid = instance['uuid']
        # We are using default unit for memory: KiB
        guest.memory = flavor.memory_mb * units.Ki
        guest.vcpus = flavor.vcpus
        allowed_cpus = hardware.get_vcpu_pin_set()

        cpuset, cputune, guest_cpu_numa = self._get_guest_numa_config(
            context, instance, flavor, allowed_cpus)
        guest.cpuset = cpuset
        guest.cputune = cputune

        guest.metadata.append(self._get_guest_config_meta(context,
                                                          instance,
                                                          flavor))
        guest.idmaps = self._get_guest_idmaps()

        cputuning = ['shares', 'period', 'quota']
        for name in cputuning:
            key = "quota:cpu_" + name
            if key in flavor.extra_specs:
                if guest.cputune is None:
                    guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
                setattr(guest.cputune, name,
                        int(flavor.extra_specs[key]))

        guest.cpu = self._get_guest_cpu_config(
            flavor, image_meta, guest_cpu_numa)

        if 'root' in disk_mapping:
            root_device_name = block_device.prepend_dev(
                disk_mapping['root']['dev'])
        else:
            root_device_name = None

        if root_device_name:
            # NOTE(yamahata):
            # for nova.api.ec2.cloud.CloudController.get_metadata()
            instance.root_device_name = root_device_name

        guest.os_type = vm_mode.get_from_instance(instance)

        if guest.os_type is None:
            if CONF.libvirt.virt_type == "lxc":
                guest.os_type = vm_mode.EXE
            elif CONF.libvirt.virt_type == "uml":
                guest.os_type = vm_mode.UML
            elif CONF.libvirt.virt_type == "xen":
                guest.os_type = vm_mode.XEN
            else:
                guest.os_type = vm_mode.HVM

        caps = self._get_host_capabilities()

        if CONF.libvirt.virt_type == "xen":
            if guest.os_type == vm_mode.HVM:
                guest.os_loader = CONF.libvirt.xen_hvmloader_path
            # PAE only makes sense in X86
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.pae = True

        if CONF.libvirt.virt_type in ("kvm", "qemu"):
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.sysinfo = self._get_guest_config_sysinfo(instance)
                guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
            guest.os_mach_type = self._get_machine_type(image_meta, caps)

        if CONF.libvirt.virt_type == "lxc":
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = CONSOLE
        elif CONF.libvirt.virt_type == "uml":
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name
        else:
            if rescue:
                if rescue.get('kernel_id'):
                    guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
                    if CONF.libvirt.virt_type == "xen":
                        guest.os_cmdline = "ro root=%s" % root_device_name
                    else:
                        guest.os_cmdline = ("root=%s %s" % (root_device_name,
                                                            CONSOLE))
                        if CONF.libvirt.virt_type == "qemu":
                            guest.os_cmdline += " no_timer_check"
                if rescue.get('ramdisk_id'):
                    guest.os_initrd = os.path.join(inst_path,
                                                   "ramdisk.rescue")
            elif instance['kernel_id']:
                guest.os_kernel = os.path.join(inst_path, "kernel")
                if CONF.libvirt.virt_type == "xen":
                    guest.os_cmdline = "ro root=%s" % root_device_name
                else:
                    guest.os_cmdline = ("root=%s %s" % (root_device_name,
                                                        CONSOLE))
                    if CONF.libvirt.virt_type == "qemu":
                        guest.os_cmdline += " no_timer_check"
                if instance['ramdisk_id']:
                    guest.os_initrd = os.path.join(inst_path, "ramdisk")
                # we only support os_command_line with images with an explicit
                # kernel set and don't want to break nova if there's an
                # os_command_line property without a specified kernel_id param
                if image_meta:
                    img_props = image_meta.get('properties', {})
                    if img_props.get('os_command_line'):
                        guest.os_cmdline = img_props.get('os_command_line')
            else:
                guest.os_boot_dev = blockinfo.get_boot_order(disk_info)

        if ((CONF.libvirt.virt_type != "lxc" and
             CONF.libvirt.virt_type != "uml")):
            guest.acpi = True
            guest.apic = True

        # NOTE(mikal): Microsoft Windows expects the clock to be in
        # "localtime". If the clock is set to UTC, then you can use a
        # registry key to let windows know, but Microsoft says this is
        # buggy in http://support.microsoft.com/kb/2687252
        clk = vconfig.LibvirtConfigGuestClock()
        if instance['os_type'] == 'windows':
            LOG.info(_LI('Configuring timezone for windows instance to '
                         'localtime'), instance=instance)
            clk.offset = 'localtime'
        else:
            clk.offset = 'utc'
        guest.set_clock(clk)

        if CONF.libvirt.virt_type == "kvm":
            # TODO(berrange) One day this should be per-guest
            # OS type configurable
            tmpit = vconfig.LibvirtConfigGuestTimer()
            tmpit.name = "pit"
            tmpit.tickpolicy = "delay"

            tmrtc = vconfig.LibvirtConfigGuestTimer()
            tmrtc.name = "rtc"
            tmrtc.tickpolicy = "catchup"

            clk.add_timer(tmpit)
            clk.add_timer(tmrtc)

            guestarch = libvirt_utils.get_arch(image_meta)
            if guestarch in (arch.I686, arch.X86_64):
                # NOTE(rfolco): HPET is a hardware timer for x86 arch.
                # qemu -no-hpet is not supported on non-x86 targets.
                tmhpet = vconfig.LibvirtConfigGuestTimer()
                tmhpet.name = "hpet"
                tmhpet.present = False
                clk.add_timer(tmhpet)

        for config in self._get_guest_storage_config(instance,
                                                     image_meta,
                                                     disk_info,
                                                     rescue,
                                                     block_device_info,
                                                     flavor):
            guest.add_device(config)

        for vif in network_info:
            config = self.vif_driver.get_config(
                instance, vif, image_meta,
                flavor, CONF.libvirt.virt_type)
            guest.add_device(config)

        if ((CONF.libvirt.virt_type == "qemu" or
             CONF.libvirt.virt_type == "kvm")):
            # Create the serial console char devices
            if CONF.serial_console.enabled:
                num_ports = hardware.get_number_of_serial_ports(
                    flavor, image_meta)
                for port in six.moves.range(num_ports):
                    console = vconfig.LibvirtConfigGuestSerial()
                    console.port = port
                    console.type = "tcp"
                    console.listen_host = (
                        CONF.serial_console.proxyclient_address)
                    console.listen_port = (
                        serial_console.acquire_port(
                            console.listen_host))
                    guest.add_device(console)
            else:
                # The QEMU 'pty' driver throws away any data if no
                # client app is connected. Thus we can't get away
                # with a single type=pty console. Instead we have
                # to configure two separate consoles.
                consolelog = vconfig.LibvirtConfigGuestSerial()
                consolelog.type = "file"
                consolelog.source_path = self._get_console_log_path(instance)
                guest.add_device(consolelog)

                consolepty = vconfig.LibvirtConfigGuestSerial()
        else:
            consolepty = vconfig.LibvirtConfigGuestConsole()

        consolepty.type = "pty"
        guest.add_device(consolepty)
        # We want a tablet if VNC is enabled, or SPICE is enabled and
        # the SPICE agent is disabled. If the SPICE agent is enabled
        # it provides a paravirt mouse which drastically reduces
        # overhead (by eliminating USB polling).
        #
        # NB: this implies that if both SPICE + VNC are enabled
        # at the same time, we'll get the tablet whether the
        # SPICE agent is used or not.
        need_usb_tablet = False
        if CONF.vnc_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        elif CONF.spice.enabled and not CONF.spice.agent_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet

        if need_usb_tablet and guest.os_type == vm_mode.HVM:
            tablet = vconfig.LibvirtConfigGuestInput()
            tablet.type = "tablet"
            tablet.bus = "usb"
            guest.add_device(tablet)

        if CONF.spice.enabled and CONF.spice.agent_enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)

        # NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # those versions are. We'll just let libvirt report the
        # errors appropriately if the user enables both.
        add_video_driver = False
        if ((CONF.vnc_enabled and
             CONF.libvirt.virt_type not in ('lxc', 'uml'))):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "vnc"
            graphics.keymap = CONF.vnc_keymap
            graphics.listen = CONF.vncserver_listen
            guest.add_device(graphics)
            add_video_driver = True

        if CONF.spice.enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "spice"
            graphics.keymap = CONF.spice.keymap
            graphics.listen = CONF.spice.server_listen
            guest.add_device(graphics)
            add_video_driver = True

        if add_video_driver:
            VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
            video = vconfig.LibvirtConfigGuestVideo()
            # NOTE(ldbragst): The following logic sets the video.type
            # depending on supported defaults given the architecture,
            # virtualization type, and features. The video.type attribute can
            # be overridden by the user with image_meta['properties'], which
            # is carried out in the next if statement below this one.
            guestarch = libvirt_utils.get_arch(image_meta)
            if guest.os_type == vm_mode.XEN:
                video.type = 'xen'
            elif guestarch in (arch.PPC, arch.PPC64):
                # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
                # so use 'vga' instead when running on Power hardware.
                video.type = 'vga'
            elif CONF.spice.enabled:
                video.type = 'qxl'
            if img_meta_prop.get('hw_video_model'):
                video.type = img_meta_prop.get('hw_video_model')
                if (video.type not in VALID_VIDEO_DEVICES):
                    raise exception.InvalidVideoMode(model=video.type)

            # Set video memory, only if the flavor's limit is set
            video_ram = int(img_meta_prop.get('hw_video_ram', 0))
            max_vram = int(flavor.extra_specs
                           .get('hw_video:ram_max_mb', 0))
            if video_ram > max_vram:
                raise exception.RequestedVRamTooHigh(req_vram=video_ram,
                                                     max_vram=max_vram)
            if max_vram and video_ram:
                video.vram = video_ram
            guest.add_device(video)

        # Qemu guest agent only support 'qemu' and 'kvm' hypervisor
        if CONF.libvirt.virt_type in ('qemu', 'kvm'):
            qga_enabled = False
            # Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
            hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
            if hw_qga.lower() == 'yes':
                LOG.debug("Qemu guest agent is enabled through image "
                          "metadata", instance=instance)
                qga_enabled = True

            if qga_enabled:
                qga = vconfig.LibvirtConfigGuestChannel()
                qga.type = "unix"
                qga.target_name = "org.qemu.guest_agent.0"
                qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
                                   ("org.qemu.guest_agent.0",
                                    instance['name']))
                guest.add_device(qga)

            if (img_meta_prop.get('hw_rng_model') == 'virtio' and
                    flavor.extra_specs.get('hw_rng:allowed',
                                           '').lower() == 'true'):
                rng_device = vconfig.LibvirtConfigGuestRng()
                rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
                period = flavor.extra_specs.get('hw_rng:rate_period', 0)
                if rate_bytes:
                    rng_device.rate_bytes = int(rate_bytes)
                    rng_device.rate_period = int(period)
                if (CONF.libvirt.rng_dev_path and
                        not os.path.exists(CONF.libvirt.rng_dev_path)):
                    raise exception.RngDeviceNotExist(
                        path=CONF.libvirt.rng_dev_path)
                rng_device.backend = CONF.libvirt.rng_dev_path
                guest.add_device(rng_device)

        if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'):
            for pci_dev in pci_manager.get_instance_pci_devs(instance):
                guest.add_device(self._get_guest_pci_device(pci_dev))
        else:
            if len(pci_manager.get_instance_pci_devs(instance)) > 0:
                raise exception.PciDeviceUnsupportedHypervisor(
                    type=CONF.libvirt.virt_type)

        watchdog_action = flavor.extra_specs.get('hw_watchdog_action',
                                                 'disabled')
        if (image_meta is not None and
                image_meta.get('properties', {}).get('hw_watchdog_action')):
            watchdog_action = image_meta['properties']['hw_watchdog_action']

        # NB(sross): currently only actually supported by KVM/QEmu
        if watchdog_action != 'disabled':
            if watchdog_actions.is_valid_watchdog_action(watchdog_action):
                bark = vconfig.LibvirtConfigGuestWatchdog()
                bark.action = watchdog_action
                guest.add_device(bark)
            else:
                raise exception.InvalidWatchdogAction(action=watchdog_action)
CONF.libvirt.mem_stats_period_seconds > 0):3549 balloon = vconfig.LibvirtConfigMemoryBalloon()3550 if CONF.libvirt.virt_type in ('qemu', 'kvm'):3551 balloon.model = 'virtio'3552 else:3553 balloon.model = 'xen'3554 balloon.period = CONF.libvirt.mem_stats_period_seconds3555 guest.add_device(balloon)3556 return guest3557 def _get_guest_xml(self, context, instance, network_info, disk_info,3558 image_meta=None, rescue=None,3559 block_device_info=None, write_to_disk=False):3560 if image_meta is None:3561 image_ref = instance['image_ref']3562 image_meta = compute_utils.get_image_metadata(3563 context, self._image_api, image_ref, instance)3564 # NOTE(danms): Stringifying a NetworkInfo will take a lock. Do3565 # this ahead of time so that we don't acquire it while also3566 # holding the logging lock.3567 network_info_str = str(network_info)3568 msg = ('Start _get_guest_xml '3569 'network_info=%(network_info)s '3570 'disk_info=%(disk_info)s '3571 'image_meta=%(image_meta)s rescue=%(rescue)s '3572 'block_device_info=%(block_device_info)s' %3573 {'network_info': network_info_str, 'disk_info': disk_info,3574 'image_meta': image_meta, 'rescue': rescue,3575 'block_device_info': block_device_info})3576 # NOTE(mriedem): block_device_info can contain auth_password so we3577 # need to sanitize the password in the message.3578 LOG.debug(logging.mask_password(msg), instance=instance)3579 conf = self._get_guest_config(instance, network_info, image_meta,3580 disk_info, rescue, block_device_info,3581 context)3582 xml = conf.to_xml()3583 if write_to_disk:3584 instance_dir = libvirt_utils.get_instance_path(instance)3585 xml_path = os.path.join(instance_dir, 'libvirt.xml')3586 libvirt_utils.write_to_file(xml_path, xml)3587 LOG.debug('End _get_guest_xml xml=%(xml)s',3588 {'xml': xml}, instance=instance)3589 return xml3590 def _lookup_by_id(self, instance_id):3591 """Retrieve libvirt domain object given an instance id.3592 All libvirt error handling should be handled in this method and3593 relevant nova exceptions should be raised in response.3594 """3595 try:3596 return self._conn.lookupByID(instance_id)3597 except libvirt.libvirtError as ex:3598 error_code = ex.get_error_code()3599 if error_code == libvirt.VIR_ERR_NO_DOMAIN:3600 raise exception.InstanceNotFound(instance_id=instance_id)3601 msg = (_("Error from libvirt while looking up %(instance_id)s: "3602 "[Error Code %(error_code)s] %(ex)s")3603 % {'instance_id': instance_id,3604 'error_code': error_code,3605 'ex': ex})3606 raise exception.NovaException(msg)3607 def _lookup_by_name(self, instance_name):3608 """Retrieve libvirt domain object given an instance name.3609 All libvirt error handling should be handled in this method and3610 relevant nova exceptions should be raised in response.3611 """3612 try:3613 return self._conn.lookupByName(instance_name)3614 except libvirt.libvirtError as ex:3615 error_code = ex.get_error_code()3616 if error_code == libvirt.VIR_ERR_NO_DOMAIN:3617 raise exception.InstanceNotFound(instance_id=instance_name)3618 msg = (_('Error from libvirt while looking up %(instance_name)s: '3619 '[Error Code %(error_code)s] %(ex)s') %3620 {'instance_name': instance_name,3621 'error_code': error_code,3622 'ex': ex})3623 raise exception.NovaException(msg)3624 def get_info(self, instance):3625 """Retrieve information from libvirt for a specific instance name.3626 If a libvirt error is encountered during lookup, we might raise a3627 NotFound exception or Error exception depending on how severe the3628 libvirt error is.3629 """3630 virt_dom = 
self._lookup_by_name(instance['name'])3631 try:3632 dom_info = virt_dom.info()3633 except libvirt.libvirtError as ex:3634 error_code = ex.get_error_code()3635 if error_code == libvirt.VIR_ERR_NO_DOMAIN:3636 raise exception.InstanceNotFound(instance_id=instance['name'])3637 msg = (_('Error from libvirt while getting domain info for '3638 '%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %3639 {'instance_name': instance['name'],3640 'error_code': error_code,3641 'ex': ex})3642 raise exception.NovaException(msg)3643 return {'state': LIBVIRT_POWER_STATE[dom_info[0]],3644 'max_mem': dom_info[1],3645 'mem': dom_info[2],3646 'num_cpu': dom_info[3],3647 'cpu_time': dom_info[4],3648 'id': virt_dom.ID()}3649 def _create_domain_setup_lxc(self, instance, block_device_info, disk_info):3650 inst_path = libvirt_utils.get_instance_path(instance)3651 block_device_mapping = driver.block_device_info_get_mapping(3652 block_device_info)3653 disk_info = disk_info or {}3654 disk_mapping = disk_info.get('mapping', [])3655 if self._is_booted_from_volume(instance, disk_mapping):3656 root_disk = block_device.get_root_bdm(block_device_mapping)3657 disk_path = root_disk['connection_info']['data']['device_path']3658 disk_info = blockinfo.get_info_from_bdm(3659 CONF.libvirt.virt_type, root_disk)3660 self._connect_volume(root_disk['connection_info'], disk_info)3661 # Get the system metadata from the instance3662 system_meta = utils.instance_sys_meta(instance)3663 use_cow = system_meta['image_disk_format'] == 'qcow2'3664 else:3665 image = self.image_backend.image(instance, 'disk')3666 disk_path = image.path3667 use_cow = CONF.use_cow_images3668 container_dir = os.path.join(inst_path, 'rootfs')3669 fileutils.ensure_tree(container_dir)3670 rootfs_dev = disk.setup_container(disk_path,3671 container_dir=container_dir,3672 use_cow=use_cow)3673 try:3674 # Save rootfs device to disconnect it when deleting the instance3675 if rootfs_dev:3676 instance.system_metadata['rootfs_device_name'] = rootfs_dev3677 if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:3678 id_maps = self._get_guest_idmaps()3679 libvirt_utils.chown_for_id_maps(container_dir, id_maps)3680 except Exception:3681 with excutils.save_and_reraise_exception():3682 self._create_domain_cleanup_lxc(instance)3683 def _create_domain_cleanup_lxc(self, instance):3684 inst_path = libvirt_utils.get_instance_path(instance)3685 container_dir = os.path.join(inst_path, 'rootfs')3686 try:3687 state = self.get_info(instance)['state']3688 except exception.InstanceNotFound:3689 # The domain may not be present if the instance failed to start3690 state = None3691 if state == power_state.RUNNING:3692 # NOTE(uni): Now the container is running with its own private3693 # mount namespace and so there is no need to keep the container3694 # rootfs mounted in the host namespace3695 disk.clean_lxc_namespace(container_dir=container_dir)3696 else:3697 disk.teardown_container(container_dir=container_dir)3698 @contextlib.contextmanager3699 def _lxc_disk_handler(self, instance, block_device_info, disk_info):3700 """Context manager to handle the pre and post instance boot,3701 LXC specific disk operations.3702 An image or a volume path will be prepared and setup to be3703 used by the container, prior to starting it.3704 The disk will be disconnected and unmounted if a container has3705 failed to start.3706 """3707 if CONF.libvirt.virt_type != 'lxc':3708 yield3709 return3710 self._create_domain_setup_lxc(instance, block_device_info, disk_info)3711 try:3712 yield3713 finally:3714 
self._create_domain_cleanup_lxc(instance)3715 def _create_domain(self, xml=None, domain=None,3716 instance=None, launch_flags=0, power_on=True):3717 """Create a domain.3718 Either domain or xml must be passed in. If both are passed, then3719 the domain definition is overwritten from the xml.3720 """3721 err = None3722 try:3723 if xml:3724 err = _LE('Error defining a domain with XML: %s') % xml3725 domain = self._conn.defineXML(xml)3726 if power_on:3727 err = _LE('Error launching a defined domain with XML: %s') \3728 % domain.XMLDesc(0)3729 domain.createWithFlags(launch_flags)3730 if not utils.is_neutron():3731 err = _LE('Error enabling hairpin mode with XML: %s') \3732 % domain.XMLDesc(0)3733 self._enable_hairpin(domain.XMLDesc(0))3734 except Exception:3735 with excutils.save_and_reraise_exception():3736 if err:3737 LOG.error(err)3738 return domain3739 def _neutron_failed_callback(self, event_name, instance):3740 LOG.error(_LE('Neutron Reported failure on event '3741 '%(event)s for instance %(uuid)s'),3742 {'event': event_name, 'uuid': instance.uuid})3743 if CONF.vif_plugging_is_fatal:3744 raise exception.VirtualInterfaceCreateException()3745 def _get_neutron_events(self, network_info):3746 # NOTE(danms): We need to collect any VIFs that are currently3747 # down that we expect a down->up event for. Anything that is3748 # already up will not undergo that transition, and for3749 # anything that might be stale (cache-wise) assume it's3750 # already up so we don't block on it.3751 return [('network-vif-plugged', vif['id'])3752 for vif in network_info if vif.get('active', True) is False]3753 def _create_domain_and_network(self, context, xml, instance, network_info,3754 block_device_info=None, power_on=True,3755 reboot=False, vifs_already_plugged=False,3756 disk_info=None):3757 """Do required network setup and create domain."""3758 block_device_mapping = driver.block_device_info_get_mapping(3759 block_device_info)3760 for vol in block_device_mapping:3761 connection_info = vol['connection_info']3762 info = blockinfo.get_info_from_bdm(3763 CONF.libvirt.virt_type, vol)3764 conf = self._connect_volume(connection_info, info)3765 # cache device_path in connection_info -- required by encryptors3766 if 'data' in connection_info:3767 connection_info['data']['device_path'] = conf.source_path3768 vol['connection_info'] = connection_info3769 vol.save(context)3770 if (not reboot and 'data' in connection_info and3771 'volume_id' in connection_info['data']):3772 volume_id = connection_info['data']['volume_id']3773 encryption = encryptors.get_encryption_metadata(3774 context, self._volume_api, volume_id, connection_info)3775 if encryption:3776 encryptor = self._get_volume_encryptor(connection_info,3777 encryption)3778 encryptor.attach_volume(context, **encryption)3779 timeout = CONF.vif_plugging_timeout3780 if (self._conn_supports_start_paused and3781 utils.is_neutron() and not3782 vifs_already_plugged and power_on and timeout):3783 events = self._get_neutron_events(network_info)3784 else:3785 events = []3786 launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 03787 domain = None3788 try:3789 with self.virtapi.wait_for_instance_event(3790 instance, events, deadline=timeout,3791 error_callback=self._neutron_failed_callback):3792 self.plug_vifs(instance, network_info)3793 self.firewall_driver.setup_basic_filtering(instance,3794 network_info)3795 self.firewall_driver.prepare_instance_filter(instance,3796 network_info)3797 with self._lxc_disk_handler(instance, block_device_info,3798 disk_info):3799 domain 
= self._create_domain(3800 xml, instance=instance,3801 launch_flags=launch_flags,3802 power_on=power_on)3803 self.firewall_driver.apply_instance_filter(instance,3804 network_info)3805 except exception.VirtualInterfaceCreateException:3806 # Neutron reported failure and we didn't swallow it, so3807 # bail here3808 with excutils.save_and_reraise_exception():3809 if domain:3810 domain.destroy()3811 self.cleanup(context, instance, network_info=network_info,3812 block_device_info=block_device_info)3813 except eventlet.timeout.Timeout:3814 # We never heard from Neutron3815 LOG.warn(_LW('Timeout waiting for vif plugging callback for '3816 'instance %(uuid)s'), {'uuid': instance['uuid']})3817 if CONF.vif_plugging_is_fatal:3818 if domain:3819 domain.destroy()3820 self.cleanup(context, instance, network_info=network_info,3821 block_device_info=block_device_info)3822 raise exception.VirtualInterfaceCreateException()3823 # Resume only if domain has been paused3824 if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:3825 domain.resume()3826 return domain3827 def _get_all_block_devices(self):3828 """Return all block devices in use on this node."""3829 devices = []3830 for dom in self._list_instance_domains():3831 try:3832 doc = etree.fromstring(dom.XMLDesc(0))3833 except libvirt.libvirtError as e:3834 LOG.warn(_LW("couldn't obtain the XML from domain:"3835 " %(uuid)s, exception: %(ex)s") %3836 {"uuid": dom.UUIDString(), "ex": e})3837 continue3838 except Exception:3839 continue3840 ret = doc.findall('./devices/disk')3841 for node in ret:3842 if node.get('type') != 'block':3843 continue3844 for child in node.getchildren():3845 if child.tag == 'source':3846 devices.append(child.get('dev'))3847 return devices3848 def _get_interfaces(self, xml):3849 """Note that this function takes a domain xml.3850 Returns a list of all network interfaces for this instance.3851 """3852 doc = None3853 try:3854 doc = etree.fromstring(xml)3855 except Exception:3856 return []3857 interfaces = []3858 ret = doc.findall('./devices/interface')3859 for node in ret:3860 devdst = None3861 for child in list(node):3862 if child.tag == 'target':3863 devdst = child.attrib['dev']3864 if devdst is None:3865 continue3866 interfaces.append(devdst)3867 return interfaces3868 def _get_vcpu_total(self):3869 """Get available vcpu number of physical computer.3870 :returns: the number of cpu core instances can be used.3871 """3872 if self._vcpu_total != 0:3873 return self._vcpu_total3874 try:3875 total_pcpus = self._conn.getInfo()[2]3876 except libvirt.libvirtError:3877 LOG.warn(_LW("Cannot get the number of cpu, because this "3878 "function is not implemented for this platform. 
"))3879 return 03880 if CONF.vcpu_pin_set is None:3881 self._vcpu_total = total_pcpus3882 return self._vcpu_total3883 available_ids = hardware.get_vcpu_pin_set()3884 if available_ids[-1] >= total_pcpus:3885 raise exception.Invalid(_("Invalid vcpu_pin_set config, "3886 "out of hypervisor cpu range."))3887 self._vcpu_total = len(available_ids)3888 return self._vcpu_total3889 def _get_memory_mb_total(self):3890 """Get the total memory size(MB) of physical computer.3891 :returns: the total amount of memory(MB).3892 """3893 return self._conn.getInfo()[1]3894 @staticmethod3895 def _get_local_gb_info():3896 """Get local storage info of the compute node in GB.3897 :returns: A dict containing:3898 :total: How big the overall usable filesystem is (in gigabytes)3899 :free: How much space is free (in gigabytes)3900 :used: How much space is used (in gigabytes)3901 """3902 if CONF.libvirt.images_type == 'lvm':3903 info = lvm.get_volume_group_info(3904 CONF.libvirt.images_volume_group)3905 elif CONF.libvirt.images_type == 'rbd':3906 info = LibvirtDriver._get_rbd_driver().get_pool_info()3907 else:3908 info = libvirt_utils.get_fs_info(CONF.instances_path)3909 for (k, v) in info.iteritems():3910 info[k] = v / units.Gi3911 return info3912 def _get_vcpu_used(self):3913 """Get vcpu usage number of physical computer.3914 :returns: The total number of vcpu(s) that are currently being used.3915 """3916 total = 03917 if CONF.libvirt.virt_type == 'lxc':3918 return total + 13919 for dom in self._list_instance_domains():3920 try:3921 vcpus = dom.vcpus()3922 except libvirt.libvirtError as e:3923 LOG.warn(_LW("couldn't obtain the vpu count from domain id:"3924 " %(uuid)s, exception: %(ex)s") %3925 {"uuid": dom.UUIDString(), "ex": e})3926 else:3927 if vcpus is not None and len(vcpus) > 1:3928 total += len(vcpus[1])3929 # NOTE(gtt116): give other tasks a chance.3930 greenthread.sleep(0)3931 return total3932 def _get_memory_mb_used(self):3933 """Get the used memory size(MB) of physical computer.3934 :returns: the total usage of memory(MB).3935 """3936 if sys.platform.upper() not in ['LINUX2', 'LINUX3']:3937 return 03938 with open('/proc/meminfo') as fp:3939 m = fp.read().split()3940 idx1 = m.index('MemFree:')3941 idx2 = m.index('Buffers:')3942 idx3 = m.index('Cached:')3943 if CONF.libvirt.virt_type == 'xen':3944 used = 03945 for dom in self._list_instance_domains(only_guests=False):3946 try:3947 dom_mem = int(dom.info()[2])3948 except libvirt.libvirtError as e:3949 LOG.warn(_LW("couldn't obtain the memory from domain:"3950 " %(uuid)s, exception: %(ex)s") %3951 {"uuid": dom.UUIDString(), "ex": e})3952 continue3953 # skip dom03954 if dom.ID() != 0:3955 used += dom_mem3956 else:3957 # the mem reported by dom0 is be greater of what3958 # it is being used3959 used += (dom_mem -3960 (int(m[idx1 + 1]) +3961 int(m[idx2 + 1]) +3962 int(m[idx3 + 1])))3963 # Convert it to MB3964 return used / units.Ki3965 else:3966 avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))3967 # Convert it to MB3968 return self._get_memory_mb_total() - avail / units.Ki3969 def _get_hypervisor_type(self):3970 """Get hypervisor type.3971 :returns: hypervisor type (ex. qemu)3972 """3973 return self._conn.getType()3974 def _get_hypervisor_version(self):3975 """Get hypervisor version.3976 :returns: hypervisor version (ex. 12003)3977 """3978 # NOTE(justinsb): getVersion moved between libvirt versions3979 # Trying to do be compatible with older versions is a lost cause3980 # But ... 
we can at least give the user a nice message3981 method = getattr(self._conn, 'getVersion', None)3982 if method is None:3983 raise exception.NovaException(_("libvirt version is too old"3984 " (does not support getVersion)"))3985 # NOTE(justinsb): If we wanted to get the version, we could:3986 # method = getattr(libvirt, 'getVersion', None)3987 # NOTE(justinsb): This would then rely on a proper version check3988 return method()3989 def _get_hypervisor_hostname(self):3990 """Returns the hostname of the hypervisor."""3991 hostname = self._conn.getHostname()3992 if not hasattr(self, '_hypervisor_hostname'):3993 self._hypervisor_hostname = hostname3994 elif hostname != self._hypervisor_hostname:3995 LOG.error(_LE('Hostname has changed from %(old)s '3996 'to %(new)s. A restart is required to take effect.'),3997 {'old': self._hypervisor_hostname,3998 'new': hostname})3999 return self._hypervisor_hostname4000 def _get_instance_capabilities(self):4001 """Get hypervisor instance capabilities4002 Returns a list of tuples that describe instances the4003 hypervisor is capable of hosting. Each tuple consists4004 of the triplet (arch, hypervisor_type, vm_mode).4005 :returns: List of tuples describing instance capabilities4006 """4007 caps = self._get_host_capabilities()4008 instance_caps = list()4009 for g in caps.guests:4010 for dt in g.domtype:4011 instance_cap = (4012 arch.canonicalize(g.arch),4013 hvtype.canonicalize(dt),4014 vm_mode.canonicalize(g.ostype))4015 instance_caps.append(instance_cap)4016 return instance_caps4017 def _get_cpu_info(self):4018 """Get cpuinfo information.4019 Obtains cpu feature from virConnect.getCapabilities,4020 and returns as a json string.4021 :return: see above description4022 """4023 caps = self._get_host_capabilities()4024 cpu_info = dict()4025 cpu_info['arch'] = caps.host.cpu.arch4026 cpu_info['model'] = caps.host.cpu.model4027 cpu_info['vendor'] = caps.host.cpu.vendor4028 topology = dict()4029 topology['sockets'] = caps.host.cpu.sockets4030 topology['cores'] = caps.host.cpu.cores4031 topology['threads'] = caps.host.cpu.threads4032 cpu_info['topology'] = topology4033 features = list()4034 for f in caps.host.cpu.features:4035 features.append(f.name)4036 cpu_info['features'] = features4037 # TODO(berrange): why do we bother converting the4038 # libvirt capabilities XML into a special JSON format ?4039 # The data format is different across all the drivers4040 # so we could just return the raw capabilities XML4041 # which 'compare_cpu' could use directly4042 #4043 # That said, arch_filter.py now seems to rely on4044 # the libvirt drivers format which suggests this4045 # data format needs to be standardized across drivers4046 return jsonutils.dumps(cpu_info)4047 def _get_pcidev_info(self, devname):4048 """Returns a dict of PCI device."""4049 def _get_device_type(cfgdev):4050 """Get a PCI device's device type.4051 An assignable PCI device can be a normal PCI device,4052 a SR-IOV Physical Function (PF), or a SR-IOV Virtual4053 Function (VF). 
Only normal PCI devices or SR-IOV VFs4054 are assignable, while SR-IOV PFs are always owned by4055 hypervisor.4056 Please notice that a PCI device with SR-IOV4057 capability but not enabled is reported as normal PCI device.4058 """4059 for fun_cap in cfgdev.pci_capability.fun_capability:4060 if len(fun_cap.device_addrs) != 0:4061 if fun_cap.type == 'virt_functions':4062 return {'dev_type': 'type-PF'}4063 if fun_cap.type == 'phys_function':4064 phys_address = "%s:%s:%s.%s" % (4065 fun_cap.device_addrs[0][0].replace("0x", ''),4066 fun_cap.device_addrs[0][1].replace("0x", ''),4067 fun_cap.device_addrs[0][2].replace("0x", ''),4068 fun_cap.device_addrs[0][3].replace("0x", ''))4069 return {'dev_type': 'type-VF',4070 'phys_function': phys_address}4071 return {'dev_type': 'type-PCI'}4072 virtdev = self._conn.nodeDeviceLookupByName(devname)4073 xmlstr = virtdev.XMLDesc(0)4074 cfgdev = vconfig.LibvirtConfigNodeDevice()4075 cfgdev.parse_str(xmlstr)4076 address = "%04x:%02x:%02x.%1x" % (4077 cfgdev.pci_capability.domain,4078 cfgdev.pci_capability.bus,4079 cfgdev.pci_capability.slot,4080 cfgdev.pci_capability.function)4081 device = {4082 "dev_id": cfgdev.name,4083 "address": address,4084 "product_id": cfgdev.pci_capability.product_id[2:6],4085 "vendor_id": cfgdev.pci_capability.vendor_id[2:6],4086 }4087 # requirement by DataBase Model4088 device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device4089 device.update(_get_device_type(cfgdev))4090 return device4091 def _pci_device_assignable(self, device):4092 if device['dev_type'] == 'type-PF':4093 return False4094 return self.dev_filter.device_assignable(device)4095 def _get_pci_passthrough_devices(self):4096 """Get host PCI devices information.4097 Obtains pci devices information from libvirt, and returns4098 as a JSON string.4099 Each device information is a dictionary, with mandatory keys4100 of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',4101 'label' and other optional device specific information.4102 Refer to the objects/pci_device.py for more idea of these keys.4103 :returns: a JSON string containaing a list of the assignable PCI4104 devices information4105 """4106 # Bail early if we know we can't support `listDevices` to avoid4107 # repeated warnings within a periodic task4108 if not getattr(self, '_list_devices_supported', True):4109 return jsonutils.dumps([])4110 try:4111 dev_names = self._conn.listDevices('pci', 0) or []4112 except libvirt.libvirtError as ex:4113 error_code = ex.get_error_code()4114 if error_code == libvirt.VIR_ERR_NO_SUPPORT:4115 self._list_devices_supported = False4116 LOG.warn(_LW("URI %(uri)s does not support "4117 "listDevices: " "%(error)s"),4118 {'uri': self.uri(), 'error': ex})4119 return jsonutils.dumps([])4120 else:4121 raise4122 pci_info = []4123 for name in dev_names:4124 pci_dev = self._get_pcidev_info(name)4125 if self._pci_device_assignable(pci_dev):4126 pci_info.append(pci_dev)4127 return jsonutils.dumps(pci_info)4128 def _get_host_numa_topology(self):4129 caps = self._get_host_capabilities()4130 topology = caps.host.topology4131 if topology is None or not topology.cells:4132 return4133 topology = hardware.VirtNUMAHostTopology(4134 cells=[hardware.VirtNUMATopologyCellUsage(4135 cell.id, set(cpu.id for cpu in cell.cpus),4136 cell.memory)4137 for cell in topology.cells])4138 allowed_cpus = hardware.get_vcpu_pin_set()4139 if allowed_cpus:4140 for cell in topology.cells:4141 cell.cpuset &= allowed_cpus4142 return topology4143 def get_all_volume_usage(self, context, compute_host_bdms):...
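Worth noting in the excerpt above: _get_instance_capabilities, _get_cpu_info and _get_host_numa_topology all start from the same _get_host_capabilities() call and then project different views (guest arch triplets, a CPU JSON blob, NUMA cells) out of the one parsed capabilities object. As a rough, self-contained illustration of the _get_cpu_info step, the sketch below builds a similar JSON document straight from libvirt's capabilities XML, without Nova's vconfig wrapper classes. It is not code from either project: it assumes the libvirt-python bindings are installed and a qemu:///system daemon is reachable, and get_cpu_info is an illustrative name.

# Rough standalone analogue of the _get_cpu_info method above, reading
# the host CPU description directly from libvirt's capabilities XML.
# Assumptions: libvirt-python is installed and qemu:///system is
# reachable; this is a sketch, not Nova's or lisa's actual code.
import json
import xml.etree.ElementTree as ET

import libvirt


def get_cpu_info(conn: libvirt.virConnect) -> str:
    # getCapabilities() returns the same XML document that Nova's
    # _get_host_capabilities() parses into config objects.
    cpu = ET.fromstring(conn.getCapabilities()).find("./host/cpu")
    if cpu is None:
        raise RuntimeError("capabilities XML has no <host><cpu> element")

    # <topology sockets='..' cores='..' threads='..'/> attributes are
    # strings in the XML; convert to ints as the Nova code does.
    topology_xml = cpu.find("topology")
    topology = {}
    if topology_xml is not None:
        topology = {key: int(topology_xml.get(key, 0))
                    for key in ("sockets", "cores", "threads")}

    cpu_info = {
        "arch": cpu.findtext("arch"),
        "model": cpu.findtext("model"),
        "vendor": cpu.findtext("vendor"),
        "topology": topology,
        "features": [f.get("name") for f in cpu.findall("feature")],
    }
    return json.dumps(cpu_info)


if __name__ == "__main__":
    conn = libvirt.open("qemu:///system")
    try:
        print(get_cpu_info(conn))
    finally:
        conn.close()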
platform.py

Source:platform.py Github

...
        self, environment: Environment, log: Logger, lv_conn: libvirt.virConnect
    ) -> bool:
        if not environment.runbook.nodes_requirement:
            return True

        host_capabilities = self._get_host_capabilities(lv_conn, log)
        nodes_capabilities = self._create_node_capabilities(host_capabilities)

        nodes_requirement = []
        for node_space in environment.runbook.nodes_requirement:
            # Check that the general node capabilities are compatible with this node's
            # specific requirements.
            if not node_space.check(nodes_capabilities):
                return False

            # Rectify the general node capabilities with this node's specific
            # requirements.
            node_requirement = node_space.generate_min_capability(nodes_capabilities)
            nodes_requirement.append(node_requirement)

        if not self._check_host_capabilities(nodes_requirement, host_capabilities, log):
            return False

        environment.runbook.nodes_requirement = nodes_requirement
        return True

    def _get_host_capabilities(
        self, lv_conn: libvirt.virConnect, log: Logger
    ) -> _HostCapabilities:
        host_capabilities = _HostCapabilities()

        capabilities_xml_str = lv_conn.getCapabilities()
        capabilities_xml = ET.fromstring(capabilities_xml_str)

        host_xml = capabilities_xml.find("host")
        assert host_xml

        topology_xml = host_xml.find("topology")
        assert topology_xml

        cells_xml = topology_xml.find("cells")
        assert cells_xml

        for cell in cells_xml.findall("cell"):
            cpus_xml = cell.find("cpus")
            assert cpus_xml
...
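This lisa snippet is where _get_host_capabilities actually lives: it asks libvirt for the capabilities document via lv_conn.getCapabilities() and walks the <host><topology><cells> hierarchy with ElementTree, and the caller then checks each node requirement in the runbook against what the host can provide. Below is a minimal standalone sketch of that parsing, runnable outside lisa. It assumes the libvirt-python package is installed and a local qemu:///system daemon is reachable; HostCapabilities here is a simplified stand-in for lisa's internal _HostCapabilities type, not its real definition.

# Minimal standalone sketch of the capabilities parsing shown above.
# Assumptions: libvirt-python is installed and qemu:///system is
# reachable; HostCapabilities is a simplified stand-in for lisa's
# internal _HostCapabilities type.
import xml.etree.ElementTree as ET
from dataclasses import dataclass, field
from typing import List

import libvirt


@dataclass
class HostCapabilities:
    core_count: int = 0
    free_memory_kib: int = 0
    cpus_per_cell: List[int] = field(default_factory=list)


def get_host_capabilities(lv_conn: libvirt.virConnect) -> HostCapabilities:
    caps = HostCapabilities()
    capabilities_xml = ET.fromstring(lv_conn.getCapabilities())

    host_xml = capabilities_xml.find("host")
    assert host_xml is not None, "no <host> element in capabilities XML"
    topology_xml = host_xml.find("topology")
    assert topology_xml is not None, "no <topology> element"
    cells_xml = topology_xml.find("cells")
    assert cells_xml is not None, "no <cells> element"

    # Count CPUs per NUMA cell, mirroring the loop in the snippet above.
    for cell in cells_xml.findall("cell"):
        cpus_xml = cell.find("cpus")
        assert cpus_xml is not None, "no <cpus> element in <cell>"
        cpu_count = len(cpus_xml.findall("cpu"))
        caps.cpus_per_cell.append(cpu_count)
        caps.core_count += cpu_count

    # getFreeMemory() reports bytes; store KiB for readability.
    caps.free_memory_kib = lv_conn.getFreeMemory() // 1024
    return caps


if __name__ == "__main__":
    conn = libvirt.open("qemu:///system")
    try:
        print(get_host_capabilities(conn))
    finally:
        conn.close()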