1. Overview
libvirt is a management layer on top of KVM. It exposes high-level interfaces for operating KVM, covering the virtual machine lifecycle (create, delete, query, manage) as well as network and storage management. Through libvirt you can drive KVM and implement the same kind of functionality offered by tools such as virsh and virt-manager. This article takes listing all instances on the current hypervisor as its example and shows how to use the libvirt Python module to list the virtual machines on the local host. For other libvirt operations, such as powering on, powering off, rebooting, network management, and storage management, see the appendix.
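To give a feel for the lifecycle interface mentioned above, here is a minimal sketch (not part of the original script) that starts a shut-off guest or asks a running one to shut down; the domain name demo-vm is a placeholder and must be replaced with a domain that actually exists on your host:

import sys
import libvirt

conn = libvirt.open('qemu:///system')    # read-write connection to the local QEMU/KVM hypervisor
try:
    dom = conn.lookupByName('demo-vm')   # placeholder name; replace with an existing domain
    if not dom.isActive():
        dom.create()                     # boot the defined (inactive) guest, like "virsh start demo-vm"
    else:
        dom.shutdown()                   # ask the guest OS to shut down, like "virsh shutdown demo-vm"
except libvirt.libvirtError as e:
    sys.exit(e)
finally:
    conn.close()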
2. Implementation code
cat libvirt_vm.py
#!/usr/bin/env python
# _*_ coding:utf8 _*_
# author: Happy
# blog address: http://happylab.blog.51cto.com
# from the Happy lab
import sys

try:
    import libvirt
    HAS_LIBVIRT = True
except Exception:
    HAS_LIBVIRT = False


def is_virtual():
    '''
    Check whether the current system supports KVM virtualization; exit if it does not.
    '''
    if not HAS_LIBVIRT:
        sys.exit("the current system does not support virtualization")
    return 'virt'


def get_conn():
    '''
    Get a libvirt connection handle, which provides the interface for driving libvirt.
    '''
    if is_virtual() == 'virt':
        try:
            conn = libvirt.open('qemu:///system')
        except Exception as e:
            sys.exit(e)
        return conn


def close_conn(conn):
    '''
    Close the libvirt connection handle.
    '''
    return conn.close()


def list_active_vms():
    '''
    Get all running instances and return their names.
    '''
    vms_list = []
    conn = get_conn()
    domain_list = conn.listDomainsID()
    for dom_id in domain_list:
        vms_list.append(conn.lookupByID(dom_id).name())
    close_conn(conn)
    return vms_list


def list_inactive_vms():
    '''
    Get all shut-off (defined but not running) instances and return their names.
    '''
    vms_list = []
    conn = get_conn()
    for name in conn.listDefinedDomains():
        vms_list.append(name)
    close_conn(conn)
    return vms_list


def list_all_vms():
    '''
    Get all virtual machines, running and shut off.
    '''
    vms = []
    vms.extend(list_active_vms())
    vms.extend(list_inactive_vms())
    return vms


def get_capability():
    '''
    Get the hypervisor capability information, returned as XML.
    '''
    conn = get_conn()
    capability = conn.getCapabilities()
    conn.close()
    return capability


def get_hostname():
    '''
    Attain the hypervisor's hostname.
    '''
    conn = get_conn()
    hostname = conn.getHostname()
    conn.close()
    return hostname


def get_max_vcpus():
    '''
    Get the maximum number of vCPUs the hypervisor supports for a guest.
    '''
    conn = get_conn()
    max_vcpus = conn.getMaxVcpus(None)
    conn.close()
    return max_vcpus


if __name__ == "__main__":
    print "VM list on host %s:" % get_hostname()
    for vm in list_active_vms():
        print vm
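On newer libvirt versions, the active/inactive split above can also be done with a single call, conn.listAllDomains(). The following is only a sketch of that alternative (function name and return format are my own, not part of the original script):

import libvirt

def list_all_vms_with_state():
    '''Return (name, is_active) pairs for every defined domain using listAllDomains().'''
    conn = libvirt.open('qemu:///system')
    try:
        # flags=0 returns both running and shut-off domains in one call
        return [(dom.name(), bool(dom.isActive())) for dom in conn.listAllDomains(0)]
    finally:
        conn.close()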
3. Test
[root@ChuangYiYuan_10_16_2_19 ~]# python libvirt_vm.py
VM list on host ChuangYiYuan_10_16_2_19:
instance-0000006b
instance-000001c1
instance-000000b9
instance-00000181
instance-000001f5
instance-000000cb
instance-0000007f
instance-000000eb
instance-00000145
instance-0000019b
instance-000001b9
instance-000000d7
instance-0000012b
instance-00000077
instance-00000165
instance-00000083
4. Summary
KVM can be managed through libvirt, which provides most of the interfaces needed to administer KVM; with these interfaces you can implement the same low-level operations that OpenStack performs on the hypervisor.
5. Appendix
OpenStack's low-level implementation on top of libvirt, for your reference (an excerpt from Nova's LibvirtDriver; only part of the module is shown).
""" Supports KVM, LXC, QEMU, UML, and XEN. """ import errno import eventlet import functools import glob import mmap import os import shutil import socket import sys import tempfile import threading import time import uuid class LibvirtDriver(driver.ComputeDriver): capabilities = { "has_p_w_picpathcache": True, "supports_recreate": True, } def __init__(self, virtapi, read_only=False): super(LibvirtDriver, self).__init__(virtapi) global libvirt if libvirt is None: libvirt = __import__('libvirt') self._host_state = None self._initiator = None self._fc_wwnns = None self._fc_wwpns = None self._wrapped_conn = None self._wrapped_conn_lock = threading.Lock() self._caps = None self._vcpu_total = 0 self.read_only = read_only self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, self.virtapi, get_connection=self._get_connection) vif_class = importutils.import_class(CONF.libvirt.vif_driver) self.vif_driver = vif_class(self._get_connection) self.volume_drivers = driver.driver_dict_from_config( CONF.libvirt.volume_drivers, self) self.dev_filter = pci_whitelist.get_pci_devices_filter() self._event_queue = None self._disk_cachemode = None self.p_w_picpath_cache_manager = p_w_picpathcache.ImageCacheManager() self.p_w_picpath_backend = p_w_picpathbackend.Backend(CONF.use_cow_p_w_picpaths) self.disk_cachemodes = {} self.valid_cachemodes = ["default", "none", "writethrough", "writeback", "directsync", "unsafe", ] for mode_str in CONF.libvirt.disk_cachemodes: disk_type, sep, cache_mode = mode_str.partition('=') if cache_mode not in self.valid_cachemodes: LOG.warn(_('Invalid cachemode %(cache_mode)s specified ' 'for disk type %(disk_type)s.'), {'cache_mode': cache_mode, 'disk_type': disk_type}) continue self.disk_cachemodes[disk_type] = cache_mode self._volume_api = volume.API() def _get_new_connection(self): # call with _wrapped_conn_lock held LOG.debug(_('Connecting to libvirt: %s'), self.uri()) wrapped_conn = None try: wrapped_conn = self._connect(self.uri(), self.read_only) finally: # Enabling the compute service, in case it was disabled # since the connection was successful. disable_reason = DISABLE_REASON_UNDEFINED if not wrapped_conn: disable_reason = 'Failed to connect to libvirt' self._set_host_enabled(bool(wrapped_conn), disable_reason) self._wrapped_conn = wrapped_conn try: LOG.debug(_("Registering for lifecycle events %s"), self) wrapped_conn.domainEventRegisterAny( None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._event_lifecycle_callback, self) except Exception as e: LOG.warn(_("URI %(uri)s does not support events: %(error)s"), {'uri': self.uri(), 'error': e}) try: LOG.debug(_("Registering for connection events: %s") % str(self)) wrapped_conn.registerCloseCallback(self._close_callback, None) except (TypeError, AttributeError) as e: # NOTE: The registerCloseCallback of python-libvirt 1.0.1+ # is defined with 3 arguments, and the above registerClose- # Callback succeeds. However, the one of python-libvirt 1.0.0 # is defined with 4 arguments and TypeError happens here. # Then python-libvirt 0.9 does not define a method register- # CloseCallback. 
LOG.debug(_("The version of python-libvirt does not support " "registerCloseCallback or is too old: %s"), e) except libvirt.libvirtError as e: LOG.warn(_("URI %(uri)s does not support connection" " events: %(error)s"), {'uri': self.uri(), 'error': e}) return wrapped_conn @staticmethod def uri(): if CONF.libvirt.virt_type == 'uml': uri = CONF.libvirt.connection_uri or 'uml:///system' elif CONF.libvirt.virt_type == 'xen': uri = CONF.libvirt.connection_uri or 'xen:///' elif CONF.libvirt.virt_type == 'lxc': uri = CONF.libvirt.connection_uri or 'lxc:///' else: uri = CONF.libvirt.connection_uri or 'qemu:///system' return uri @staticmethod def _connect(uri, read_only): def _connect_auth_cb(creds, opaque): if len(creds) == 0: return 0 LOG.warning( _("Can not handle authentication request for %d credentials") % len(creds)) raise exception.NovaException( _("Can not handle authentication request for %d credentials") % len(creds)) auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_ECHOPROMPT, libvirt.VIR_CRED_REALM, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_NOECHOPROMPT, libvirt.VIR_CRED_EXTERNAL], _connect_auth_cb, None] try: flags = 0 if read_only: flags = libvirt.VIR_CONNECT_RO # tpool.proxy_call creates a native thread. Due to limitations # with eventlet locking we cannot use the logging API inside # the called function. return tpool.proxy_call( (libvirt.virDomain, libvirt.virConnect), libvirt.openAuth, uri, auth, flags) except libvirt.libvirtError as ex: LOG.exception(_("Connection to libvirt failed: %s"), ex) payload = dict(ip=LibvirtDriver.get_host_ip_addr(), method='_connect', reason=ex) rpc.get_notifier('compute').error(nova_context.get_admin_context(), 'compute.libvirt.error', payload) raise exception.HypervisorUnavailable(host=CONF.host) ''' 返回instance的個數,conn.numOfDomains()用於顯示active的vm個數,conn.numOfDefinedDomains()則顯示inactive的vm個數 ''' def get_num_instances(self): """Efficient override of base instance_exists method.""" return self._conn.numOfDomains() ''' 檢查虛擬機是否存在,根據名字校驗 ''' def instance_exists(self, instance_name): """Efficient override of base instance_exists method.""" try: self._lookup_by_name(instance_name) return True except exception.NovaException: return False ''' 查看libvirt active虛擬機的id號碼,conn.numOfDomains()用於顯示active虛擬機的個數,conn.numOfDefinedDomains()則用於顯示inactive的虛擬機個數 ''' # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed. def list_instance_ids(self): if self._conn.numOfDomains() == 0: return [] return self._conn.listDomainsID() ''' 返回虛擬機列表的名字,調用list_instance_ids()函數,只是顯示active虛擬機的名字,其中conn.lookupByID(ids).name()用於顯示instance的名字 ''' def list_instances(self): names = [] for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). if domain_id != 0: domain = self._lookup_by_id(domain_id) names.append(domain.name()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains names.extend([vm for vm in self._conn.listDefinedDomains() if vm not in names]) return names ''' 查看instance的UUID號碼,顯示active+inactive狀態的虛擬機的UUID號碼,其中conn.lookupByID(ids).UUIDString()用於返回active instance的UUID號碼 conn.lookupByName('name').UUIDString()則返回inactive虛擬機的UUID號 ''' def list_instance_uuids(self): uuids = set() for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). 
if domain_id != 0: domain = self._lookup_by_id(domain_id) uuids.add(domain.UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains for domain_name in self._conn.listDefinedDomains(): try: uuids.add(self._lookup_by_name(domain_name).UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue return list(uuids) def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for vif in network_info: self.vif_driver.plug(instance, vif) def unplug_vifs(self, instance, network_info, ignore_errors=False): """Unplug VIFs from networks.""" for vif in network_info: try: self.vif_driver.unplug(instance, vif) except exception.NovaException: if not ignore_errors: raise def _teardown_container(self, instance): inst_path = libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') container_root_device = instance.get('root_device_name') disk.teardown_container(container_dir, container_root_device) def _undefine_domain(self, instance): try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: virt_dom = None if virt_dom: try: try: virt_dom.undefineFlags( libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) except libvirt.libvirtError: LOG.debug(_("Error from libvirt during undefineFlags." " Retrying with undefine"), instance=instance) virt_dom.undefine() except AttributeError: # NOTE(vish): Older versions of libvirt don't support # undefine flags, so attempt to do the # right thing. try: if virt_dom.hasManagedSaveImage(0): virt_dom.managedSaveRemove(0) except AttributeError: pass virt_dom.undefine() except libvirt.libvirtError as e: with excutils.save_and_reraise_exception(): errcode = e.get_error_code() LOG.error(_('Error from libvirt during undefine. ' 'Code=%(errcode)s Error=%(e)s') % {'errcode': errcode, 'e': e}, instance=instance) def _cleanup_rbd(self, instance): pool = CONF.libvirt.p_w_picpaths_rbd_pool volumes = libvirt_utils.list_rbd_volumes(pool) pattern = instance['uuid'] def belongs_to_instance(disk): return disk.startswith(pattern) volumes = filter(belongs_to_instance, volumes) if volumes: libvirt_utils.remove_rbd_volumes(pool, *volumes) def _cleanup_lvm(self, instance): """Delete all LVM disks for given instance object.""" disks = self._lvm_disks(instance) if disks: libvirt_utils.remove_logical_volumes(*disks) @staticmethod def _get_disk_xml(xml, device): """Returns the xml for the disk mounted at device.""" try: doc = etree.fromstring(xml) except Exception: return None ret = doc.findall('./devices/disk') for node in ret: for child in node.getchildren(): if child.tag == 'target': if child.get('dev') == device: return etree.tostring(node) def _get_existing_domain_xml(self, instance, network_info, block_device_info=None): try: virt_dom = self._lookup_by_name(instance['name']) xml = virt_dom.XMLDesc(0) except exception.InstanceNotFound: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(nova_context.get_admin_context(), instance, network_info, disk_info, block_device_info=block_device_info) return xml
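The domainEventRegisterAny() call that Nova uses in _get_new_connection() above also works outside of Nova. The following standalone sketch (the URI qemu:///system, the callback name, and the simple keep-alive loop are my own assumptions, not taken from the Nova code) registers a lifecycle callback and runs libvirt's default event loop in a background thread:

import threading
import time
import libvirt

# The default event loop implementation must be registered before opening the connection.
libvirt.virEventRegisterDefaultImpl()

def _run_event_loop():
    while True:
        libvirt.virEventRunDefaultImpl()   # dispatch pending libvirt events

loop = threading.Thread(target=_run_event_loop)
loop.daemon = True
loop.start()

def lifecycle_cb(conn, dom, event, detail, opaque):
    # event/detail are integer codes such as VIR_DOMAIN_EVENT_STARTED / VIR_DOMAIN_EVENT_STOPPED
    print("domain %s: lifecycle event %d (detail %d)" % (dom.name(), event, detail))

conn = libvirt.openReadOnly('qemu:///system')
conn.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                            lifecycle_cb, None)

# Keep the main thread alive so events can be delivered.
while True:
    time.sleep(1)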