Created August 30, 2013 15:01
This is a patch for "add-rbd-imagebackend-for-nova". It is based on Nova-2013.2.b2, because we need to use the Fedora openstack-nova RPM specs to build our RPMs. If the flag libvirt_images_rbd_clone_image == false, Nova will download the image directly from Glance. If the flag is true, Nova will clone the image from Glance's Ceph pool and there isn't dat…
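For context on what the clone path does: cloning from Glance's Ceph pool means asking librbd for a copy-on-write child of the image's protected snapshot, so no image data is copied when an instance boots. The short sketch below (not part of the patch) illustrates that call flow with the python rados/rbd bindings; the pool names, image name, and snapshot name are placeholder values.

# Minimal standalone sketch of the copy-on-write clone performed when
# libvirt_images_rbd_clone_image=true; the pool, image, and snapshot
# names below are hypothetical examples, not values taken from the patch.
import rados
import rbd

client = rados.Rados(conffile='/etc/ceph/ceph.conf')
client.connect()
try:
    src_ioctx = client.open_ioctx('images')   # Glance's rbd pool (assumed)
    dst_ioctx = client.open_ioctx('rbd')      # Nova's pool (the flag's default)
    try:
        # Clone the protected snapshot of the Glance image into Nova's pool;
        # layering makes the child copy-on-write, so no data is copied.
        rbd.RBD().clone(src_ioctx, 'glance-image-id', 'snap',
                        dst_ioctx, 'instance-uuid_disk',
                        features=rbd.RBD_FEATURE_LAYERING)
    finally:
        dst_ioctx.close()
        src_ioctx.close()
finally:
    client.shutdown()
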
From 9ad0efe74436b0dab934295cef3ccf07dfe0d511 Mon Sep 17 00:00:00 2001
From: Rongze Zhu <[email protected]>
Date: Wed, 28 Aug 2013 10:09:46 +0800
Subject: [PATCH] Add rbd imagebackend for Nova
Add three flags for the rbd imagebackend:
* libvirt_images_rbd_pool
* libvirt_images_rbd_ceph_conf
* libvirt_images_rbd_clone_image
If libvirt_images_rbd_clone_image = false, Nova will download the image
from glance and import it into the rbd backend. If
libvirt_images_rbd_clone_image = true, Nova will directly clone the image
from glance's rbd backend; this requires glance's storage backend to be rbd.
Change-Id: Ie5003e18e56fc6d98ddd3b2d909e927afdb27b62
---
etc/nova/nova.conf.sample | 12 +-
etc/nova/rootwrap.d/compute.filters | 3 +
nova/image/glance.py | 5 +
nova/tests/virt/libvirt/fake_libvirt_utils.py | 11 +
nova/tests/virt/libvirt/test_image_utils.py | 5 +
nova/tests/virt/libvirt/test_imagebackend.py | 63 +++++
nova/tests/virt/libvirt/test_libvirt.py | 42 ++++
nova/virt/images.py | 9 +-
nova/virt/libvirt/driver.py | 34 ++-
nova/virt/libvirt/imagebackend.py | 344 +++++++++++++++++++++++++-
nova/virt/libvirt/utils.py | 39 +++
11 files changed, 561 insertions(+), 6 deletions(-)
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 633316b..63d4f45 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -2006,7 +2006,7 @@
# Options defined in nova.virt.libvirt.imagebackend
#
-# VM Images format. Acceptable values are: raw, qcow2, lvm,
+# VM Images format. Acceptable values are: raw, qcow2, lvm, rbd,
# default. If default is specified, then use_cow_images flag
# is used instead of this one. (string value)
#libvirt_images_type=default
@@ -2023,6 +2023,16 @@
# snapshot copy-on-write blocks. (integer value)
#libvirt_lvm_snapshot_size=1000
+# the RADOS pool in which rbd volumes are stored (string
+# value)
+#libvirt_images_rbd_pool=rbd
+
+# path to the ceph configuration file to use (string value)
+#libvirt_images_rbd_ceph_conf=/etc/ceph/ceph.conf
+
+# whether to clone the image from the glance rbd storage backend; nova and
+# glance must use the same ceph cluster (boolean value)
+#libvirt_images_rbd_clone_image=false
#
# Options defined in nova.virt.libvirt.imagecache
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index fe608a9..84a0063 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -204,3 +204,6 @@ xenstore-read: CommandFilter, xenstore-read, root
# nova/virt/baremetal/tilera.py: 'rpc.mountd'
rpc.mountd: CommandFilter, rpc.mountd, root
+
+# nova/virt/libvirt/utils.py:
+rbd: CommandFilter, rbd, root
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 69c6860..56b1251 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -244,6 +244,11 @@ class GlanceImageService(object):
base_image_meta = self._translate_from_glance(image)
return base_image_meta
+ def get_direct_url(self, context, image_id):
+ # Called by the nova.virt.images.get_image_location function;
+ # it simply delegates to the existing get_location method below.
+ return self.get_location(context, image_id)
+
def get_location(self, context, image_id):
"""Returns the direct url representing the backend storage location,
or None if this attribute is not shown by Glance.
diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/virt/libvirt/fake_libvirt_utils.py
index 6ea8c5b..261024f 100644
--- a/nova/tests/virt/libvirt/fake_libvirt_utils.py
+++ b/nova/tests/virt/libvirt/fake_libvirt_utils.py
@@ -204,3 +204,14 @@ def get_instance_path(instance, forceold=False, relative=False):
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
return "qemu"
+
+
+def import_rbd_image(path, *args):
+ pass
+
+
+def remove_rbd_volumes(pool, *names):
+ pass
+
+def list_rbd_volumes(pool):
+ return ['fakevolumes_disk', 'fakevolumes_swap', 'fakevolumes_root']
diff --git a/nova/tests/virt/libvirt/test_image_utils.py b/nova/tests/virt/libvirt/test_image_utils.py
index a9768f8..32dd6d2 100644
--- a/nova/tests/virt/libvirt/test_image_utils.py
+++ b/nova/tests/virt/libvirt/test_image_utils.py
@@ -30,6 +30,11 @@ class ImageUtilsTestCase(test.TestCase):
for p in ['/dev/b', '/dev/blah/blah']:
d_type = libvirt_utils.get_disk_type(p)
self.assertEquals('lvm', d_type)
+
+ # Try rbd detection
+ d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
+ self.assertEquals('rbd', d_type)
+
# Try the other types
template_output = """image: %(path)s
file format: %(format)s
diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py
index d5ed083..4ff629a 100644
--- a/nova/tests/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/virt/libvirt/test_imagebackend.py
@@ -496,6 +496,62 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+class RbdTestCase(_ImageTestCase, test.TestCase):
+ POOL = "FakePool"
+ USER = "FakeUser"
+ CONF = "FakeConf"
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Rbd
+ super(RbdTestCase, self).setUp()
+ self.flags(libvirt_images_rbd_pool=self.POOL)
+ self.flags(rbd_user=self.USER)
+ self.flags(libvirt_images_rbd_ceph_conf=self.CONF)
+ self.libvirt_utils = imagebackend.libvirt_utils
+ self.utils = imagebackend.utils
+ self.rbd = self.mox.CreateMockAnything()
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend, 'rbd')
+ return fn
+
+ def test_create_image(self):
+ fn = self.prepare_mocks()
+ fn(rbd=self.rbd, target=self.TEMPLATE_PATH)
+
+ self.rbd.RBD_FEATURE_LAYERING = 1
+
+ rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
+ cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)
+
+ self.mox.VerifyAll()
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
class BackendTestCase(test.TestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
@@ -529,5 +585,12 @@ class BackendTestCase(test.TestCase):
self.flags(libvirt_images_volume_group='FakeVG')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
+ def test_image_rbd(self):
+ conf = "FakeConf"
+ pool = "FakePool"
+ self.flags(libvirt_images_rbd_pool=pool)
+ self.flags(libvirt_images_rbd_ceph_conf=conf)
+ self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
+
def test_image_default(self):
self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
index af7e0e8..1b64f0c 100644
--- a/nova/tests/virt/libvirt/test_libvirt.py
+++ b/nova/tests/virt/libvirt/test_libvirt.py
@@ -3425,6 +3425,48 @@ class LibvirtConnTestCase(test.TestCase):
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(instance, [])
+ def test_cleanup_rbd(self):
+ mock = self.mox.CreateMock(libvirt.virDomain)
+
+ def fake_lookup_by_name(instance_name):
+ return mock
+
+ def fake_get_info(instance_name):
+ return {'state': power_state.SHUTDOWN, 'id': -1}
+
+
+
+ fake_pool = 'fake_pool'
+ fake_uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid',
+ 'uuid': fake_uuid}
+ fake_volumes = [fake_uuid + '_disk',
+ fake_uuid + '_local',
+ fake_uuid + '_swap',
+ fake_uuid + '_xxx']
+
+ def fake_list_rbd_volumes(pool):
+ return fake_volumes
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ self.stubs.Set(libvirt_driver.libvirt_utils, 'list_rbd_volumes',
+ fake_list_rbd_volumes)
+
+ self.flags(libvirt_images_rbd_pool=fake_pool)
+ self.flags(libvirt_images_type='rbd')
+ self.mox.StubOutWithMock(libvirt_driver.libvirt_utils,
+ 'remove_rbd_volumes')
+ libvirt_driver.libvirt_utils.remove_rbd_volumes(fake_pool,
+ *fake_volumes)
+
+ self.mox.ReplayAll()
+
+ conn._cleanup_rbd(fake_instance)
+
+ self.mox.VerifyAll()
+
def test_destroy_timed_out(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()
diff --git a/nova/virt/images.py b/nova/virt/images.py
index bbb57cd..d517d31 100755
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -164,7 +164,7 @@ class QemuImgInfo(object):
def qemu_img_info(path):
"""Return an object containing the parsed output from qemu-img info."""
- if not os.path.exists(path):
+ if not os.path.exists(path) and CONF.libvirt_images_type != 'rbd':
return QemuImgInfo()
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
@@ -225,3 +225,10 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
os.rename(staged, path)
else:
os.rename(path_tmp, path)
+
+def get_image_location(context, image_href):
+ """Get image's location from glance service"""
+ (image_service, image_id) = glance.get_remote_image_service(context,
+ image_href)
+ location = image_service.get_direct_url(context, image_id)
+ return location
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 4165038..1659e8d 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -927,6 +927,23 @@ class LibvirtDriver(driver.ComputeDriver):
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
+ #NOTE(haomai): destroy rbd volumes if needed
+ if CONF.libvirt_images_type == 'rbd':
+ self._cleanup_rbd(instance)
+
+ def _cleanup_rbd(self, instance):
+ pool = CONF.libvirt_images_rbd_pool
+ volumes = libvirt_utils.list_rbd_volumes(pool)
+ pattern = instance['uuid']
+
+ def belongs_to_instance(disk):
+ return disk.startswith(pattern)
+
+ volumes = filter(belongs_to_instance, volumes)
+
+ if volumes:
+ libvirt_utils.remove_rbd_volumes(pool, *volumes)
+
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
@@ -1189,6 +1206,10 @@ class LibvirtDriver(driver.ComputeDriver):
if image_format == 'lvm':
image_format = 'raw'
+ # NOTE(haomai): save rbd as raw
+ if image_format == 'rbd':
+ image_format = 'raw'
+
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
@@ -1208,7 +1229,7 @@ class LibvirtDriver(driver.ComputeDriver):
if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT) \
- and not source_format == "lvm":
+ and not source_format == "lvm" and not source_format == 'rbd':
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
@@ -1280,6 +1301,17 @@ class LibvirtDriver(driver.ComputeDriver):
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
+
+ if (CONF.libvirt_images_rbd_clone_image and
+ CONF.libvirt_images_type == 'rbd'):
+ location = 'rbd://%s/%s/%s/%s' % (snapshot_backend.get_fsid(),
+ snapshot_backend.pool,
+ snapshot_backend.rbd_name,
+ snapshot_name)
+ metadata['location'] = location
+ image_service.update(context, image_href, metadata)
+ return
+
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 2f29c05..a6409cc 100755
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -18,12 +18,14 @@
import abc
import contextlib
import os
+import urllib
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
@@ -31,11 +33,19 @@ from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
+try:
+ import rbd
+ import rados
+except ImportError:
+ rbd = None
+ rados = None
+
+
__imagebackend_opts = [
cfg.StrOpt('libvirt_images_type',
default='default',
help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
- ' default. If default is specified,'
+ ' rbd, default. If default is specified,'
' then use_cow_images flag is used instead of this one.'),
cfg.StrOpt('libvirt_images_volume_group',
default=None,
@@ -46,9 +56,18 @@ __imagebackend_opts = [
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.IntOpt('libvirt_lvm_snapshot_size',
- default=1000,
- help='The amount of storage (in megabytes) to allocate for LVM'
+ default=1000,
+ help='The amount of storage (in megabytes) to allocate for LVM'
' snapshot copy-on-write blocks.'),
+ cfg.StrOpt('libvirt_images_rbd_pool',
+ default='rbd',
+ help='the RADOS pool in which rbd volumes are stored'),
+ cfg.StrOpt('libvirt_images_rbd_ceph_conf',
+ default='/etc/ceph/ceph.conf',
+ help='path to the ceph configuration file to use'),
+ cfg.BoolOpt('libvirt_images_rbd_clone_image',
+ default=False,
+ help='whether to clone the glance image directly in rbd'),
]
CONF = cfg.CONF
@@ -388,12 +407,284 @@ class Lvm(Image):
libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
+class Rbd(Image):
+ def __init__(self, instance=None, disk_name=None, path=None,
+ snapshot_name=None, **kwargs):
+ super(Rbd, self).__init__("block", "rbd", is_block_dev=True)
+ if path:
+ self.rbd_name = self._get_image_from_path(path)
+ else:
+ self.rbd_name = '%s_%s' % (instance['uuid'], disk_name)
+ self.snapshot_name = snapshot_name
+ self.path = (path or
+ os.path.join(libvirt_utils.get_instance_path(instance),
+ disk_name))
+ if not CONF.libvirt_images_rbd_pool:
+ raise RuntimeError(_('You should specify'
+ ' libvirt_images_rbd_pool'
+ ' flag to use rbd images.'))
+ self.user = CONF.rbd_user
+ self.pool = CONF.libvirt_images_rbd_pool
+ self.ceph_conf = CONF.libvirt_images_rbd_ceph_conf
+ self.rbd = kwargs.get('rbd', rbd)
+ self.rados = kwargs.get('rados', rados)
+
+ def _ceph_args(self):
+ args = []
+ if self.user:
+ args.extend(['--id', self.user])
+ if self.ceph_conf:
+ args.extend(['--conf', self.ceph_conf])
+ return args
+
+ def _connect_to_rados(self, pool=None):
+ client = self.rados.Rados(rados_id=self.user, conffile=self.ceph_conf)
+ try:
+ client.connect()
+ pool_to_open = pool if pool else self.pool
+ ioctx = client.open_ioctx(str(pool_to_open))
+ return client, ioctx
+ except self.rados.Error:
+ client.shutdown()
+ raise
+
+ def _disconnect_from_rados(self, client, ioctx):
+ ioctx.close()
+ client.shutdown()
+
+ def _get_mon_addrs(self):
+ args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args()
+ out, _ = utils.execute(*args)
+ lines = out.split('\n')
+ if lines[0].startswith('dumped monmap epoch'):
+ lines = lines[1:]
+ monmap = jsonutils.loads('\n'.join(lines))
+ addrs = [mon['addr'] for mon in monmap['mons']]
+ hosts = []
+ ports = []
+ for addr in addrs:
+ host_port = addr[:addr.rindex('/')]
+ host, port = host_port.rsplit(':', 1)
+ hosts.append(host.strip('[]'))
+ ports.append(port)
+ return hosts, ports
+
+ def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
+ extra_specs, hypervisor_version):
+ """Get `LibvirtConfigGuestDisk` filled for this image.
+
+ :disk_dev: Disk bus device name
+ :disk_bus: Disk bus type
+ :device_type: Device type for this image.
+ :cache_mode: Caching mode for this image
+ :extra_specs: Instance type extra specs dict.
+ """
+ info = vconfig.LibvirtConfigGuestDisk()
+
+ hosts, ports = self._get_mon_addrs()
+ info.device_type = device_type
+ info.driver_format = 'raw'
+ info.driver_cache = cache_mode
+ info.target_bus = disk_bus
+ info.target_dev = disk_dev
+ info.source_type = 'network'
+ info.source_protocol = 'rbd'
+ info.source_name = '%s/%s' % (self.pool, self.rbd_name)
+ info.source_path = info.source_name
+ info.source_hosts = hosts
+ info.source_ports = ports
+ auth_enabled = (CONF.rbd_user is not None)
+ if CONF.rbd_secret_uuid:
+ info.auth_secret_uuid = CONF.rbd_secret_uuid
+ auth_enabled = True # Force authentication locally
+ if CONF.rbd_user:
+ info.auth_username = CONF.rbd_user
+ if auth_enabled:
+ info.auth_secret_type = 'ceph'
+ info.auth_secret_uuid = CONF.rbd_secret_uuid
+ return info
+
+ def _get_image_from_path(self, path):
+
+ """There are three supported path formats:
+ rbd://fsid/pool/image[/snapshot]
+ rbd:pool/image[/snapshot]
+ pool/image[/snapshot]
+ """
+
+ if path.startswith('rbd://'):
+ prefix = 'rbd://'
+ elif path.startswith('rbd:'):
+ prefix = 'rbd:'
+ else:
+ prefix = ''
+ pieces = map(urllib.unquote, path[len(prefix):].split('/'))
+ if any(map(lambda p: p == '', pieces)):
+ raise exception.InvalidDevicePath(path=path)
+ if (len(pieces) < 2) or (prefix == 'rbd://' and len(pieces) < 3):
+ raise exception.InvalidDevicePath(path=path)
+ if prefix == 'rbd://':
+ return pieces[2]
+ else:
+ return pieces[1]
+
+ def _parse_location(self, location):
+ prefix = 'rbd://'
+ if not location.startswith(prefix):
+ reason = _('Not stored in rbd')
+ raise exception.ImageUnacceptable(image_id=location, reason=reason)
+ pieces = map(urllib.unquote, location[len(prefix):].split('/'))
+ if any(map(lambda p: p == '', pieces)):
+ reason = _('Blank components')
+ raise exception.ImageUnacceptable(image_id=location, reason=reason)
+ if len(pieces) != 4:
+ reason = _('Not an rbd snapshot')
+ raise exception.ImageUnacceptable(image_id=location, reason=reason)
+ return pieces
+
+ def get_fsid(self):
+ with RADOSClient(self) as client:
+ return client.cluster.get_fsid()
+
+ def _is_cloneable(self, image_location):
+ try:
+ fsid, pool, image, snapshot = self._parse_location(image_location)
+ except exception.ImageUnacceptable as e:
+ LOG.debug(_('not cloneable: %s'), e)
+ return False
+
+ if self.get_fsid() != fsid:
+ reason = _('%s is in a different ceph cluster') % image_location
+ LOG.debug(reason)
+ return False
+
+ # check that we can read the image
+ try:
+ with RBDVolumeProxy(self, image,
+ pool=pool,
+ snapshot=snapshot,
+ read_only=True):
+ return True
+ except self.rbd.Error as e:
+ LOG.debug(_('Unable to open image %(loc)s: %(err)s') %
+ dict(loc=image_location, err=e))
+ return False
+
+ def clone_image(self, image_location, image_id, size=None):
+ """Clone the image from glance's rbd backend into this backend."""
+ if image_location is None or not self._is_cloneable(image_location):
+ raise RuntimeError(_('rbd cannot clone image'))
+ prefix, pool, image, snapshot = self._parse_location(image_location)
+ self._clone(pool, image, snapshot)
+ self._resize(size)
+
+ def _resize(self, size):
+ if size:
+ with RBDVolumeProxy(self, self.rbd_name) as vol:
+ vol.resize(int(size))
+
+ def _clone(self, src_pool, src_image, src_snap):
+ LOG.debug(_('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s') %
+ dict(pool=src_pool, img=src_image, snap=src_snap,
+ dst=self.rbd_name))
+ with RADOSClient(self, src_pool) as src_client:
+ with RADOSClient(self) as dest_client:
+ self.rbd.RBD().clone(src_client.ioctx,
+ str(src_image),
+ str(src_snap),
+ dest_client.ioctx,
+ str(self.rbd_name),
+ features=self.rbd.RBD_FEATURE_LAYERING)
+
+ def cache(self, fetch_func, filename, size=None, *args, **kwargs):
+ """Creates image from template.
+
+ Ensures that the template and image do not already exist.
+ Ensures that the base directory exists.
+ Synchronizes on template fetching.
+
+ :fetch_func: Function that creates the base image
+ Should accept `target` argument.
+ :filename: Name of the file in the image directory
+ :size: Size of created image in bytes (optional)
+ """
+ if CONF.libvirt_images_rbd_clone_image:
+ image_location = images.get_image_location(kwargs.get('context'),
+ kwargs.get('image_id'))
+ self.clone_image(image_location, kwargs.get('image_id'), size)
+ return
+
+ @utils.synchronized(filename, external=True, lock_path=self.lock_path)
+ def call_if_not_exists(target, *args, **kwargs):
+ if not os.path.exists(target):
+ fetch_func(target=target, *args, **kwargs)
+ elif CONF.libvirt_images_type == "lvm" and \
+ 'ephemeral_size' in kwargs:
+ fetch_func(target=target, *args, **kwargs)
+
+ base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
+ if not os.path.exists(base_dir):
+ fileutils.ensure_tree(base_dir)
+ base = os.path.join(base_dir, filename)
+
+ if not os.path.exists(self.path) or not os.path.exists(base):
+ self.create_image(call_if_not_exists, base, size,
+ *args, **kwargs)
+
+ if size and self.preallocate and self._can_fallocate():
+ utils.execute('fallocate', '-n', '-l', size, self.path)
+
+ def create_image(self, prepare_template, base, size, *args, **kwargs):
+ if self.rbd is None:
+ raise RuntimeError(_('rbd python libraries not found'))
+
+ old_format = True
+ features = 0
+ if self._supports_layering():
+ old_format = False
+ features = self.rbd.RBD_FEATURE_LAYERING
+
+ if not os.path.exists(base):
+ prepare_template(target=base, *args, **kwargs)
+
+ # keep using the command line import instead of librbd since it
+ # detects zeroes to preserve sparseness in the image
+ args = ['--pool', self.pool, base, self.rbd_name]
+ if self._supports_layering():
+ args += ['--new-format']
+ args += self._ceph_args()
+ libvirt_utils.import_rbd_image(self.path, *args)
+
+ def _supports_layering(self):
+ return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
+
+ def snapshot_create(self):
+ with RBDVolumeProxy(self, self.rbd_name) as vol:
+ vol.create_snap(self.snapshot_name)
+ if (CONF.libvirt_images_rbd_clone_image and
+ self._supports_layering()):
+ vol.protect_snap(self.snapshot_name)
+
+ def snapshot_extract(self, target, out_format):
+ if not CONF.libvirt_images_rbd_clone_image:
+ snap = 'rbd:%s/%s/%s' % (self.pool,
+ self.rbd_name,
+ self.snapshot_name)
+ images.convert_image(snap, target, out_format)
+
+ def snapshot_delete(self):
+ if not CONF.libvirt_images_rbd_clone_image:
+ with RBDVolumeProxy(self, self.rbd_name) as vol:
+ vol.remove_snap(self.snapshot_name)
+
+
class Backend(object):
def __init__(self, use_cow):
self.BACKEND = {
'raw': Raw,
'qcow2': Qcow2,
'lvm': Lvm,
+ 'rbd': Rbd,
'default': Qcow2 if use_cow else Raw
}
@@ -425,3 +716,50 @@ class Backend(object):
"""
backend = self.backend(image_type)
return backend(path=disk_path, snapshot_name=snapshot_name)
+
+
+def ascii_str(string):
+ return string if string is None else str(string)
+
+
+class RBDVolumeProxy(object):
+
+ def __init__(self, driver, name, pool=None, snapshot=None,
+ read_only=False):
+ client, ioctx = driver._connect_to_rados(pool)
+ try:
+ self.volume = driver.rbd.Image(ioctx, str(name),
+ snapshot=ascii_str(snapshot),
+ read_only=read_only)
+ except driver.rbd.Error:
+ LOG.exception(_("error opening rbd image %s"), name)
+ driver._disconnect_from_rados(client, ioctx)
+ raise
+ self.driver = driver
+ self.client = client
+ self.ioctx = ioctx
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ try:
+ self.volume.close()
+ finally:
+ self.driver._disconnect_from_rados(self.client, self.ioctx)
+
+ def __getattr__(self, attrib):
+ return getattr(self.volume, attrib)
+
+
+class RADOSClient(object):
+
+ def __init__(self, driver, pool=None):
+ self.driver = driver
+ self.cluster, self.ioctx = driver._connect_to_rados(pool)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ self.driver._disconnect_from_rados(self.cluster, self.ioctx)
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index eb963b9..7806177 100755
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -21,6 +21,7 @@
# under the License.
import errno
+import json
import os
from lxml import etree
@@ -257,6 +258,41 @@ def create_lvm_image(vg, lv, size, sparse=False):
execute(*cmd, run_as_root=True, attempts=3)
+def import_rbd_image(path, *args):
+ execute('rbd', 'import', *args)
+ execute('touch', path)
+
+
+def list_rbd_volumes(pool):
+ """List volume names for the given ceph pool.
+
+ :param pool: ceph pool name
+ """
+ out, err = utils.execute('rbd', '-p', pool, 'ls')
+
+ return [line.strip() for line in out.splitlines()]
+
+
+def remove_rbd_volumes(pool, *names):
+ """Remove one or more rbd volumes."""
+ for name in names:
+ # NOTE(rongze) if there are snapshots of the volume, we cannot remove
+ # the volume.
+ out, err = utils.execute('rbd', '--pool', pool,
+ '--image', name,
+ '--format', 'json',
+ 'snap', 'ls')
+ if err:
+ continue
+ else:
+ snaps = json.loads(out)
+ if len(snaps) > 0:
+ continue
+
+ rbd_remove = ('rbd', '-p', pool, 'rm', name)
+ execute(*rbd_remove, attempts=3, run_as_root=True)
+
+
def get_volume_group_info(vg):
"""Return free/used/total space info for a volume group in bytes
@@ -585,6 +621,9 @@ def get_disk_type(path):
if path.startswith('/dev'):
return 'lvm'
+ if path.startswith('rbd'):
+ return 'rbd'
+
return images.qemu_img_info(path).file_format
--
1.7.11.3
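
The clone path only works when Glance itself stores images in rbd in the same Ceph cluster as Nova: the direct URL that Glance reports is an rbd://fsid/pool/image/snapshot URI, and _is_cloneable above rejects any location whose fsid differs from the local cluster's. The standalone Python 2 sketch below (not part of the patch) re-implements that check outside Nova as a rough illustration; the URI and fsid are made-up example values.

# Hypothetical, self-contained illustration of the location check behind
# libvirt_images_rbd_clone_image=true (mirrors _parse_location/_is_cloneable).
from urllib import unquote   # Python 2, matching the patch


def parse_rbd_location(location):
    """Split 'rbd://fsid/pool/image/snapshot' into its four components."""
    prefix = 'rbd://'
    if not location.startswith(prefix):
        raise ValueError('image is not stored in rbd: %s' % location)
    pieces = [unquote(p) for p in location[len(prefix):].split('/')]
    if len(pieces) != 4 or '' in pieces:
        raise ValueError('not an rbd snapshot URI: %s' % location)
    return pieces   # [fsid, pool, image, snapshot]


def is_cloneable(location, local_fsid):
    """Cloning requires the image to live in the local Ceph cluster."""
    try:
        fsid = parse_rbd_location(location)[0]
    except ValueError:
        return False
    return fsid == local_fsid


# Made-up example: a Glance location plus the fsid of the local cluster.
loc = 'rbd://599ab4c0-83f1-4a5c-9a9e-0f9f0ec6bb01/images/img-1234/snap'
print is_cloneable(loc, '599ab4c0-83f1-4a5c-9a9e-0f9f0ec6bb01')   # True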