It can't delete snapshots yet because Vitastor layer merge isn't implemented. You can only delete volumes together with all of their snapshots. This will be fixed in the near future.
parent
4be761254c
commit
e90bbe6385
@ -0,0 +1,948 @@ |
||||
# Vitastor Driver for OpenStack Cinder |
||||
# |
||||
# -------------------------------------------- |
||||
# Install as cinder/volume/drivers/vitastor.py |
||||
# -------------------------------------------- |
||||
# |
||||
# Copyright 2020 Vitaliy Filippov |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
||||
# not use this file except in compliance with the License. You may obtain |
||||
# a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
||||
# License for the specific language governing permissions and limitations |
||||
# under the License. |
||||
"""Cinder Vitastor Driver""" |
||||
|
||||
import binascii |
||||
import base64 |
||||
import errno |
||||
import json |
||||
import math |
||||
import os |
||||
import tempfile |
||||
|
||||
from castellan import key_manager |
||||
from oslo_config import cfg |
||||
from oslo_log import log as logging |
||||
from oslo_service import loopingcall |
||||
from oslo_concurrency import processutils |
||||
from oslo_utils import encodeutils |
||||
from oslo_utils import excutils |
||||
from oslo_utils import fileutils |
||||
from oslo_utils import units |
||||
import six |
||||
from six.moves.urllib import request |
||||
|
||||
from cinder import exception |
||||
from cinder.i18n import _ |
||||
from cinder.image import image_utils |
||||
from cinder import interface |
||||
from cinder import objects |
||||
from cinder.objects import fields |
||||
from cinder import utils |
||||
from cinder.volume import configuration |
||||
from cinder.volume import driver |
||||
from cinder.volume import volume_utils |
||||
|
||||
VERSION = '0.6.4' |
||||
|
||||
LOG = logging.getLogger(__name__) |
||||
|
||||
VITASTOR_OPTS = [ |
||||
cfg.StrOpt( |
||||
'vitastor_config_path', |
||||
default='/etc/vitastor/vitastor.conf', |
||||
help='Vitastor configuration file path' |
||||
), |
||||
cfg.StrOpt( |
||||
'vitastor_etcd_address', |
||||
default='', |
||||
help='Vitastor etcd address(es)'), |
||||
cfg.StrOpt( |
||||
'vitastor_etcd_prefix', |
||||
default='/vitastor', |
||||
help='Vitastor etcd prefix' |
||||
), |
||||
cfg.StrOpt( |
||||
'vitastor_pool_id', |
||||
default='', |
||||
help='Vitastor pool ID to use for volumes' |
||||
), |
||||
# FIXME exclusive_cinder_pool ? |
||||
] |
||||
|
||||
CONF = cfg.CONF |
||||
CONF.register_opts(VITASTOR_OPTS, group = configuration.SHARED_CONF_GROUP) |
||||
|
||||
class VitastorDriverException(exception.VolumeDriverException):
    # Generic driver-level failure; callers supply %(reason)s.
    message = _("Vitastor Cinder driver failure: %(reason)s")
|
||||
@interface.volumedriver |
||||
class VitastorDriver(driver.CloneableImageVD, |
||||
driver.ManageableVD, driver.ManageableSnapshotsVD, |
||||
driver.BaseVD): |
||||
"""Implements Vitastor volume commands.""" |
||||
|
||||
cfg = {} |
||||
_etcd_urls = [] |
||||
|
||||
    def __init__(self, active_backend_id = None, *args, **kwargs):
        # active_backend_id is accepted (replication-style signature) but not
        # used by this driver.
        super(VitastorDriver, self).__init__(*args, **kwargs)
        # Register the vitastor_* options on this backend's configuration.
        self.configuration.append_config_values(VITASTOR_OPTS)
|
||||
@classmethod |
||||
def get_driver_options(cls): |
||||
additional_opts = cls._get_oslo_driver_opts( |
||||
'reserved_percentage', |
||||
'max_over_subscription_ratio', |
||||
'volume_dd_blocksize' |
||||
) |
||||
return VITASTOR_OPTS + additional_opts |
||||
|
||||
def do_setup(self, context): |
||||
"""Performs initialization steps that could raise exceptions.""" |
||||
super(VitastorDriver, self).do_setup(context) |
||||
# Make sure configuration is in UTF-8 |
||||
for attr in [ 'config_path', 'etcd_address', 'etcd_prefix', 'pool_id' ]: |
||||
val = self.configuration.safe_get('vitastor_'+attr) |
||||
if val is not None: |
||||
self.cfg[attr] = utils.convert_str(val) |
||||
self.cfg = self._load_config(self.cfg) |
||||
|
||||
def _load_config(self, cfg): |
||||
# Try to load configuration file |
||||
try: |
||||
f = open(cfg['config_path'] or '/etc/vitastor/vitastor.conf') |
||||
conf = json.loads(f.read()) |
||||
f.close() |
||||
for k in conf: |
||||
cfg[k] = cfg.get(k, conf[k]) |
||||
except: |
||||
pass |
||||
if isinstance(cfg['etcd_address'], str): |
||||
cfg['etcd_address'] = cfg['etcd_address'].split(',') |
||||
# Sanitize etcd URLs |
||||
for i, etcd_url in enumerate(cfg['etcd_address']): |
||||
ssl = False |
||||
if etcd_url.lower().startswith('http://'): |
||||
etcd_url = etcd_url[7:] |
||||
elif etcd_url.lower().startswith('https://'): |
||||
etcd_url = etcd_url[8:] |
||||
ssl = True |
||||
if etcd_url.find('/') < 0: |
||||
etcd_url += '/v3' |
||||
if ssl: |
||||
etcd_url = 'https://'+etcd_url |
||||
else: |
||||
etcd_url = 'http://'+etcd_url |
||||
cfg['etcd_address'][i] = etcd_url |
||||
return cfg |
||||
|
||||
def check_for_setup_error(self): |
||||
"""Returns an error if prerequisites aren't met.""" |
||||
|
||||
def _encode_etcd_key(self, key): |
||||
if not isinstance(key, bytes): |
||||
key = str(key).encode('utf-8') |
||||
return base64.b64encode(self.cfg['etcd_prefix'].encode('utf-8')+b'/'+key).decode('utf-8') |
||||
|
||||
def _encode_etcd_value(self, value): |
||||
if not isinstance(value, bytes): |
||||
value = str(value).encode('utf-8') |
||||
return base64.b64encode(value).decode('utf-8') |
||||
|
||||
def _encode_etcd_requests(self, obj): |
||||
for v in obj: |
||||
for rt in v: |
||||
if 'key' in v[rt]: |
||||
v[rt]['key'] = self._encode_etcd_key(v[rt]['key']) |
||||
if 'range_end' in v[rt]: |
||||
v[rt]['range_end'] = self._encode_etcd_key(v[rt]['range_end']) |
||||
if 'value' in v[rt]: |
||||
v[rt]['value'] = self._encode_etcd_value(v[rt]['value']) |
||||
|
||||
    def _etcd_txn(self, params):
        """Execute an etcd v3 transaction over the HTTP/JSON gateway.

        ``params`` may contain 'compare', 'success' and 'failure' lists;
        their keys and values are base64-encoded in place before sending.
        Each configured etcd address is tried in order.  On success the
        response is post-processed: range responses get plain-text keys
        (with the etcd prefix stripped) and JSON-decoded values, and each
        responses[i] is unwrapped from its {'response_xxx': {...}} envelope.
        Raises the last caught exception if every address fails.
        """
        if 'compare' in params:
            for v in params['compare']:
                if 'key' in v:
                    v['key'] = self._encode_etcd_key(v['key'])
        if 'failure' in params:
            self._encode_etcd_requests(params['failure'])
        if 'success' in params:
            self._encode_etcd_requests(params['success'])
        body = json.dumps(params).encode('utf-8')
        headers = {
            'Content-Type': 'application/json'
        }
        err = None
        for etcd_url in self.cfg['etcd_address']:
            try:
                resp = request.urlopen(request.Request(etcd_url+'/kv/txn', body, headers), timeout = 5)
                data = json.loads(resp.read())
                if 'responses' not in data:
                    data['responses'] = []
                # NOTE(review): the loop below reuses 'resp' as its loop
                # variable, shadowing the HTTP response object above.
                for i, resp in enumerate(data['responses']):
                    if 'response_range' in resp:
                        if 'kvs' not in resp['response_range']:
                            resp['response_range']['kvs'] = []
                        for kv in resp['response_range']['kvs']:
                            # etcd returns keys and values base64-encoded
                            kv['key'] = base64.b64decode(kv['key'].encode('utf-8')).decode('utf-8')
                            if kv['key'].startswith(self.cfg['etcd_prefix']+'/'):
                                kv['key'] = kv['key'][len(self.cfg['etcd_prefix'])+1 : ]
                            kv['value'] = json.loads(base64.b64decode(kv['value'].encode('utf-8')))
                    if len(resp.keys()) != 1:
                        LOG.exception('unknown responses['+str(i)+'] format: '+json.dumps(resp))
                    else:
                        # unwrap {'response_xxx': {...}} to just {...}
                        resp = data['responses'][i] = resp[list(resp.keys())[0]]
                return data
            except Exception as e:
                LOG.exception('error calling etcd transaction: '+body.decode('utf-8')+'\nerror: '+str(e))
                err = e
        # NOTE(review): if cfg['etcd_address'] is empty this raises None
        # (a TypeError) instead of a meaningful error -- presumably the
        # address list is always non-empty after _load_config; confirm.
        raise err
|
||||
def _etcd_foreach(self, prefix, add_fn): |
||||
total = 0 |
||||
batch = 1000 |
||||
begin = prefix+'/' |
||||
while True: |
||||
resp = self._etcd_txn({ 'success': [ |
||||
{ 'request_range': { |
||||
'key': begin, |
||||
'range_end': prefix+'0', |
||||
'limit': batch+1, |
||||
} }, |
||||
] }) |
||||
i = 0 |
||||
while i < batch and i < len(resp['responses'][0]['kvs']): |
||||
kv = resp['responses'][0]['kvs'][i] |
||||
add_fn(kv) |
||||
i += 1 |
||||
if len(resp['responses'][0]['kvs']) <= batch: |
||||
break |
||||
begin = resp['responses'][0]['kvs'][batch]['key'] |
||||
return total |
||||
|
||||
def _update_volume_stats(self): |
||||
location_info = json.dumps({ |
||||
'config': self.configuration.vitastor_config_path, |
||||
'etcd_address': self.configuration.vitastor_etcd_address, |
||||
'etcd_prefix': self.configuration.vitastor_etcd_prefix, |
||||
'pool_id': self.configuration.vitastor_pool_id, |
||||
}) |
||||
|
||||
stats = { |
||||
'vendor_name': 'Vitastor', |
||||
'driver_version': self.VERSION, |
||||
'storage_protocol': 'vitastor', |
||||
'total_capacity_gb': 'unknown', |
||||
'free_capacity_gb': 'unknown', |
||||
# FIXME check if safe_get is required |
||||
'reserved_percentage': self.configuration.safe_get('reserved_percentage'), |
||||
'multiattach': True, |
||||
'thin_provisioning_support': True, |
||||
'max_over_subscription_ratio': self.configuration.safe_get('max_over_subscription_ratio'), |
||||
'location_info': location_info, |
||||
'backend_state': 'down', |
||||
'volume_backend_name': self.configuration.safe_get('volume_backend_name') or 'vitastor', |
||||
'replication_enabled': False, |
||||
} |
||||
|
||||
try: |
||||
pool_stats = self._etcd_txn({ 'success': [ |
||||
{ 'request_range': { 'key': 'pool/stats/'+str(self.cfg['pool_id']) } } |
||||
] }) |
||||
total_provisioned = 0 |
||||
def add_total(kv): |
||||
nonlocal total_provisioned |
||||
if kv['key'].find('@') >= 0: |
||||
total_provisioned += kv['value']['size'] |
||||
self._etcd_foreach('config/inode/'+str(self.cfg['pool_id']), lambda kv: add_total(kv)) |
||||
stats['provisioned_capacity_gb'] = round(total_provisioned/1024.0/1024.0/1024.0, 2) |
||||
pool_stats = pool_stats['responses'][0]['kvs'] |
||||
if len(pool_stats): |
||||
pool_stats = pool_stats[0] |
||||
stats['free_capacity_gb'] = round(1024.0*(pool_stats['total_raw_tb']-pool_stats['used_raw_tb'])/pool_stats['raw_to_usable'], 2) |
||||
stats['total_capacity_gb'] = round(1024.0*pool_stats['total_raw_tb'], 2) |
||||
stats['backend_state'] = 'up' |
||||
except Exception as e: |
||||
# just log and return unknown capacities |
||||
LOG.exception('error getting vitastor pool stats: '+str(e)) |
||||
|
||||
self._stats = stats |
||||
|
||||
def _next_id(self, resp): |
||||
if len(resp['kvs']) == 0: |
||||
return (1, 0) |
||||
else: |
||||
return (1 + resp['kvs'][0]['value'], resp['kvs'][0]['mod_revision']) |
||||
|
||||
def create_volume(self, volume): |
||||
"""Creates a logical volume.""" |
||||
|
||||
size = int(volume.size) * units.Gi |
||||
# FIXME: Check if convert_str is really required |
||||
vol_name = utils.convert_str(volume.name) |
||||
if vol_name.find('@') >= 0 or vol_name.find('/') >= 0: |
||||
raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names') |
||||
|
||||
LOG.debug("creating volume '%s'", vol_name) |
||||
|
||||
self._create_image(vol_name, { 'size': size }) |
||||
|
||||
if volume.encryption_key_id: |
||||
self._create_encrypted_volume(volume, volume.obj_context) |
||||
|
||||
volume_update = {} |
||||
return volume_update |
||||
|
||||
    def _create_encrypted_volume(self, volume, context):
        """Create a new LUKS encrypted image directly in Vitastor."""
        vol_name = utils.convert_str(volume.name)
        # f is the NamedTemporaryFile holding the passphrase; it must stay
        # open until qemu-img has read it.
        f, opts = self._encrypt_opts(volume, context)
        # FIXME: Check if it works at all :-)
        self._execute(
            'qemu-img', 'convert', '-f', 'luks', *opts,
            # ':' must be escaped for qemu's option parser
            'vitastor:image='+vol_name.replace(':', '\\:')+self._qemu_args(),
            '%sM' % (volume.size * 1024)
        )
        f.close()
|
||||
def _encrypt_opts(self, volume, context): |
||||
encryption = volume_utils.check_encryption_provider(self.db, volume, context) |
||||
# Fetch the key associated with the volume and decode the passphrase |
||||
keymgr = key_manager.API(CONF) |
||||
key = keymgr.get(context, encryption['encryption_key_id']) |
||||
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8') |
||||
# Decode the dm-crypt style cipher spec into something qemu-img can use |
||||
cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size']) |
||||
tmp_dir = volume_utils.image_conversion_dir() |
||||
f = tempfile.NamedTemporaryFile(prefix = 'luks_', dir = tmp_dir) |
||||
f.write(passphrase) |
||||
f.flush() |
||||
return (f, [ |
||||
'--object', 'secret,id=luks_sec,format=raw,file=%(passfile)s' % {'passfile': f.name}, |
||||
'-o', 'key-secret=luks_sec,cipher-alg=%(cipher_alg)s,cipher-mode=%(cipher_mode)s,ivgen-alg=%(ivgen_alg)s' % cipher_spec, |
||||
]) |
||||
|
||||
def create_snapshot(self, snapshot): |
||||
"""Creates a volume snapshot.""" |
||||
|
||||
vol_name = utils.convert_str(snapshot.volume_name) |
||||
snap_name = utils.convert_str(snapshot.name) |
||||
if snap_name.find('@') >= 0 or snap_name.find('/') >= 0: |
||||
raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names') |
||||
self._create_snapshot(vol_name, vol_name+'@'+snap_name) |
||||
|
||||
def snapshot_revert_use_temp_snapshot(self): |
||||
"""Disable the use of a temporary snapshot on revert.""" |
||||
return False |
||||
|
||||
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert a volume to a given snapshot.

        Currently a no-op.
        """
        # FIXME Delete the image, then recreate it from the snapshot
||||
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        Currently always fails after validation: removing a snapshot would
        require merging its layer, which is not implemented yet.  Raises
        SnapshotNotFound / SnapshotIsBusy for the corresponding conditions
        first so callers get accurate errors.
        """
        vol_name = utils.convert_str(snapshot.volume_name)
        snap_name = utils.convert_str(snapshot.name)

        # Find the snapshot
        resp = self._etcd_txn({ 'success': [
            { 'request_range': { 'key': 'index/image/'+vol_name+'@'+snap_name } },
        ] })
        if len(resp['responses'][0]['kvs']) == 0:
            raise exception.SnapshotNotFound(snapshot_id = snap_name)
        inode_id = int(resp['responses'][0]['kvs'][0]['value']['id'])
        pool_id = int(resp['responses'][0]['kvs'][0]['value']['pool_id'])
        parents = {}
        # inode addresses are (pool_id << 48) | inode_id, as in _check_parent
        parents[(pool_id << 48) | (inode_id & 0xffffffffffff)] = True

        # Check if there are child volumes
        children = self._child_count(parents)
        if children > 0:
            raise exception.SnapshotIsBusy(snapshot_name = snap_name)

        # FIXME: We can't delete snapshots because we can't merge layers yet
        raise exception.VolumeBackendAPIException(data = 'Snapshot delete (layer merge) is not implemented yet')
|
||||
def _child_count(self, parents): |
||||
children = 0 |
||||
def add_child(kv): |
||||
nonlocal children |
||||
children += self._check_parent(kv, parents) |
||||
self._etcd_foreach('config/inode', lambda kv: add_child(kv)) |
||||
return children |
||||
|
||||
def _check_parent(self, kv, parents): |
||||
if 'parent_id' not in kv['value']: |
||||
return 0 |
||||
parent_id = kv['value']['parent_id'] |
||||
_, _, pool_id, inode_id = kv['key'].split('/') |
||||
parent_pool_id = pool_id |
||||
if 'parent_pool_id' in kv['value'] and kv['value']['parent_pool_id']: |
||||
parent_pool_id = kv['value']['parent_pool_id'] |
||||
inode = (int(pool_id) << 48) | (int(inode_id) & 0xffffffffffff) |
||||
parent = (int(parent_pool_id) << 48) | (int(parent_id) & 0xffffffffffff) |
||||
if parent in parents and inode not in parents: |
||||
return 1 |
||||
return 0 |
||||
|
||||
    def create_cloned_volume(self, volume, src_vref):
        """Create a cloned volume from another volume.

        Readonly sources are cloned directly; otherwise an intermediate
        '<src>@<dest>.clone_snap' snapshot layer is created first and the
        new volume is parented on it.
        """
        size = int(volume.size) * units.Gi
        src_name = utils.convert_str(src_vref.name)
        dest_name = utils.convert_str(volume.name)
        if dest_name.find('@') >= 0 or dest_name.find('/') >= 0:
            raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names')

        # FIXME Do full copy if requested (cfg.disable_clone)

        if src_vref.admin_metadata.get('readonly') == 'True':
            # source volume is a volume-image cache entry or other readonly volume
            # clone without intermediate snapshot
            src = self._get_image(src_name)
            LOG.debug("creating image '%s' from '%s'", dest_name, src_name)
            new_cfg = self._create_image(dest_name, {
                'size': size,
                'parent_id': src['idx']['id'],
                'parent_pool_id': src['idx']['pool_id'],
            })
            return {}

        clone_snap = "%s@%s.clone_snap" % (src_name, dest_name)
        make_img = True
        if (volume.display_name and
            volume.display_name.startswith('image-') and
            src_vref.project_id != volume.project_id):
            # idiotic openstack creates image-volume cache entries
            # as clones of normal VM volumes... :-X prevent it :-D
            # just rename the snapshot layer to the destination name instead
            clone_snap = dest_name
            make_img = False

        LOG.debug("creating layer '%s' under '%s'", clone_snap, src_name)
        # allow_existing=True: reuse the clone snapshot if it is already there
        new_cfg = self._create_snapshot(src_name, clone_snap, True)
        if make_img:
            # Then create a clone from it
            new_cfg = self._create_image(dest_name, {
                'size': size,
                'parent_id': new_cfg['parent_id'],
                'parent_pool_id': new_cfg['parent_pool_id'],
            })

        return {}
|
||||
def create_volume_from_snapshot(self, volume, snapshot): |
||||
"""Creates a cloned volume from an existing snapshot.""" |
||||
|
||||
vol_name = utils.convert_str(volume.name) |
||||
snap_name = utils.convert_str(snapshot.name) |
||||
|
||||
snap = self._get_image(vol_name+'@'+snap_name) |
||||
if not snap: |
||||
raise exception.SnapshotNotFound(snapshot_id = snap_name) |
||||
snap_inode_id = int(resp['responses'][0]['kvs'][0]['value']['id']) |
||||
snap_pool_id = int(resp['responses'][0]['kvs'][0]['value']['pool_id']) |
||||
|
||||
size = snap['cfg']['size'] |
||||
if int(volume.size): |
||||
size = int(volume.size) * units.Gi |
||||
new_cfg = self._create_image(vol_name, { |
||||
'size': size, |
||||
'parent_id': snap['idx']['id'], |
||||
'parent_pool_id': snap['idx']['pool_id'], |
||||
}) |
||||
|
||||
return {} |
||||
|
||||
def _vitastor_args(self): |
||||
args = [] |
||||
for k in [ 'config_path', 'etcd_address', 'etcd_prefix' ]: |
||||
v = self.configuration.safe_get('vitastor_'+k) |
||||
if v: |
||||
args.extend(['--'+k, v]) |
||||
return args |
||||
|
||||
def _qemu_args(self): |
||||
args = '' |
||||
for k in [ 'config_path', 'etcd_address', 'etcd_prefix' ]: |
||||
v = self.configuration.safe_get('vitastor_'+k) |
||||
kk = k |
||||
if kk == 'etcd_address': |
||||
# FIXME use etcd_address in qemu driver |
||||
kk = 'etcd_host' |
||||
if v: |
||||
args += ':'+kk+'='+v.replace(':', '\\:') |
||||
return args |
||||
|
||||
def delete_volume(self, volume): |
||||
"""Deletes a logical volume.""" |
||||
|
||||
vol_name = utils.convert_str(volume.name) |
||||
|
||||
# Find the volume and all its snapshots |
||||
range_end = b'index/image/' + vol_name.encode('utf-8') |
||||
range_end = range_end[0 : len(range_end)-1] + six.int2byte(range_end[len(range_end)-1] + 1) |
||||
resp = self._etcd_txn({ 'success': [ |
||||
{ 'request_range': { 'key': 'index/image/'+vol_name, 'range_end': range_end } }, |
||||
] }) |
||||
if len(resp['responses'][0]['kvs']) == 0: |
||||
# already deleted |
||||
LOG.info("volume %s no longer exists in backend", vol_name) |
||||
return |
||||
layers = resp['responses'][0]['kvs'] |
||||
layer_ids = {} |
||||
for kv in layers: |
||||
inode_id = int(kv['value']['id']) |
||||
pool_id = int(kv['value']['pool_id']) |
||||
inode_pool_id = (pool_id << 48) | (inode_id & 0xffffffffffff) |
||||
layer_ids[inode_pool_id] = True |
||||
|
||||
# Check if the volume has clones and raise 'busy' if so |
||||
children = self._child_count(layer_ids) |
||||
if children > 0: |
||||
raise exception.VolumeIsBusy(volume_name = vol_name) |
||||
|
||||
# Clear data |
||||
for kv in layers: |
||||
args = [ |
||||
'vitastor-rm', '--pool', str(kv['value']['pool_id']), |
||||
'--inode', str(kv['value']['id']), '--progress', '0', |
||||
*(self._vitastor_args()) |
||||
] |
||||
try: |
||||
self._execute(*args) |
||||
except processutils.ProcessExecutionError as exc: |
||||
LOG.error("Failed to remove layer "+kv['key']+": "+exc) |
||||
raise exception.VolumeBackendAPIException(data = exc.stderr) |
||||
|
||||
# Delete all layers from etcd |
||||
requests = [] |
||||
for kv in layers: |
||||
requests.append({ 'request_delete_range': { 'key': kv['key'] } }) |
||||
requests.append({ 'request_delete_range': { 'key': 'config/inode/'+str(kv['value']['pool_id'])+'/'+str(kv['value']['id']) } }) |
||||
self._etcd_txn({ 'success': requests }) |
||||
|
||||
def retype(self, context, volume, new_type, diff, host): |
||||
"""Change extra type specifications for a volume.""" |
||||
|
||||
# FIXME Maybe (in the future) support multiple pools as different types |
||||
return True, {} |
||||
|
||||
def ensure_export(self, context, volume): |
||||
"""Synchronously recreates an export for a logical volume.""" |
||||
pass |
||||
|
||||
def create_export(self, context, volume, connector): |
||||
"""Exports the volume.""" |
||||
pass |
||||
|
||||
def remove_export(self, context, volume): |
||||
"""Removes an export for a logical volume.""" |
||||
pass |
||||
|
||||
    def _create_image(self, vol_name, cfg):
        """Allocate a new inode ID and register the image in etcd.

        Uses an optimistic-concurrency loop: a free ID is read from
        index/maxid, then a transaction whose compare clauses require all
        three keys to be untouched writes the new index entry and inode
        config.  On a lost race the loop retries with a fresh ID.
        Raises VolumeBackendAPIException if the image name already exists.
        """
        pool_s = str(self.cfg['pool_id'])
        image_id = 0
        while image_id == 0:
            # check if the image already exists and find a free ID
            resp = self._etcd_txn({ 'success': [
                { 'request_range': { 'key': 'index/image/'+vol_name } },
                { 'request_range': { 'key': 'index/maxid/'+pool_s } },
            ] })
            if len(resp['responses'][0]['kvs']) > 0:
                # already exists
                raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' already exists')
            image_id, id_mod = self._next_id(resp['responses'][1])
            # try to create the image: maxid must be unmodified and the
            # index/config keys must still be absent (VERSION == 0)
            resp = self._etcd_txn({ 'compare': [
                { 'target': 'MOD', 'mod_revision': id_mod, 'key': 'index/maxid/'+pool_s },
                { 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+vol_name },
                { 'target': 'VERSION', 'version': 0, 'key': 'config/inode/'+pool_s+'/'+str(image_id) },
            ], 'success': [
                { 'request_put': { 'key': 'index/maxid/'+pool_s, 'value': image_id } },
                { 'request_put': { 'key': 'index/image/'+vol_name, 'value': json.dumps({
                    'id': image_id, 'pool_id': self.cfg['pool_id']
                }) } },
                { 'request_put': { 'key': 'config/inode/'+pool_s+'/'+str(image_id), 'value': json.dumps({
                    **cfg, 'name': vol_name,
                }) } },
            ] })
            if not resp.get('succeeded'):
                # somebody raced with us -- retry with a new ID
                image_id = 0
|
||||
    def _create_snapshot(self, vol_name, snap_vol_name, allow_existing = False):
        """Snapshot ``vol_name`` by redirecting it to a fresh inode.

        The volume's current inode becomes a readonly layer named
        ``snap_vol_name``; the volume itself is re-pointed at a new inode
        whose parent is the old one.  With ``allow_existing=True`` an
        already-existing snapshot's inode config is returned instead of
        raising.  Returns the new writable inode config.  Retries
        optimistically until the etcd transaction succeeds.
        """
        while True:
            # check if the image already exists and snapshot doesn't
            resp = self._etcd_txn({ 'success': [
                { 'request_range': { 'key': 'index/image/'+vol_name } },
                { 'request_range': { 'key': 'index/image/'+snap_vol_name } },
            ] })
            if len(resp['responses'][0]['kvs']) == 0:
                raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
            if len(resp['responses'][1]['kvs']) > 0:
                if allow_existing:
                    snap_idx = resp['responses'][1]['kvs'][0]['value']
                    resp = self._etcd_txn({ 'success': [
                        { 'request_range': { 'key': 'config/inode/'+str(snap_idx['pool_id'])+'/'+str(snap_idx['id']) } },
                    ] })
                    if len(resp['responses'][0]['kvs']) == 0:
                        # index entry without inode config: inconsistent state
                        raise exception.VolumeBackendAPIException(data =
                            'Volume '+snap_vol_name+' is already indexed, but does not exist'
                        )
                    return resp['responses'][0]['kvs'][0]['value']
                raise exception.VolumeBackendAPIException(
                    data = 'Volume '+snap_vol_name+' already exists'
                )
            vol_idx = resp['responses'][0]['kvs'][0]['value']
            vol_idx_mod = resp['responses'][0]['kvs'][0]['mod_revision']
            # get image inode config and find a new ID
            resp = self._etcd_txn({ 'success': [
                { 'request_range': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) } },
                { 'request_range': { 'key': 'index/maxid/'+str(self.cfg['pool_id']) } },
            ] })
            if len(resp['responses'][0]['kvs']) == 0:
                raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
            vol_cfg = resp['responses'][0]['kvs'][0]['value']
            vol_mod = resp['responses'][0]['kvs'][0]['mod_revision']
            new_id, id_mod = self._next_id(resp['responses'][1])
            # try to redirect image to the new inode
            new_cfg = {
                **vol_cfg, 'name': vol_name, 'parent_id': vol_idx['id'], 'parent_pool_id': vol_idx['pool_id']
            }
            # all five compares must hold, otherwise another writer raced us
            # and the whole loop restarts from a fresh read
            resp = self._etcd_txn({ 'compare': [
                { 'target': 'MOD', 'mod_revision': vol_idx_mod, 'key': 'index/image/'+vol_name },
                { 'target': 'MOD', 'mod_revision': vol_mod, 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) },
                { 'target': 'MOD', 'mod_revision': id_mod, 'key': 'index/maxid/'+str(self.cfg['pool_id']) },
                { 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+snap_vol_name },
                { 'target': 'VERSION', 'version': 0, 'key': 'config/inode/'+str(self.cfg['pool_id'])+'/'+str(new_id) },
            ], 'success': [
                { 'request_put': { 'key': 'index/maxid/'+str(self.cfg['pool_id']), 'value': new_id } },
                { 'request_put': { 'key': 'index/image/'+vol_name, 'value': json.dumps({
                    'id': new_id, 'pool_id': self.cfg['pool_id']
                }) } },
                { 'request_put': { 'key': 'config/inode/'+str(self.cfg['pool_id'])+'/'+str(new_id), 'value': json.dumps(new_cfg) } },
                { 'request_put': { 'key': 'index/image/'+snap_vol_name, 'value': json.dumps({
                    'id': vol_idx['id'], 'pool_id': vol_idx['pool_id']
                }) } },
                { 'request_put': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']), 'value': json.dumps({
                    **vol_cfg, 'name': snap_vol_name, 'readonly': True
                }) } }
            ] })
            if resp.get('succeeded'):
                return new_cfg
|
||||
def initialize_connection(self, volume, connector): |
||||
data = { |
||||
'driver_volume_type': 'vitastor', |
||||
'data': { |
||||
'config_path': self.configuration.vitastor_config_path, |
||||
'etcd_address': self.configuration.vitastor_etcd_address, |
||||
'etcd_prefix': self.configuration.vitastor_etcd_prefix, |
||||
'name': volume.name, |
||||
'logical_block_size': 512, |
||||
'physical_block_size': 4096, |
||||
} |
||||
} |
||||
LOG.debug('connection data: %s', data) |
||||
return data |
||||
|
||||
def terminate_connection(self, volume, connector, **kwargs): |
||||
pass |
||||
|
||||
    def clone_image(self, context, volume, image_location, image_meta, image_service):
        """Try to clone the volume from a cinder:// Glance location.

        Returns ({}, True) when a cloneable location was used, ({}, False)
        otherwise (Cinder then falls back to a full image copy).
        """
        if image_location:
            # Note: image_location[0] is glance image direct_url.
            # image_location[1] contains the list of all locations (including
            # direct_url) or None if show_multiple_locations is False in
            # glance configuration.
            if image_location[1]:
                url_locations = [location['url'] for location in image_location[1]]
            else:
                url_locations = [image_location[0]]
            # iterate all locations to look for a cloneable one.
            for url_location in url_locations:
                if url_location and url_location.startswith('cinder://'):
                    # The idea is to use cinder://<volume-id> Glance volumes as base images
                    base_vol = self.db.volume_get(context, url_location[len('cinder://') : ])
                    # only clone volumes of the same volume type
                    if not base_vol or base_vol.volume_type_id != volume.volume_type_id:
                        continue
                    size = int(volume.size) * units.Gi
                    dest_name = utils.convert_str(volume.name)
                    # Find or create the base snapshot (allow_existing=True)
                    snap_cfg = self._create_snapshot(base_vol.name, base_vol.name+'@.clone_snap', True)
                    # Then create a clone from it
                    new_cfg = self._create_image(dest_name, {
                        'size': size,
                        'parent_id': snap_cfg['parent_id'],
                        'parent_pool_id': snap_cfg['parent_pool_id'],
                    })
                    return ({}, True)
        return ({}, False)
|
||||
def copy_image_to_encrypted_volume(self, context, volume, image_service, image_id): |
||||
self.copy_image_to_volume(context, volume, image_service, image_id, encrypted = True) |
||||
|
||||
def copy_image_to_volume(self, context, volume, image_service, image_id, encrypted = False): |
||||
tmp_dir = volume_utils.image_conversion_dir() |
||||
with tempfile.NamedTemporaryFile(dir = tmp_dir) as tmp: |
||||
image_utils.fetch_to_raw( |
||||
context, image_service, image_id, tmp.name, |
||||
self.configuration.volume_dd_blocksize, size = volume.size |
||||
) |
||||
out_format = [ '-O', 'raw' ] |
||||
if encrypted: |
||||
key_file, opts = self._encrypt_opts(volume, context) |
||||
out_format = [ '-O', 'luks', *opts ] |
||||
dest_name = utils.convert_str(volume.name) |
||||
self._try_execute( |
||||
'qemu-img', 'convert', '-f', 'raw', tmp.name, *out_format, |
||||
'vitastor:image='+dest_name.replace(':', '\\:')+self._qemu_args() |
||||
) |
||||
if encrypted: |
||||
key_file.close() |
||||
|
||||
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Export the volume to a temporary raw file and upload it to Glance."""
        tmp_dir = volume_utils.image_conversion_dir()
        tmp_file = os.path.join(tmp_dir, volume.name + '-' + image_meta['id'])
        # remove_path_on_error cleans up the temp file if the export fails
        with fileutils.remove_path_on_error(tmp_file):
            vol_name = utils.convert_str(volume.name)
            self._try_execute(
                'qemu-img', 'convert', '-f', 'raw',
                'vitastor:image='+vol_name.replace(':', '\\:')+self._qemu_args(),
                '-O', 'raw', tmp_file
            )
            # FIXME: Copy directly if the destination image is also in Vitastor
            volume_utils.upload_volume(context, image_service, image_meta, tmp_file, volume)
        os.unlink(tmp_file)
|
||||
def _get_image(self, vol_name): |
||||
# find the image |
||||
resp = self._etcd_txn({ 'success': [ |
||||
{ 'request_range': { 'key': 'index/image/'+vol_name } }, |
||||
] }) |
||||
if len(resp['responses'][0]['kvs']) == 0: |
||||
return None |
||||
vol_idx = resp['responses'][0]['kvs'][0]['value'] |
||||
vol_idx_mod = resp['responses'][0]['kvs'][0]['mod_revision'] |
||||
# get image inode config |
||||
resp = self._etcd_txn({ 'success': [ |
||||
{ 'request_range': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) } }, |
||||
] }) |
||||
if len(resp['responses'][0]['kvs']) == 0: |
||||
return None |
||||
vol_cfg = resp['responses'][0]['kvs'][0]['value'] |
||||
vol_cfg_mod = resp['responses'][0]['kvs'][0]['mod_revision'] |
||||
return { |
||||
'cfg': vol_cfg, |
||||
'cfg_mod': vol_cfg_mod, |
||||
'idx': vol_idx, |
||||
'idx_mod': vol_idx_mod, |
||||
} |
||||
|
||||
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        Uses a compare-and-set retry loop: the inode config is re-read and
        the size update retried until it commits against an unmodified
        mod_revision (or the size already matches).
        """
        vol_name = utils.convert_str(volume.name)
        while True:
            vol = self._get_image(vol_name)
            if not vol:
                raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
            # change size
            size = int(new_size) * units.Gi
            if size == vol['cfg']['size']:
                # already the requested size -- nothing to do
                break
            resp = self._etcd_txn({ 'compare': [ {
                'target': 'MOD',
                'mod_revision': vol['cfg_mod'],
                'key': 'config/inode/'+str(vol['idx']['pool_id'])+'/'+str(vol['idx']['id']),
            } ], 'success': [
                { 'request_put': {
                    'key': 'config/inode/'+str(vol['idx']['pool_id'])+'/'+str(vol['idx']['id']),
                    'value': json.dumps({ **vol['cfg'], 'size': size }),
                } },
            ] })
            if resp.get('succeeded'):
                break
        LOG.debug(
            "Extend volume from %(old_size)s GB to %(new_size)s GB.",
            {'old_size': volume.size, 'new_size': new_size}
        )
|
||||
def _add_manageable_volume(self, kv, manageable_volumes, cinder_ids):
    """Append a manageable-volume record for one etcd inode config entry.

    :param kv: decoded etcd key/value pair for a ``config/inode/...`` key
    :param manageable_volumes: list the record is appended to
    :param cinder_ids: IDs of volumes already managed by Cinder
    """
    cfg = kv['value']
    if kv['key'].find('@') >= 0:
        # Keys containing '@' are snapshots, not manageable volumes.
        return
    image_id = volume_utils.extract_id_from_volume_name(cfg['name'])
    image_info = {
        # BUG FIX: the original referenced an undefined name `image_name`
        # (NameError at runtime); the image name comes from the inode config.
        'reference': {'source-name': cfg['name']},
        'size': int(math.ceil(float(cfg['size']) / units.Gi)),
        'cinder_id': None,
        'extra_info': None,
    }
    if image_id in cinder_ids:
        image_info['cinder_id'] = image_id
        image_info['safe_to_manage'] = False
        image_info['reason_not_safe'] = 'already managed'
    else:
        image_info['safe_to_manage'] = True
        image_info['reason_not_safe'] = None
    manageable_volumes.append(image_info)
||||
|
||||
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs):
    """List volumes in the Vitastor pool that Cinder could manage."""
    candidates = []
    managed_ids = [resource['id'] for resource in cinder_volumes]
    # Walk every inode config in our pool.
    # FIXME: It's possible to use pagination in our case, but.. do we want it?
    self._etcd_foreach('config/inode/'+str(self.cfg['pool_id']),
        lambda kv: self._add_manageable_volume(kv, candidates, managed_ids))
    return volume_utils.paginate_entries_list(
        candidates, marker, limit, offset, sort_keys, sort_dirs)
||||
|
||||
def _get_existing_name(self, existing_ref):
    """Extract and validate the source name from a manage_existing reference.

    BUG FIX: this helper is invoked as ``self._get_existing_name(...)``
    (see manage_existing_get_size / manage_existing /
    manage_existing_snapshot_get_size) but was defined without a ``self``
    parameter, so every call raised TypeError.

    :param existing_ref: dict with a ``source-name`` key, or a bare name
    :returns: the non-empty source name as a str
    :raises ManageExistingInvalidReference: when the name is missing or empty
    """
    if not isinstance(existing_ref, dict):
        existing_ref = {"source-name": existing_ref}
    if 'source-name' not in existing_ref:
        reason = _('Reference must contain source-name element.')
        raise exception.ManageExistingInvalidReference(existing_ref=existing_ref, reason=reason)
    src_name = utils.convert_str(existing_ref['source-name'])
    if not src_name:
        reason = _('Reference must contain source-name element.')
        raise exception.ManageExistingInvalidReference(existing_ref=existing_ref, reason=reason)
    return src_name
||||
|
||||
def manage_existing_get_size(self, volume, existing_ref):
    """Return the size of an existing image for manage_existing.

    :param volume: volume ref info to be set
    :param existing_ref: {'source-name': <image name>}
    :returns: image size in whole GB (rounded up)
    """
    src_name = self._get_existing_name(existing_ref)
    img = self._get_image(src_name)
    if not img:
        raise exception.VolumeBackendAPIException(data = 'Volume '+src_name+' does not exist')
    size_bytes = float(img['cfg']['size'])
    return int(math.ceil(size_bytes / units.Gi))
||||
|
||||
def manage_existing(self, volume, existing_ref):
    """Manage an existing image.

    Renames the image so its name matches what Cinder expects for this
    volume.

    :param volume: volume ref info to be set
    :param existing_ref: {'source-name': <image name>}
    """
    source_name = self._get_existing_name(existing_ref)
    target_name = utils.convert_str(volume.name)
    self._rename(source_name, target_name)
||||
|
||||
def _rename(self, from_name, to_name):
    """Atomically rename an image in etcd (index entry + inode config).

    Retries the transaction until the compare-and-swap succeeds.

    :raises VolumeBackendAPIException: source missing or destination exists
    """
    while True:
        src = self._get_image(from_name)
        if not src:
            raise exception.VolumeBackendAPIException(data = 'Volume '+from_name+' does not exist')
        if self._get_image(to_name):
            raise exception.VolumeBackendAPIException(data = 'Volume '+to_name+' already exists')
        inode_key = 'config/inode/'+str(src['idx']['pool_id'])+'/'+str(src['idx']['id'])
        txn = {
            'compare': [
                { 'target': 'MOD', 'mod_revision': src['idx_mod'], 'key': 'index/image/'+src['cfg']['name'] },
                { 'target': 'MOD', 'mod_revision': src['cfg_mod'], 'key': inode_key },
                # VERSION == 0 asserts the destination key does not exist.
                { 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+to_name },
            ],
            'success': [
                { 'request_delete_range': { 'key': 'index/image/'+src['cfg']['name'] } },
                { 'request_put': { 'key': 'index/image/'+to_name, 'value': json.dumps(src['idx']) } },
                { 'request_put': { 'key': inode_key,
                    'value': json.dumps({ **src['cfg'], 'name': to_name }) } },
            ],
        }
        if self._etcd_txn(txn).get('succeeded'):
            return
||||
|
||||
def unmanage(self, volume):
    """Remove the volume from Cinder management (backend keeps the image)."""
    pass
||||
|
||||
def _add_manageable_snapshot(self, kv, manageable_snapshots, cinder_ids):
    """Append a manageable-snapshot record for one etcd inode config entry.

    :param kv: decoded etcd key/value pair for a ``config/inode/...`` key
    :param manageable_snapshots: list the record is appended to
    :param cinder_ids: IDs of snapshots already managed by Cinder
    """
    cfg = kv['value']
    sep = kv['key'].find('@')
    if sep < 0:
        # No '@' separator: this key is a plain volume, not a snapshot.
        return
    image_name = kv['key'][0 : sep]
    snap_name = kv['key'][sep+1 : ]
    snapshot_id = volume_utils.extract_id_from_snapshot_name(snap_name)
    snapshot_info = {
        'reference': {'source-name': snap_name},
        'size': int(math.ceil(float(cfg['size']) / units.Gi)),
        'cinder_id': None,
        'extra_info': None,
        'safe_to_manage': False,
        'reason_not_safe': None,
        'source_reference': {'source-name': image_name}
    }
    if snapshot_id in cinder_ids:
        # Already managed by Cinder - exclude it.
        snapshot_info['cinder_id'] = snapshot_id
        snapshot_info['reason_not_safe'] = ('already managed')
    elif snap_name.endswith('.clone_snap'):
        # Internal snapshot created for cloning - exclude it.
        snapshot_info['reason_not_safe'] = ('used for clone snap')
    else:
        snapshot_info['safe_to_manage'] = True
    manageable_snapshots.append(snapshot_info)
||||
|
||||
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs):
    """List manageable snapshots in Vitastor."""
    manageable_snapshots = []
    cinder_snapshot_ids = [resource['id'] for resource in cinder_snapshots]
    # List all volumes
    # FIXME: It's possible to use pagination in our case, but.. do we want it?
    # BUG FIX: this previously called _add_manageable_volume, which returns
    # early for every key containing '@' - so no snapshot was ever listed.
    self._etcd_foreach('config/inode/'+str(self.cfg['pool_id']),
        lambda kv: self._add_manageable_snapshot(kv, manageable_snapshots, cinder_snapshot_ids))
    return volume_utils.paginate_entries_list(
        manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs)
||||
|
||||
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
    """Return the size (GB) of an existing snapshot for manage_existing.

    :param snapshot: snapshot ref info to be set
    :param existing_ref: {'source-name': <name of snapshot>}
    :raises ManageExistingInvalidReference: snapshot does not exist
    """
    vol_name = utils.convert_str(snapshot.volume_name)
    snap_name = self._get_existing_name(existing_ref)
    vol = self._get_image(vol_name+'@'+snap_name)
    if not vol:
        # BUG FIX: the original passed the undefined name `snapshot_name`
        # here, raising NameError instead of the intended exception.
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason='Specified snapshot does not exist.'
        )
    return int(math.ceil(float(vol['cfg']['size']) / units.Gi))
||||
|
||||
def manage_existing_snapshot(self, snapshot, existing_ref):
    """Manage an existing snapshot.

    Renames the snapshot so its name matches what Cinder expects.
    Error checking done by manage_existing_get_size is not repeated.

    :param snapshot: snapshot ref info to be set
    :param existing_ref: {'source-name': <name of snapshot>}
    """
    vol_name = utils.convert_str(snapshot.volume_name)
    old_snap = self._get_existing_name(existing_ref)
    new_snap = utils.convert_str(snapshot.name)
    self._rename(vol_name+'@'+old_snap, vol_name+'@'+new_snap)
||||
|
||||
def unmanage_snapshot(self, snapshot):
    """Removes the specified snapshot from Cinder management."""
    # Nothing to do on the backend side; the snapshot stays in Vitastor.
    pass
||||
|
||||
def _dumps(self, obj): |
||||
return json.dumps(obj, separators=(',', ':'), sort_keys=True) |
@ -0,0 +1,23 @@ |
||||
# Devstack configuration for bridged networking |
||||
|
||||
[[local|localrc]] |
||||
ADMIN_PASSWORD=secret |
||||
DATABASE_PASSWORD=$ADMIN_PASSWORD |
||||
RABBIT_PASSWORD=$ADMIN_PASSWORD |
||||
SERVICE_PASSWORD=$ADMIN_PASSWORD |
||||
HOST_IP=10.0.2.15 |
||||
Q_USE_SECGROUP=True |
||||
FLOATING_RANGE="10.0.2.0/24" |
||||
IPV4_ADDRS_SAFE_TO_USE="10.0.5.0/24" |
||||
Q_FLOATING_ALLOCATION_POOL=start=10.0.2.50,end=10.0.2.100 |
||||
PUBLIC_NETWORK_GATEWAY=10.0.2.2 |
||||
PUBLIC_INTERFACE=ens3 |
||||
Q_USE_PROVIDERNET_FOR_PUBLIC=True |
||||
Q_AGENT=linuxbridge |
||||
Q_ML2_PLUGIN_MECHANISM_DRIVERS=linuxbridge |
||||
LB_PHYSICAL_INTERFACE=ens3 |
||||
PUBLIC_PHYSICAL_NETWORK=default |
||||
LB_INTERFACE_MAPPINGS=default:ens3 |
||||
Q_SERVICE_PLUGIN_CLASSES= |
||||
Q_ML2_PLUGIN_TYPE_DRIVERS=flat |
||||
Q_ML2_PLUGIN_EXT_DRIVERS= |
@ -0,0 +1,287 @@ |
||||
diff --git a/nova/virt/image/model.py b/nova/virt/image/model.py
|
||||
index 971f7e9c07..70ed70d5e2 100644
|
||||
--- a/nova/virt/image/model.py
|
||||
+++ b/nova/virt/image/model.py
|
||||
@@ -129,3 +129,22 @@ class RBDImage(Image):
|
||||
self.user = user
|
||||
self.password = password
|
||||
self.servers = servers
|
||||
+
|
||||
+
|
||||
+class VitastorImage(Image):
|
||||
+ """Class for images in a remote Vitastor cluster"""
|
||||
+
|
||||
+ def __init__(self, name, etcd_address = None, etcd_prefix = None, config_path = None):
|
||||
+ """Create a new Vitastor image object
|
||||
+
|
||||
+ :param name: name of the image
|
||||
+ :param etcd_address: etcd URL(s) (optional)
|
||||
+ :param etcd_prefix: etcd prefix (optional)
|
||||
+ :param config_path: path to the configuration (optional)
|
||||
+ """
|
||||
+ super(VitastorImage, self).__init__(FORMAT_RAW)
|
||||
+
|
||||
+ self.name = name
|
||||
+ self.etcd_address = etcd_address
|
||||
+ self.etcd_prefix = etcd_prefix
|
||||
+ self.config_path = config_path
|
||||
diff --git a/nova/virt/images.py b/nova/virt/images.py
|
||||
index 5358f3766a..ebe3d6effb 100644
|
||||
--- a/nova/virt/images.py
|
||||
+++ b/nova/virt/images.py
|
||||
@@ -41,7 +41,7 @@ IMAGE_API = glance.API()
|
||||
|
||||
def qemu_img_info(path, format=None):
|
||||
"""Return an object containing the parsed output from qemu-img info."""
|
||||
- if not os.path.exists(path) and not path.startswith('rbd:'):
|
||||
+ if not os.path.exists(path) and not path.startswith('rbd:') and not path.startswith('vitastor:'):
|
||||
raise exception.DiskNotFound(location=path)
|
||||
|
||||
info = nova.privsep.qemu.unprivileged_qemu_img_info(path, format=format)
|
||||
@@ -50,7 +50,7 @@ def qemu_img_info(path, format=None):
|
||||
|
||||
def privileged_qemu_img_info(path, format=None, output_format='json'):
|
||||
"""Return an object containing the parsed output from qemu-img info."""
|
||||
- if not os.path.exists(path) and not path.startswith('rbd:'):
|
||||
+ if not os.path.exists(path) and not path.startswith('rbd:') and not path.startswith('vitastor:'):
|
||||
raise exception.DiskNotFound(location=path)
|
||||
|
||||
info = nova.privsep.qemu.privileged_qemu_img_info(path, format=format)
|
||||
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
|
||||
index f9475776b3..51573fe41d 100644
|
||||
--- a/nova/virt/libvirt/config.py
|
||||
+++ b/nova/virt/libvirt/config.py
|
||||
@@ -1060,6 +1060,8 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
|
||||
self.driver_iommu = False
|
||||
self.source_path = None
|
||||
self.source_protocol = None
|
||||
+ self.source_query = None
|
||||
+ self.source_config = None
|
||||
self.source_name = None
|
||||
self.source_hosts = []
|
||||
self.source_ports = []
|
||||
@@ -1186,7 +1188,8 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
|
||||
elif self.source_type == "mount":
|
||||
dev.append(etree.Element("source", dir=self.source_path))
|
||||
elif self.source_type == "network" and self.source_protocol:
|
||||
- source = etree.Element("source", protocol=self.source_protocol)
|
||||
+ source = etree.Element("source", protocol=self.source_protocol,
|
||||
+ query=self.source_query, config=self.source_config)
|
||||
if self.source_name is not None:
|
||||
source.set('name', self.source_name)
|
||||
hosts_info = zip(self.source_hosts, self.source_ports)
|
||||
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
|
||||
index 391231c527..34dc60dcdd 100644
|
||||
--- a/nova/virt/libvirt/driver.py
|
||||
+++ b/nova/virt/libvirt/driver.py
|
||||
@@ -179,6 +179,7 @@ VOLUME_DRIVERS = {
|
||||
'local': 'nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
|
||||
'fake': 'nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
|
||||
'rbd': 'nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
|
||||
+ 'vitastor': 'nova.virt.libvirt.volume.vitastor.LibvirtVitastorVolumeDriver',
|
||||
'nfs': 'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
|
||||
'smbfs': 'nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
|
||||
'fibre_channel': 'nova.virt.libvirt.volume.fibrechannel.LibvirtFibreChannelVolumeDriver', # noqa:E501
|
||||
@@ -385,10 +386,10 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
# This prevents the risk of one test setting a capability
|
||||
# which bleeds over into other tests.
|
||||
|
||||
- # LVM and RBD require raw images. If we are not configured to
|
||||
+ # LVM, RBD, Vitastor require raw images. If we are not configured to
|
||||
# force convert images into raw format, then we _require_ raw
|
||||
# images only.
|
||||
- raw_only = ('rbd', 'lvm')
|
||||
+ raw_only = ('rbd', 'lvm', 'vitastor')
|
||||
requires_raw_image = (CONF.libvirt.images_type in raw_only and
|
||||
not CONF.force_raw_images)
|
||||
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
|
||||
@@ -775,12 +776,12 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
# Some imagebackends are only able to import raw disk images,
|
||||
# and will fail if given any other format. See the bug
|
||||
# https://bugs.launchpad.net/nova/+bug/1816686 for more details.
|
||||
- if CONF.libvirt.images_type in ('rbd',):
|
||||
+ if CONF.libvirt.images_type in ('rbd', 'vitastor'):
|
||||
if not CONF.force_raw_images:
|
||||
msg = _("'[DEFAULT]/force_raw_images = False' is not "
|
||||
- "allowed with '[libvirt]/images_type = rbd'. "
|
||||
+ "allowed with '[libvirt]/images_type = rbd' or 'vitastor'. "
|
||||
"Please check the two configs and if you really "
|
||||
- "do want to use rbd as images_type, set "
|
||||
+ "do want to use rbd or vitastor as images_type, set "
|
||||
"force_raw_images to True.")
|
||||
raise exception.InvalidConfiguration(msg)
|
||||
|
||||
@@ -2603,6 +2604,16 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
if connection_info['data'].get('auth_enabled'):
|
||||
username = connection_info['data']['auth_username']
|
||||
path = f"rbd:{volume_name}:id={username}"
|
||||
+ elif connection_info['driver_volume_type'] == 'vitastor':
|
||||
+ volume_name = connection_info['data']['name']
|
||||
+ path = 'vitastor:image='+volume_name.replace(':', '\\:')
|
||||
+ for k in [ 'config_path', 'etcd_address', 'etcd_prefix' ]:
|
||||
+ if k in connection_info['data']:
|
||||
+ kk = k
|
||||
+ if kk == 'etcd_address':
|
||||
+ # FIXME use etcd_address in qemu driver
|
||||
+ kk = 'etcd_host'
|
||||
+ path += ":"+kk+"="+connection_info['data'][k].replace(':', '\\:')
|
||||
else:
|
||||
path = 'unknown'
|
||||
raise exception.DiskNotFound(location='unknown')
|
||||
@@ -2827,8 +2838,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
|
||||
image_format = CONF.libvirt.snapshot_image_format or source_type
|
||||
|
||||
- # NOTE(bfilippov): save lvm and rbd as raw
|
||||
- if image_format == 'lvm' or image_format == 'rbd':
|
||||
+ # NOTE(bfilippov): save lvm and rbd and vitastor as raw
|
||||
+ if image_format == 'lvm' or image_format == 'rbd' or image_format == 'vitastor':
|
||||
image_format = 'raw'
|
||||
|
||||
metadata = self._create_snapshot_metadata(instance.image_meta,
|
||||
@@ -2899,7 +2910,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
expected_state=task_states.IMAGE_UPLOADING)
|
||||
|
||||
# TODO(nic): possibly abstract this out to the root_disk
|
||||
- if source_type == 'rbd' and live_snapshot:
|
||||
+ if (source_type == 'rbd' or source_type == 'vitastor') and live_snapshot:
|
||||
# Standard snapshot uses qemu-img convert from RBD which is
|
||||
# not safe to run with live_snapshot.
|
||||
live_snapshot = False
|
||||
@@ -4099,7 +4110,7 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
# cleanup rescue volume
|
||||
lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
|
||||
if lvmdisk.endswith('.rescue')])
|
||||
- if CONF.libvirt.images_type == 'rbd':
|
||||
+ if CONF.libvirt.images_type == 'rbd' or CONF.libvirt.images_type == 'vitastor':
|
||||
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
|
||||
disk.endswith('.rescue'))
|
||||
rbd_utils.RBDDriver().cleanup_volumes(filter_fn)
|
||||
@@ -4356,6 +4367,8 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
# TODO(mikal): there is a bug here if images_type has
|
||||
# changed since creation of the instance, but I am pretty
|
||||
# sure that this bug already exists.
|
||||
+ if CONF.libvirt.images_type == 'vitastor':
|
||||
+ return 'vitastor'
|
||||
return 'rbd' if CONF.libvirt.images_type == 'rbd' else 'raw'
|
||||
|
||||
@staticmethod
|
||||
@@ -4764,10 +4777,10 @@ class LibvirtDriver(driver.ComputeDriver):
|
||||
finally:
|
||||
# NOTE(mikal): if the config drive was imported into RBD,
|
||||
# then we no longer need the local copy
|
||||
- if CONF.libvirt.images_type == 'rbd':
|
||||
+ if CONF.libvirt.images_type == 'rbd' or CONF.libvirt.images_type == 'vitastor':
|
||||
LOG.info('Deleting local config drive %(path)s '
|
||||
- 'because it was imported into RBD.',
|
||||
- {'path': config_disk_local_path},
|
||||
+ 'because it was imported into %(type)s.',
|
||||
+ {'path': config_disk_local_path, 'type': CONF.libvirt.images_type},
|
||||
instance=instance)
|
||||
os.unlink(config_disk_local_path)
|
||||
|
||||
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
|
||||
index da2a6e8b8a..52c02e72f1 100644
|
||||
--- a/nova/virt/libvirt/utils.py
|
||||
+++ b/nova/virt/libvirt/utils.py
|
||||
@@ -340,6 +340,10 @@ def find_disk(guest: libvirt_guest.Guest) -> ty.Tuple[str, ty.Optional[str]]:
|
||||
disk_path = disk.source_name
|
||||
if disk_path:
|
||||
disk_path = 'rbd:' + disk_path
|
||||
+ elif not disk_path and disk.source_protocol == 'vitastor':
|
||||
+ disk_path = disk.source_name
|
||||
+ if disk_path:
|
||||
+ disk_path = 'vitastor:' + disk_path
|
||||
|
||||
if not disk_path:
|
||||
raise RuntimeError(_("Can't retrieve root device path "
|
||||
@@ -354,6 +358,8 @@ def get_disk_type_from_path(path: str) -> ty.Optional[str]:
|
||||
return 'lvm'
|
||||
elif path.startswith('rbd:'):
|
||||
return 'rbd'
|
||||
+ elif path.startswith('vitastor:'):
|
||||
+ return 'vitastor'
|
||||
elif (os.path.isdir(path) and
|
||||
os.path.exists(os.path.join(path, "DiskDescriptor.xml"))):
|
||||
return 'ploop'
|
||||
diff --git a/nova/virt/libvirt/volume/vitastor.py b/nova/virt/libvirt/volume/vitastor.py
|
||||
new file mode 100644
|
||||
index 0000000000..0256df62c1
|
||||
--- /dev/null
|
||||
+++ b/nova/virt/libvirt/volume/vitastor.py
|
||||
@@ -0,0 +1,75 @@
|
||||
+# Copyright (c) 2021+, Vitaliy Filippov <vitalif@yourcmc.ru>
|
||||
+#
|
||||
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
+# not use this file except in compliance with the License. You may obtain
|
||||
+# a copy of the License at
|
||||
+#
|
||||
+# http://www.apache.org/licenses/LICENSE-2.0
|
||||
+#
|
||||
+# Unless required by applicable law or agreed to in writing, software
|
||||
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
+# License for the specific language governing permissions and limitations
|
||||
+# under the License.
|
||||
+
|
||||
+from os_brick import exception as os_brick_exception
|
||||
+from os_brick import initiator
|
||||
+from os_brick.initiator import connector
|
||||
+from oslo_log import log as logging
|
||||
+
|
||||
+import nova.conf
|
||||
+from nova import utils
|
||||
+from nova.virt.libvirt.volume import volume as libvirt_volume
|
||||
+
|
||||
+
|
||||
+CONF = nova.conf.CONF
|
||||
+LOG = logging.getLogger(__name__)
|
||||
+
|
||||
+
|
||||
+class LibvirtVitastorVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
|
||||
+ """Driver to attach Vitastor volumes to libvirt."""
|
||||
+ def __init__(self, host):
|
||||
+ super(LibvirtVitastorVolumeDriver, self).__init__(host, is_block_dev=False)
|
||||
+
|
||||
+ def connect_volume(self, connection_info, instance):
|
||||
+ pass
|
||||
+
|
||||
+ def disconnect_volume(self, connection_info, instance):
|
||||
+ pass
|
||||
+
|
||||
+ def get_config(self, connection_info, disk_info):
|
||||
+ """Returns xml for libvirt."""
|
||||
+ conf = super(LibvirtVitastorVolumeDriver, self).get_config(connection_info, disk_info)
|
||||
+ conf.source_type = 'network'
|
||||
+ conf.source_protocol = 'vitastor'
|
||||
+ conf.source_name = connection_info['data'].get('name')
|
||||
+ conf.source_query = connection_info['data'].get('etcd_prefix') or None
|
||||
+ conf.source_config = connection_info['data'].get('config_path') or None
|
||||
+ conf.source_hosts = []
|
||||
+ conf.source_ports = []
|
||||
+ addresses = connection_info['data'].get('etcd_address', '')
|
||||
+ if addresses:
|
||||
+ if not isinstance(addresses, list):
|
||||
+ addresses = addresses.split(',')
|
||||
+ for addr in addresses:
|
||||
+ if addr.startswith('https://'):
|
||||
+ raise NotImplementedError('Vitastor block driver does not support SSL for etcd communication yet')
|
||||
+ if addr.startswith('http://'):
|
||||
+ addr = addr[7:]
|
||||
+ addr = addr.rstrip('/')
|
||||
+ if addr.endswith('/v3'):
|
||||
+ addr = addr[0:-3]
|
||||
+ p = addr.find('/')
|
||||
+ if p > 0:
|
||||
+ raise NotImplementedError('libvirt does not support custom URL paths for Vitastor etcd yet. Use /etc/vitastor/vitastor.conf')
|
||||
+ p = addr.find(':')
|
||||
+ port = '2379'
|
||||
+ if p > 0:
|
||||
+ port = addr[p+1:]
|
||||
+ addr = addr[0:p]
|
||||
+ conf.source_hosts.append(addr)
|
||||
+ conf.source_ports.append(port)
|
||||
+ return conf
|
||||
+
|
||||
+ def extend_volume(self, connection_info, instance, requested_size):
|
||||
+ raise NotImplementedError
|
Loading…
Reference in new issue