Coverage for manila/share/drivers/generic.py: 93%
498 statements

# Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Driver for shares."""

import os
import time

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units

from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers import service_instance
from manila import ssh_utils
from manila import utils
from manila import volume

LOG = log.getLogger(__name__)

share_opts = [
    cfg.StrOpt('smb_template_config_path',
               default='$state_path/smb.conf',
               help="Path to smb config."),
    cfg.StrOpt('volume_name_template',
               default='manila-share-%s',
               help="Volume name template."),
    cfg.StrOpt('volume_snapshot_name_template',
               default='manila-snapshot-%s',
               help="Volume snapshot name template."),
    cfg.StrOpt('share_mount_path',
               default='/shares',
               help="Parent path in service instance where shares "
                    "will be mounted."),
    cfg.IntOpt('max_time_to_create_volume',
               default=180,
               help="Maximum time to wait for creating cinder volume."),
    cfg.IntOpt('max_time_to_extend_volume',
               default=180,
               help="Maximum time to wait for extending cinder volume."),
    cfg.IntOpt('max_time_to_attach',
               default=120,
               help="Maximum time to wait for attaching cinder volume."),
    cfg.StrOpt('service_instance_smb_config_path',
               default='$share_mount_path/smb.conf',
               help="Path to SMB config in service instance."),
    cfg.ListOpt('share_helpers',
                default=[
                    'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
                    'NFS=manila.share.drivers.helpers.NFSHelper',
                ],
                help='Specify list of share export helpers.'),
    cfg.StrOpt('share_volume_fstype',
               default='ext4',
               choices=['ext4', 'ext3'],
               help='Filesystem type of the share volume.'),
    cfg.StrOpt('cinder_volume_type',
               help='Name or id of cinder volume type which will be used '
                    'for all volumes created by driver.'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)

# NOTE(u_glide): These constants refer to the column number in the "df" output
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2


def ensure_server(f):

    def wrap(self, context, *args, **kwargs):
        server = kwargs.get('share_server')

        if not self.driver_handles_share_servers:
            if not server:
                server = self.service_instance_manager.get_common_server()
                kwargs['share_server'] = server
            else:
                raise exception.ManilaException(
                    _("Share server handling is not available. "
                      "But 'share_server' was provided. '%s'. "
                      "Share network should not be used.") % server.get('id'))
        elif not server:
            raise exception.ManilaException(
                _("Share server handling is enabled. But 'share_server' "
                  "is not provided. Make sure you used 'share_network'."))

        if not server.get('backend_details'):
            raise exception.ManilaException(
                _("Share server '%s' does not have backend details.") %
                server['id'])
        if not self.service_instance_manager.ensure_service_instance(
                context, server['backend_details']):
            raise exception.ServiceInstanceUnavailable()

        return f(self, context, *args, **kwargs)

    return wrap
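
# Illustrative usage sketch (not part of the module): any driver method
# decorated with @ensure_server gets a usable 'share_server' kwarg, e.g.:
#
#     @ensure_server
#     def create_share(self, context, share, share_server=None):
#         details = share_server['backend_details']  # guaranteed present
#
# With driver_handles_share_servers=False the decorator fills in the common
# service instance; with DHSS=True a missing 'share_server' raises
# ManilaException before the wrapped method runs.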


class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
    """Executes commands relating to Shares."""

    def __init__(self, *args, **kwargs):
        """Do initialization."""
        super(GenericShareDriver, self).__init__(
            [False, True], *args, **kwargs)
        self.admin_context = context.get_admin_context()
        self.configuration.append_config_values(share_opts)
        self._helpers = {}
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or "Cinder_Volumes"
        self.ssh_connections = {}
        self._setup_service_instance_manager()
        self.private_storage = kwargs.get('private_storage')

    def _setup_service_instance_manager(self):
        self.service_instance_manager = (
            service_instance.ServiceInstanceManager(
                driver_config=self.configuration))

    def _ssh_exec(self, server, command, check_exit_code=True):
        LOG.debug("_ssh_exec - server: %s, command: %s, check_exit_code: %s",
                  server, command, check_exit_code)
        connection = self.ssh_connections.get(server['instance_id'])
        ssh_conn_timeout = self.configuration.ssh_conn_timeout
        if not connection:
            ssh_pool = ssh_utils.SSHPool(server['ip'],
                                         22,
                                         ssh_conn_timeout,
                                         server['username'],
                                         server.get('password'),
                                         server.get('pk_path'),
                                         max_size=1)
            ssh = ssh_pool.create()
            self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
        else:
            ssh_pool, ssh = connection

        if not ssh.get_transport().is_active():
            ssh_pool.remove(ssh)
            ssh = ssh_pool.create()
            self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)

        # (aovchinnikov): ssh_execute does not behave well when passed
        # parameters with spaces.
        wrap = lambda token: "\"" + token + "\""  # noqa: E731
        command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
        return processutils.ssh_execute(ssh, ' '.join(command),
                                        check_exit_code=check_exit_code)
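
    # Example of the quoting rule above, on a hypothetical command list:
    #
    #     ['sudo', 'tee', '-a', '/etc/hosts', 'token with spaces']
    #
    # is joined into the remote command line
    #
    #     sudo tee -a /etc/hosts "token with spaces"
    #
    # i.e. only tokens containing spaces are wrapped in double quotes before
    # being handed to processutils.ssh_execute().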

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""

    def do_setup(self, context):
        """Any initialization the generic driver does while starting."""
        super(GenericShareDriver, self).do_setup(context)
        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self._setup_helpers()

        common_sv_available = False
        share_server = None
        sv_fetch_retry_interval = 5
        while not (common_sv_available or self.driver_handles_share_servers):
            try:
                # Verify availability of common server
                share_server = (
                    self.service_instance_manager.get_common_server())
                common_sv_available = self._is_share_server_active(
                    context, share_server)
            except Exception as ex:
                LOG.error(ex)

            if not common_sv_available:
                time.sleep(sv_fetch_retry_interval)
                LOG.warning("Waiting for the common service VM to become "
                            "available. "
                            "Driver is currently uninitialized. "
                            "Share server: %(share_server)s "
                            "Retry interval: %(retry_interval)s",
                            dict(share_server=share_server,
                                 retry_interval=sv_fetch_retry_interval))

    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers."""
        helpers = self.configuration.share_helpers
        if helpers:
            for helper_str in helpers:
                share_proto, __, import_str = helper_str.partition('=')
                helper = importutils.import_class(import_str)
                self._helpers[share_proto.upper()] = helper(
                    self._execute,
                    self._ssh_exec,
                    self.configuration)
        else:
            raise exception.ManilaException(
                "No protocol helpers selected for Generic Driver. "
                "Please specify using config option 'share_helpers'.")

    @ensure_server
    def create_share(self, context, share, share_server=None):
        """Creates share."""
        return self._create_share(
            context, share,
            snapshot=None,
            share_server=share_server,
        )

    def _create_share(self, context, share, snapshot, share_server=None):
        helper = self._get_helper(share)
        server_details = share_server['backend_details']
        volume = self._allocate_container(
            self.admin_context, share, snapshot=snapshot)
        volume = self._attach_volume(
            self.admin_context, share, server_details['instance_id'], volume)
        if not snapshot:
            self._format_device(server_details, volume)

        self._mount_device(share, server_details, volume)
        export_locations = helper.create_exports(
            server_details, share['name'])
        return export_locations

    @utils.retry(retry_param=exception.ProcessExecutionError, backoff_rate=1)
    def _is_device_file_available(self, server_details, volume):
        """Checks whether the device file is available."""
        command = ['sudo', 'test', '-b', volume['mountpoint']]
        self._ssh_exec(server_details, command)

    def _format_device(self, server_details, volume):
        """Formats device attached to the service vm."""
        self._is_device_file_available(server_details, volume)
        command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
                   volume['mountpoint']]
        self._ssh_exec(server_details, command)

    def _is_device_mounted(self, mount_path, server_details, volume=None):
        """Checks whether the volume is already mounted or not."""
        log_data = {
            'mount_path': mount_path,
            'server_id': server_details['instance_id'],
        }
        if volume and volume.get('mountpoint', ''):
            log_data['volume_id'] = volume['id']
            log_data['dev_mount_path'] = volume['mountpoint']
            msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
                   "'%(dev_mount_path)s' is mounted on mount path "
                   "'%(mount_path)s' on server '%(server_id)s' or not."
                   % log_data)
        else:
            msg = ("Checking whether mount path '%(mount_path)s' exists on "
                   "server '%(server_id)s' or not." % log_data)
        LOG.debug(msg)
        mounts_list_cmd = ['sudo', 'mount']
        output, __ = self._ssh_exec(server_details, mounts_list_cmd)
        mounts = output.split('\n')
        for mount in mounts:
            mount_elements = mount.split(' ')
            if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
                if volume:
                    # Mount goes with device path and mount path
                    if (volume.get('mountpoint', '') == mount_elements[0]):
                        return True
                else:
                    # Unmount goes only by mount path
                    return True
        return False
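
    # Sketch of the 'mount' output walked above (the line is illustrative,
    # not captured from a real service VM):
    #
    #     /dev/vdb on /shares/share-42 type ext4 (rw,relatime)
    #
    # Splitting on spaces puts the device path at index 0 and the mount path
    # at index 2, which is exactly what the checks above compare.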

    def _add_mount_permanently(self, share_id, device_path, server_details):
        """Add mount permanently for mounted filesystems."""
        try:
            self._ssh_exec(
                server_details,
                ['grep', share_id, const.MOUNT_FILE_TEMP,
                 '|', 'sudo', 'tee', '-a', const.MOUNT_FILE],
            )
            output, __ = self._ssh_exec(
                server_details,
                ['lsblk', '-o', 'uuid', '-n', device_path])
            if output:
                device_uuid = f"UUID={output.strip()}"
                self._ssh_exec(
                    server_details,
                    ['sudo', 'sed', '-i', "s@{}@{}@".format(device_path,
                                                            device_uuid),
                     const.MOUNT_FILE]
                )
        except exception.ProcessExecutionError as e:
            LOG.error("Failed to add 'Share-%(share_id)s' mount "
                      "permanently on server '%(instance_id)s'.",
                      {"share_id": share_id,
                       "instance_id": server_details['instance_id']})
            raise exception.ShareBackendException(msg=str(e))
        try:
            # Remount it to avoid postponed point of failure
            self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
        except exception.ProcessExecutionError:
            LOG.error("Failed to mount all shares on server '%s'.",
                      server_details['instance_id'])
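
    # Sketch of the rewrite above, assuming const.MOUNT_FILE points at the
    # service VM's /etc/fstab (device path and UUID are made up): if
    # 'lsblk -o uuid -n /dev/vdb' prints 761b9657-..., the sed call becomes
    #
    #     sudo sed -i s@/dev/vdb@UUID=761b9657-...@ /etc/fstab
    #
    # so the permanent entry keeps working even if the device name changes
    # across reboots.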

    def _remove_mount_permanently(self, share_id, server_details):
        """Remove mount permanently from mounted filesystems."""
        try:
            self._ssh_exec(
                server_details,
                ['sudo', 'sed', '-i', '\'/%s/d\'' % share_id,
                 const.MOUNT_FILE],
            )
        except exception.ProcessExecutionError as e:
            LOG.error("Failed to remove 'Share-%(share_id)s' mount "
                      "permanently on server '%(instance_id)s'.",
                      {"share_id": share_id,
                       "instance_id": server_details['instance_id']})
            raise exception.ShareBackendException(msg=str(e))

    def _mount_device(self, share, server_details, volume):
        """Mounts block device to the directory on service vm.

        Mounts attached and formatted block device to the directory if not
        mounted yet.
        """

        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _mount_device_with_lock():
            mount_path = self._get_mount_path(share)
            device_path = volume['mountpoint']
            log_data = {
                'dev': device_path,
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            try:
                if not self._is_device_mounted(mount_path, server_details,
                                               volume):
                    LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                              "server '%(server)s'.", log_data)
                    mount_cmd = (
                        'sudo', 'mkdir', '-p', mount_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                        '&&', 'sudo', 'chmod', '777', mount_path,
                        '&&', 'sudo', 'umount', mount_path,
                        # NOTE(vponomaryov): 'tune2fs' is required to give
                        # the filesystem of a share created from a snapshot
                        # a unique ID; with LVM volumes it would otherwise
                        # keep the same UUID as the source volume. The
                        # 'tune2fs' command can only be executed while the
                        # device is not mounted and, in the current case,
                        # takes effect only after remounting. Closes #1645751
                        # NOTE(gouthamr): Executing tune2fs -U only works on
                        # a recently checked filesystem. See debian bug 857336
                        '&&', 'sudo', 'e2fsck', '-y', '-f', device_path,
                        '&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                    )
                    self._ssh_exec(server_details, mount_cmd)
                    self._add_mount_permanently(share.id, device_path,
                                                server_details)
                else:
                    LOG.warning("Mount point '%(path)s' already exists on "
                                "server '%(server)s'.", log_data)
            except exception.ProcessExecutionError as e:
                raise exception.ShareBackendException(msg=str(e))
        return _mount_device_with_lock()

    @utils.retry(retry_param=exception.ProcessExecutionError)
    def _unmount_device(self, share, server_details):
        """Unmounts block device from directory on service vm."""

        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _unmount_device_with_lock():
            mount_path = self._get_mount_path(share)
            log_data = {
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            if self._is_device_mounted(mount_path, server_details):
                LOG.debug("Unmounting path '%(path)s' on server "
                          "'%(server)s'.", log_data)
                unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
                               'rmdir', mount_path]
                self._ssh_exec(server_details, unmount_cmd)
                self._remove_mount_permanently(share.id, server_details)
            else:
                LOG.warning("Mount point '%(path)s' does not exist on "
                            "server '%(server)s'.", log_data)
        return _unmount_device_with_lock()

    def _get_mount_path(self, share):
        """Returns the path to use for mount device in service vm."""
        return os.path.join(self.configuration.share_mount_path,
                            share['name'])
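
    # Illustrative example with the default config and a hypothetical share
    # name: share_mount_path = '/shares' and share['name'] = 'share-42'
    # yield the mount path '/shares/share-42'.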

    def _attach_volume(self, context, share, instance_id, volume):
        """Attaches cinder volume to service vm."""
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_attach(volume):
            if volume['status'] == 'in-use':
                attached_volumes = self.compute_api.instance_volumes_list(
                    self.admin_context, instance_id)
                if volume['id'] in attached_volumes:
                    return volume
                else:
                    raise exception.ManilaException(
                        _('Volume %s is already attached to another instance')
                        % volume['id'])

            @utils.retry(retries=3,
                         interval=2,
                         backoff_rate=1)
            def attach_volume():
                self.compute_api.instance_volume_attach(
                    self.admin_context, instance_id, volume['id'])

            attach_volume()

            t = time.time()
            while time.time() - t < self.configuration.max_time_to_attach:
                volume = self.volume_api.get(context, volume['id'])
                if volume['status'] == 'in-use':
                    return volume
                elif volume['status'] not in ('attaching', 'reserved'):
                    raise exception.ManilaException(
                        _('Failed to attach volume %s') % volume['id'])
                time.sleep(1)
            else:
                err_msg = {
                    'volume_id': volume['id'],
                    'max_time': self.configuration.max_time_to_attach
                }
                raise exception.ManilaException(
                    _('Volume %(volume_id)s has not been attached in '
                      '%(max_time)ss. Giving up.') % err_msg)
        return do_attach(volume)
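
    # Note on the while/else pattern above (explanatory, not upstream text):
    # the 'else' branch of a Python while loop runs only when the condition
    # becomes false without a 'break' or 'return', i.e. exactly when
    # max_time_to_attach elapses before Cinder reports 'in-use', so the
    # timeout exception cannot fire on a successful attach.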

    def _get_volume_name(self, share_id):
        return self.configuration.volume_name_template % share_id

    def _get_volume(self, context, share_id):
        """Finds volume, associated to the specific share."""
        volume_id = self.private_storage.get(share_id, 'volume_id')

        if volume_id is not None:
            return self.volume_api.get(context, volume_id)
        else:  # Fallback to legacy method
            return self._get_volume_legacy(context, share_id)

    def _get_volume_legacy(self, context, share_id):
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        volume_name = self._get_volume_name(share_id)
        search_opts = {'name': volume_name}
        if context.is_admin:
            search_opts['all_tenants'] = True
        volumes_list = self.volume_api.get_all(context, search_opts)
        if len(volumes_list) == 1:
            return volumes_list[0]
        elif len(volumes_list) > 1:
            LOG.error(
                "Expected only one volume in volume list with name "
                "'%(name)s', but got more than one in a result - "
                "'%(result)s'.", {
                    'name': volume_name, 'result': volumes_list})
            raise exception.ManilaException(
                _("Error. Ambiguous volumes for name '%s'") % volume_name)
        return None

    def _get_volume_snapshot(self, context, snapshot_id):
        """Find volume snapshot associated to the specific share snapshot."""
        volume_snapshot_id = self.private_storage.get(
            snapshot_id, 'volume_snapshot_id')

        if volume_snapshot_id is not None:
            return self.volume_api.get_snapshot(context, volume_snapshot_id)
        else:  # Fallback to legacy method
            return self._get_volume_snapshot_legacy(context, snapshot_id)

    def _get_volume_snapshot_legacy(self, context, snapshot_id):
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        volume_snapshot_name = (
            self.configuration.volume_snapshot_name_template % snapshot_id)
        volume_snapshot_list = self.volume_api.get_all_snapshots(
            context, {'name': volume_snapshot_name})
        volume_snapshot = None
        if len(volume_snapshot_list) == 1:
            volume_snapshot = volume_snapshot_list[0]
        elif len(volume_snapshot_list) > 1:
            LOG.error(
                "Expected only one volume snapshot in list with name "
                "'%(name)s', but got more than one in a result - "
                "'%(result)s'.", {
                    'name': volume_snapshot_name,
                    'result': volume_snapshot_list})
            raise exception.ManilaException(
                _('Error. Ambiguous volume snapshots'))
        return volume_snapshot

    def _detach_volume(self, context, share, server_details):
        """Detaches cinder volume from service vm."""
        instance_id = server_details['instance_id']

        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_detach():
            attached_volumes = self.compute_api.instance_volumes_list(
                self.admin_context, instance_id)
            try:
                volume = self._get_volume(context, share['id'])
            except exception.VolumeNotFound:
                LOG.warning("Volume not found for share %s. "
                            "Possibly already deleted.", share['id'])
                volume = None
            if volume and volume['id'] in attached_volumes:
                self.compute_api.instance_volume_detach(
                    self.admin_context,
                    instance_id,
                    volume['id']
                )
                t = time.time()
                while time.time() - t < self.configuration.max_time_to_attach:
                    volume = self.volume_api.get(context, volume['id'])
                    if volume['status'] in (const.STATUS_AVAILABLE,
                                            const.STATUS_ERROR):
                        break
                    time.sleep(1)
                else:
                    err_msg = {
                        'volume_id': volume['id'],
                        'max_time': self.configuration.max_time_to_attach
                    }
                    raise exception.ManilaException(
                        _('Volume %(volume_id)s has not been detached in '
                          '%(max_time)ss. Giving up.') % err_msg)
        do_detach()

    def _allocate_container(self, context, share, snapshot=None):
        """Creates cinder volume, associated to share by name."""
        volume_snapshot = None
        if snapshot:
            volume_snapshot = self._get_volume_snapshot(context,
                                                        snapshot['id'])

        volume = self.volume_api.create(
            context,
            share['size'],
            self.configuration.volume_name_template % share['id'], '',
            snapshot=volume_snapshot,
            volume_type=self.configuration.cinder_volume_type,
            availability_zone=share['availability_zone'])

        self.private_storage.update(
            share['id'], {'volume_id': volume['id']})

        msg_error = _('Failed to create volume')
        msg_timeout = (
            _('Volume has not been created in %ss. Giving up') %
            self.configuration.max_time_to_create_volume
        )
        return self.volume_api.wait_for_available_volume(
            volume, self.configuration.max_time_to_create_volume,
            msg_error=msg_error, msg_timeout=msg_timeout
        )

    def _deallocate_container(self, context, share):
        """Deletes cinder volume."""
        try:
            volume = self._get_volume(context, share['id'])
        except exception.VolumeNotFound:
            LOG.info("Volume not found. Already deleted?")
            volume = None
        if volume:
            if volume['status'] == 'in-use':
                raise exception.ManilaException(
                    _('Volume is still in use and '
                      'cannot be deleted now.'))
            self.volume_api.delete(context, volume['id'])
            t = time.time()
            while (time.time() - t <
                   self.configuration.max_time_to_create_volume):
                try:
                    volume = self.volume_api.get(context, volume['id'])
                except exception.VolumeNotFound:
                    LOG.debug('Volume was deleted successfully')
                    break
                time.sleep(1)
            else:
                raise exception.ManilaException(
                    _('Volume has not been '
                      'deleted in %ss. Giving up')
                    % self.configuration.max_time_to_create_volume)

    def _update_share_stats(self):
        """Retrieve stats info from share volume group."""
        data = dict(
            share_backend_name=self.backend_name,
            storage_protocol='NFS_CIFS',
            reserved_percentage=self.configuration.reserved_share_percentage,
            reserved_snapshot_percentage=(
                self.configuration.reserved_share_from_snapshot_percentage
                or self.configuration.reserved_share_percentage),
            reserved_share_extend_percentage=(
                self.configuration.reserved_share_extend_percentage
                or self.configuration.reserved_share_percentage),
        )
        super(GenericShareDriver, self)._update_share_stats(data)

    @ensure_server
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Is called to create share from snapshot."""
        return self._create_share(
            context, share,
            snapshot=snapshot,
            share_server=share_server,
        )

    @ensure_server
    def extend_share(self, share, new_size, share_server=None):
        server_details = share_server['backend_details']

        helper = self._get_helper(share)
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        volume = self._get_volume(self.admin_context, share['id'])

        if int(new_size) > volume['size']:
            self._detach_volume(self.admin_context, share, server_details)
            volume = self._extend_volume(self.admin_context, volume, new_size)

            volume = self._attach_volume(
                self.admin_context,
                share,
                server_details['instance_id'],
                volume)

        self._resize_filesystem(server_details, volume, new_size=new_size)
        self._mount_device(share, server_details, volume)
        helper.restore_access_after_maintenance(server_details,
                                                share['name'])

    def _extend_volume(self, context, volume, new_size):
        self.volume_api.extend(context, volume['id'], new_size)

        msg_error = _('Failed to extend volume %s') % volume['id']
        msg_timeout = (
            _('Volume has not been extended in %ss. Giving up') %
            self.configuration.max_time_to_extend_volume
        )
        return self.volume_api.wait_for_available_volume(
            volume, self.configuration.max_time_to_extend_volume,
            msg_error=msg_error, msg_timeout=msg_timeout,
            expected_size=new_size
        )

    @ensure_server
    def shrink_share(self, share, new_size, share_server=None):
        server_details = share_server['backend_details']

        helper = self._get_helper(share)
        export_location = share['export_locations'][0]['path']
        mount_path = helper.get_share_path_by_export_location(
            server_details, export_location)

        consumed_space = self._get_consumed_space(mount_path, server_details)

        LOG.debug("Consumed space on share: %s", consumed_space)

        if consumed_space >= new_size:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])

        volume = self._get_volume(self.admin_context, share['id'])

        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)

        try:
            self._resize_filesystem(server_details, volume, new_size=new_size)
        except exception.Invalid:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        except Exception as e:
            msg = _("Cannot shrink share: %s") % str(e)
            raise exception.Invalid(msg)
        finally:
            self._mount_device(share, server_details, volume)
            helper.restore_access_after_maintenance(server_details,
                                                    share['name'])

    def _resize_filesystem(self, server_details, volume, new_size=None):
        """Resize filesystem of provided volume."""
        check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
        self._ssh_exec(server_details, check_command)
        command = ['sudo', 'resize2fs', volume['mountpoint']]

        if new_size:
            command.append("%sG" % new_size)

        try:
            self._ssh_exec(server_details, command)
        except processutils.ProcessExecutionError as e:
            if e.stderr.find('New size smaller than minimum') != -1:
                msg = (_("Invalid 'new_size' provided: %s")
                       % new_size)
                raise exception.Invalid(msg)
            else:
                msg = _("Cannot resize file-system: %s") % e
                raise exception.ManilaException(msg)
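
    # Sketch of the resulting remote commands (the device path is
    # illustrative): resizing to 2 GiB runs, in order,
    #
    #     sudo fsck -pf /dev/vdb
    #     sudo resize2fs /dev/vdb 2G
    #
    # and a "New size smaller than minimum" message on stderr is translated
    # into exception.Invalid, which shrink_share() reports as possible data
    # loss.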

    def _is_share_server_active(self, context, share_server):
        """Check if the share server is active."""
        has_active_share_server = (
            share_server and share_server.get('backend_details') and
            self.service_instance_manager.ensure_service_instance(
                context, share_server['backend_details']))
        return has_active_share_server

    def delete_share(self, context, share, share_server=None):
        """Deletes share."""
        helper = self._get_helper(share)
        if not self.driver_handles_share_servers:
            share_server = self.service_instance_manager.get_common_server()
        if self._is_share_server_active(context, share_server):
            helper.remove_exports(
                share_server['backend_details'], share['name'])
            self._unmount_device(share, share_server['backend_details'])
            self._detach_volume(self.admin_context, share,
                                share_server['backend_details'])

        # NOTE(jun): Deallocation is intentionally done even when the Nova
        # instance is absent, whatever the reason for its absence.
        self._deallocate_container(self.admin_context, share)

        self.private_storage.delete(share['id'])

    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""
        model_update = {}
        volume = self._get_volume(
            self.admin_context, snapshot['share_instance_id'])
        volume_snapshot_name = (self.configuration.
                                volume_snapshot_name_template % snapshot['id'])
        volume_snapshot = self.volume_api.create_snapshot_force(
            self.admin_context, volume['id'], volume_snapshot_name, '')
        t = time.time()
        while time.time() - t < self.configuration.max_time_to_create_volume:
            if volume_snapshot['status'] == const.STATUS_AVAILABLE:
                break
            if volume_snapshot['status'] == const.STATUS_ERROR:
                raise exception.ManilaException(_('Failed to create volume '
                                                  'snapshot'))
            time.sleep(1)
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                volume_snapshot['id'])

            # NOTE(xyang): We should look at whether we still need to save
            # volume_snapshot_id in private_storage later, now that it is
            # saved in provider_location.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): Need to update provider_location in the db so
            # that it can be used in manage/unmanage snapshot tempest tests.
            model_update['provider_location'] = volume_snapshot['id']
        else:
            raise exception.ManilaException(
                _('Volume snapshot has not been '
                  'created in %ss. Giving up') %
                self.configuration.max_time_to_create_volume)

        return model_update

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""
        volume_snapshot = self._get_volume_snapshot(self.admin_context,
                                                    snapshot['id'])
        if volume_snapshot is None:
            return
        self.volume_api.delete_snapshot(self.admin_context,
                                        volume_snapshot['id'])
        t = time.time()
        while time.time() - t < self.configuration.max_time_to_create_volume:
            try:
                snapshot = self.volume_api.get_snapshot(self.admin_context,
                                                        volume_snapshot['id'])
            except exception.VolumeSnapshotNotFound:
                LOG.debug('Volume snapshot was deleted successfully')
                self.private_storage.delete(snapshot['id'])
                break
            time.sleep(1)
        else:
            raise exception.ManilaException(
                _('Volume snapshot has not been '
                  'deleted in %ss. Giving up') %
                self.configuration.max_time_to_create_volume)

    @ensure_server
    def ensure_share(self, context, share, share_server=None):
        """Ensure that storage is mounted and exported."""
        helper = self._get_helper(share)
        volume = self._get_volume(context, share['id'])

        # NOTE(vponomaryov): volume can be None for managed shares
        if volume:
            volume = self._attach_volume(
                context,
                share,
                share_server['backend_details']['instance_id'],
                volume)
            self._mount_device(share, share_server['backend_details'], volume)
            helper.create_exports(
                share_server['backend_details'], share['name'], recreate=True)

    @ensure_server
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Update access rules for given share.

        This driver has two different behaviors according to parameters:
        1. Recovery after error - 'access_rules' contains all access rules,
        'add_rules' and 'delete_rules' shall be empty. Previously existing
        access rules are cleared and then added back according
        to 'access_rules'.

        2. Adding/Deleting of several access rules - 'access_rules' contains
        all access rules, 'add_rules' and 'delete_rules' contain rules which
        should be added/deleted. Rules in 'access_rules' are ignored and
        only rules from 'add_rules' and 'delete_rules' are applied.

        :param context: Current context
        :param share: Share model with share data.
        :param access_rules: All access rules for given share
        :param add_rules: Empty List or List of access rules which should be
               added. access_rules already contains these rules.
        :param delete_rules: Empty List or List of access rules which should
               be removed. access_rules doesn't contain these rules.
        :param update_rules: Empty List or List of access rules which should
               be updated. access_rules already contains these rules.
        :param share_server: None or Share server model
        """
        self._get_helper(share).update_access(share_server['backend_details'],
                                              share['name'], access_rules,
                                              add_rules=add_rules,
                                              delete_rules=delete_rules)

    def _get_helper(self, share):
        helper = self._helpers.get(share['share_proto'])
        if helper:
            return helper
        else:
            raise exception.InvalidShare(
                reason="Wrong, unsupported or disabled protocol")

    def get_network_allocations_number(self):
        """Get number of network interfaces to be created."""
        # NOTE(vponomaryov): Generic driver does not need allocations, because
        # Nova will handle it. It is valid for all multitenant drivers, that
        # use service instance provided by Nova.
        return 0

    def _setup_server(self, network_info, metadata=None):
        # NOTE(felipe_rodrigues): keep legacy network_info support as a dict.
        network_info = network_info[0]

        msg = "Creating share server '%s'."
        LOG.debug(msg, network_info['server_id'])
        server = self.service_instance_manager.set_up_service_instance(
            self.admin_context, network_info)
        for helper in self._helpers.values():
            helper.init_helper(server)
        return server

    def _teardown_server(self, server_details, security_services=None):
        instance_id = server_details.get("instance_id")
        LOG.debug("Removing share infrastructure for service instance '%s'.",
                  instance_id)
        self.service_instance_manager.delete_service_instance(
            self.admin_context, server_details)

    def manage_existing(self, share, driver_options):
        """Manage existing share with manila.

        The Generic driver accepts only one driver option, 'volume_id'.
        If an administrator provides this option, the corresponding Cinder
        volume will be managed by Manila as well.

        :param share: share data
        :param driver_options: Empty dict or dict with 'volume_id' option.
        :return: dict with share size, example: {'size': 1}
        """
        helper = self._get_helper(share)
        share_server = self.service_instance_manager.get_common_server()
        server_details = share_server['backend_details']

        old_export_location = share['export_locations'][0]['path']
        mount_path = helper.get_share_path_by_export_location(
            share_server['backend_details'], old_export_location)
        LOG.debug("Manage: mount path = %s", mount_path)

        mounted = self._is_device_mounted(mount_path, server_details)
        LOG.debug("Manage: is share mounted = %s", mounted)

        if not mounted:
            msg = _("Provided share %s is not mounted.") % share['id']
            raise exception.ManageInvalidShare(reason=msg)

        def get_volume():
            if 'volume_id' in driver_options:
                try:
                    return self.volume_api.get(
                        self.admin_context, driver_options['volume_id'])
                except exception.VolumeNotFound as e:
                    raise exception.ManageInvalidShare(reason=e.message)

            # NOTE(vponomaryov): Manila can only combine the volume name by
            # itself; there is nowhere to get the volume ID from. Return
            # None, since Cinder volume names are neither unique nor fixed
            # and therefore cannot be relied upon.
            return None

        share_volume = get_volume()

        if share_volume:
            attached_volumes = self.compute_api.instance_volumes_list(
                self.admin_context, server_details['instance_id'])
            LOG.debug('Manage: attached volumes = %s', attached_volumes)

            if share_volume['id'] not in attached_volumes:
                msg = _("Provided volume %s is not attached "
                        "to service instance.") % share_volume['id']
                raise exception.ManageInvalidShare(reason=msg)

            linked_volume_name = self._get_volume_name(share['id'])
            if share_volume['name'] != linked_volume_name:
                LOG.debug('Manage: volume_id = %s', share_volume['id'])
                self.volume_api.update(self.admin_context, share_volume['id'],
                                       {'name': linked_volume_name})

            self.private_storage.update(
                share['id'], {'volume_id': share_volume['id']})

            share_size = share_volume['size']
        else:
            share_size = self._get_mounted_share_size(
                mount_path, share_server['backend_details'])

        export_locations = helper.get_exports_for_share(
            server_details, old_export_location)
        return {'size': share_size, 'export_locations': export_locations}

    def manage_existing_snapshot(self, snapshot, driver_options):
        """Manage existing share snapshot with manila.

        :param snapshot: Snapshot data
        :param driver_options: Not used by the Generic driver currently
        :return: dict with share snapshot size, example: {'size': 1}
        """
        model_update = {}
        volume_snapshot = None
        snapshot_size = snapshot.get('share_size', 0)
        provider_location = snapshot.get('provider_location')
        try:
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                provider_location)
        except exception.VolumeSnapshotNotFound as e:
            raise exception.ManageInvalidShareSnapshot(
                reason=e.message)

        if volume_snapshot:
            snapshot_size = volume_snapshot['size']
            # NOTE(xyang): volume_snapshot_id is saved in private_storage
            # in create_snapshot, so saving it here too for consistency.
            # We should look at whether we still need to save it in
            # private_storage later.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): provider_location is used to map a Manila snapshot
            # to its name on the storage backend and prevent managing of the
            # same snapshot twice.
            model_update['provider_location'] = volume_snapshot['id']

        model_update['size'] = snapshot_size
        return model_update

    def unmanage_snapshot(self, snapshot):
        """Unmanage share snapshot with manila."""

        self.private_storage.delete(snapshot['id'])

    def _get_mount_stats_by_index(self, mount_path, server_details, index,
                                  block_size='G'):
        """Get mount stats using df shell command.

        :param mount_path: Share path on share server
        :param server_details: Share server connection details
        :param index: Data index in df command output:
                      BLOCK_DEVICE_SIZE_INDEX - Size of block device
                      USED_SPACE_INDEX - Used space
        :param block_size: size of block (example: G, M, Mib, etc)
        :returns: value of provided index
        """
        share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
        output, __ = self._ssh_exec(server_details, share_size_cmd)
        lines = output.split('\n')
        return int(lines[1].split()[index][:-1])
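
    # Sketch of the df output being parsed (values are illustrative):
    #
    #     Filesystem     1G-blocks  Used Available Capacity Mounted on
    #     /dev/vdb             10G    2G        8G      20% /shares/share-42
    #
    # lines[1].split() yields the data row; index 1 (BLOCK_DEVICE_SIZE_INDEX)
    # is the device size and index 2 (USED_SPACE_INDEX) the used space, and
    # the trailing unit letter is stripped with [:-1] before int().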

    def _get_mounted_share_size(self, mount_path, server_details):
        try:
            size = self._get_mount_stats_by_index(
                mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
        except Exception as e:
            msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
                'path': mount_path,
                'error': e
            }
            raise exception.ManageInvalidShare(reason=msg)

        return size

    def _get_consumed_space(self, mount_path, server_details):
        try:
            size = self._get_mount_stats_by_index(
                mount_path, server_details, USED_SPACE_INDEX, block_size='M')
            size /= float(units.Ki)
        except Exception as e:
            msg = _("Cannot calculate consumed space on share "
                    "%(path)s : %(error)s") % {
                'path': mount_path,
                'error': e
            }
            raise exception.InvalidShare(reason=msg)

        return size
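
    # Worked example of the conversion above (numbers are made up): with
    # block_size='M', df reports used space in MiB, e.g. 2048M, and
    # 2048 / float(units.Ki) = 2.0, so the method returns GiB, the unit
    # shrink_share() compares against new_size.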