Coverage for manila/share/drivers/lvm.py: 91%
289 statements
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
1# Copyright 2012 NetApp
2# Copyright 2016 Mirantis Inc.
3# All Rights Reserved.
4#
5# Licensed under the Apache License, Version 2.0 (the "License"); you may
6# not use this file except in compliance with the License. You may obtain
7# a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14# License for the specific language governing permissions and limitations
15# under the License.
16"""
17LVM Driver for shares.
19"""
21import ipaddress
22import math
23import os
24import re
26from oslo_concurrency import processutils
27from oslo_config import cfg
28from oslo_log import log
29from oslo_utils import importutils
30from oslo_utils import timeutils
32from manila import exception
33from manila.i18n import _
34from manila.privsep import common as privsep_common
35from manila.privsep import filesystem as privsep_filesystem
36from manila.privsep import lvm as privsep_lvm
37from manila.privsep import os as privsep_os
38from manila.share import driver
39from manila.share.drivers import generic
40from manila.share import utils as share_utils
41from manila import utils
LOG = log.getLogger(__name__)

# Configuration options specific to the LVM share driver.
share_opts = [
    cfg.StrOpt('lvm_share_export_root',
               default='$state_path/mnt',
               help='Base folder where exported shares are located.'),
    cfg.ListOpt('lvm_share_export_ips',
                help='List of IPs to export shares belonging to the LVM '
                     'storage driver.'),
    cfg.IntOpt('lvm_share_mirrors',
               default=0,
               help='If set, create LVMs with multiple mirrors. Note that '
                    'this requires lvm_mirrors + 2 PVs with available space.'),
    cfg.StrOpt('lvm_share_volume_group',
               default='lvm-shares',
               help='Name for the VG that will contain exported shares.'),
    cfg.ListOpt('lvm_share_helpers',
                default=[
                    'CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess',
                    'NFS=manila.share.drivers.helpers.NFSHelper',
                ],
                help='Specify list of share export helpers.'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)
# The LVM driver also relies on options declared by the Generic driver.
CONF.register_opts(generic.share_opts)
class LVMMixin(driver.ExecuteMixin):
    """LVM volume-management operations shared by LVM-based share drivers."""

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        try:
            out, err = privsep_lvm.list_vgs_get_name()
        except processutils.ProcessExecutionError:
            msg = _("Failed to get LVM volume group names.")
            raise exception.ShareBackendException(msg=msg)
        volume_groups = out.split()
        if self.configuration.lvm_share_volume_group not in volume_groups:
            msg = (_("Share volume group %s doesn't exist.")
                   % self.configuration.lvm_share_volume_group)
            raise exception.InvalidParameterValue(err=msg)

        if not self.configuration.lvm_share_export_ips:
            msg = _("The option lvm_share_export_ips must be specified.")
            raise exception.InvalidParameterValue(err=msg)

    def _allocate_container(self, share):
        """Create the logical volume backing ``share`` and format it."""
        sizestr = '%sG' % share['size']
        mirrors = 0
        region_size = 0
        if self.configuration.lvm_share_mirrors:
            mirrors = self.configuration.lvm_share_mirrors
            terras = int(sizestr[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                # http://red.ht/U2BPOD
                region_size = str(rsize)
        action_args = [
            share['size'],
            share['name'],
            self.configuration.lvm_share_volume_group,
            mirrors,
            region_size
        ]
        privsep_common.execute_with_retries(
            privsep_lvm.lvcreate, action_args,
            self.configuration.num_shell_tries)
        device_name = self._get_local_path(share)
        # NOTE(review): this call was previously wrapped in a try/except
        # that only re-raised ProcessExecutionError; the no-op handler was
        # removed — the exception still propagates unchanged.
        privsep_filesystem.make_filesystem(
            self.configuration.share_volume_fstype, device_name)

    def _get_mount_point_name(self, share):
        """Return the export name, preferring a custom mount point name."""
        return share.get('mount_point_name') or share.get('name')

    def _extend_container(self, share, device_name, size):
        """Extend the LV at ``device_name`` to ``size`` (GiB)."""
        privsep_common.execute_with_retries(
            privsep_lvm.lvextend, [device_name, size],
            self.configuration.num_shell_tries)

    def _deallocate_container(self, share_name):
        """Deletes a logical volume for share."""
        try:
            action_args = [
                self.configuration.lvm_share_volume_group, share_name]
            privsep_common.execute_with_retries(
                privsep_lvm.lvremove, action_args,
                self.configuration.num_shell_tries)
        except exception.ProcessExecutionError as exc:
            # A missing LV is tolerated (idempotent delete); anything else
            # is re-raised.
            err_pattern = re.compile(".*failed to find.*|.*not found.*",
                                     re.IGNORECASE)
            if not err_pattern.match(exc.stderr):
                LOG.exception("Error deleting volume")
                raise
            LOG.warning("Volume not found: %s", exc.stderr)

    def _create_snapshot(self, context, snapshot):
        """Creates a snapshot."""
        orig_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group,
                                  snapshot['share_name'])
        action_args = [
            snapshot['share']['size'], snapshot['name'], orig_lv_name]
        privsep_common.execute_with_retries(
            privsep_lvm.lv_snapshot_create, action_args,
            self.configuration.num_shell_tries)

        self._set_random_uuid_to_device(snapshot)

    def _set_random_uuid_to_device(self, share_or_snapshot):
        # NOTE(vponomaryov): 'tune2fs' is required to make
        # filesystem of share created from snapshot have
        # unique ID, in case of LVM volumes, by default,
        # it will have the same UUID as source volume. Closes #1645751
        # NOTE(gouthamr): Executing tune2fs -U only works on
        # a recently checked filesystem.
        # See: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=857336
        device_path = self._get_local_path(share_or_snapshot)
        try:
            privsep_filesystem.e2fsck(device_path)
            privsep_filesystem.tune2fs(device_path)
        except processutils.ProcessExecutionError:
            msg = _("Failed to check or modify filesystems.")
            raise exception.ShareBackendException(msg=msg)

    def create_snapshot(self, context, snapshot, share_server=None):
        """Create a snapshot LV for ``snapshot``."""
        self._create_snapshot(context, snapshot)

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""
        self._deallocate_container(snapshot['name'])
class LVMShareDriver(LVMMixin, driver.ShareDriver):
    """Executes commands relating to Shares."""

    def __init__(self, *args, **kwargs):
        """Initialize configuration and helper-compatibility state."""
        super(LVMShareDriver, self).__init__([False], *args, **kwargs)
        self.configuration.append_config_values(share_opts)
        self.configuration.append_config_values(generic.share_opts)
        self.configuration.share_mount_path = (
            self.configuration.lvm_share_export_root)
        self._helpers = None
        self.configured_ip_version = None
        backend = self.configuration.safe_get('share_backend_name')
        self.backend_name = backend or 'LVM'
        # Set of parameters used for compatibility with
        # Generic driver's helpers.
        self.share_server = {
            'instance_id': self.backend_name,
            'lock_name': 'manila_lvm',
            'public_addresses': self.configuration.lvm_share_export_ips,
        }
        self.ipv6_implemented = True
204 def _ssh_exec_as_root(self, server, command, check_exit_code=True):
205 kwargs = {}
206 if 'sudo' in command:
207 kwargs['run_as_root'] = True
208 command.remove('sudo')
209 kwargs['check_exit_code'] = check_exit_code
210 return self._execute(*command, **kwargs)
212 def do_setup(self, context):
213 """Any initialization the volume driver does while starting."""
214 super(LVMShareDriver, self).do_setup(context)
215 self._setup_helpers()
217 def _setup_helpers(self):
218 """Initializes protocol-specific NAS drivers."""
219 self._helpers = {}
220 for helper_str in self.configuration.lvm_share_helpers:
221 share_proto, _, import_str = helper_str.partition('=')
222 helper = importutils.import_class(import_str)
223 # TODO(rushiagr): better way to handle configuration
224 # instead of just passing to the helper
225 self._helpers[share_proto.upper()] = helper(
226 self._execute, self._ssh_exec_as_root, self.configuration)
228 def _get_local_path(self, share):
229 # The escape characters are expected by the device mapper.
230 escaped_group = (
231 self.configuration.lvm_share_volume_group.replace('-', '--'))
232 escaped_name = share['name'].replace('-', '--')
233 return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
235 def _update_share_stats(self):
236 """Retrieve stats info from share volume group."""
237 data = {
238 'share_backend_name': self.backend_name,
239 'storage_protocol': 'NFS_CIFS',
240 'reserved_percentage':
241 self.configuration.reserved_share_percentage,
242 'reserved_snapshot_percentage':
243 (self.configuration.reserved_share_from_snapshot_percentage
244 or self.configuration.reserved_share_percentage),
245 'reserved_share_extend_percentage':
246 (self.configuration.reserved_share_extend_percentage
247 or self.configuration.reserved_share_percentage),
248 'snapshot_support': True,
249 'create_share_from_snapshot_support': True,
250 'revert_to_snapshot_support': True,
251 'mount_snapshot_support': True,
252 'driver_name': 'LVMShareDriver',
253 'pools': self.get_share_server_pools(),
254 }
255 super(LVMShareDriver, self)._update_share_stats(data)
257 def get_share_server_pools(self, share_server=None):
258 try:
259 out, err = privsep_lvm.get_vgs(
260 self.configuration.lvm_share_volume_group)
261 except processutils.ProcessExecutionError:
262 msg = _("Failed to list LVM Volume Groups.")
263 raise exception.ShareBackendException(msg=msg)
264 total_size = re.findall(r"VSize\s[0-9.]+g", out)[0][6:-1]
265 free_size = re.findall(r"VFree\s[0-9.]+g", out)[0][6:-1]
266 return [{
267 'pool_name': 'lvm-single-pool',
268 'total_capacity_gb': float(total_size),
269 'free_capacity_gb': float(free_size),
270 'reserved_percentage': 0,
271 'reserved_snapshot_percentage': 0,
272 'reserved_share_extend_percentage': 0,
273 'mount_point_name_support': True,
274 }, ]
276 def create_share(self, context, share, share_server=None):
277 self._allocate_container(share)
278 # create file system
279 device_name = self._get_local_path(share)
280 share_export_location = self._get_mount_point_name(share)
281 location = self._get_helper(share).create_exports(
282 self.share_server, share_export_location)
283 self._mount_device(share, device_name)
284 return location
286 def create_share_from_snapshot(self, context, share, snapshot,
287 share_server=None, parent_share=None):
288 """Is called to create share from snapshot."""
289 self._allocate_container(share)
290 snapshot_device_name = self._get_local_path(snapshot)
291 share_device_name = self._get_local_path(share)
292 self._set_random_uuid_to_device(share)
293 self._copy_volume(
294 snapshot_device_name, share_device_name, share['size'])
295 share_export_location = self._get_mount_point_name(share)
296 location = self._get_helper(share).create_exports(
297 self.share_server, share_export_location)
298 self._mount_device(share, share_device_name)
299 return location
301 def delete_share(self, context, share, share_server=None):
302 self._unmount_device(share, raise_if_missing=False,
303 retry_busy_device=True)
304 self._delete_share(context, share)
305 self._deallocate_container(share['name'])
307 def _unmount_device(self, share_or_snapshot, raise_if_missing=True,
308 retry_busy_device=False):
309 """Unmount the filesystem of a share or snapshot LV."""
310 mount_path = self._get_mount_path(share_or_snapshot)
311 if os.path.exists(mount_path): 311 ↛ exitline 311 didn't return from function '_unmount_device' because the condition on line 311 was always true
313 retries = 10 if retry_busy_device else 1
315 @utils.retry(retry_param=exception.ShareBusyException,
316 retries=retries)
317 def _unmount_device_with_retry():
318 try:
319 privsep_os.umount(mount_path)
320 except exception.ProcessExecutionError as exc:
321 if 'is busy' in exc.stderr.lower():
322 raise exception.ShareBusyException(
323 reason=share_or_snapshot['name'])
324 elif 'not mounted' in exc.stderr.lower():
325 if raise_if_missing: 325 ↛ 326line 325 didn't jump to line 326 because the condition on line 325 was never true
326 LOG.error('Unable to find device: %s', exc)
327 raise
328 else:
329 LOG.error('Unable to umount: %s', exc)
330 raise
332 _unmount_device_with_retry()
333 # remove dir
334 try:
335 privsep_os.rmdir(mount_path)
336 except exception.ProcessExecutionError:
337 msg = _("Failed to remove the directory.")
338 raise exception.ShareBackendException(msg=msg)
340 def ensure_shares(self, context, shares):
341 updates = {}
342 for share in shares:
343 updates[share['id']] = {
344 'export_locations': self.ensure_share(context, share)}
345 return updates
347 def ensure_share(self, ctx, share, share_server=None):
348 """Ensure that storage are mounted and exported."""
349 device_name = self._get_local_path(share)
350 self._mount_device(share, device_name)
351 share_export_location = self._get_mount_point_name(share)
352 return self._get_helper(share).create_exports(
353 self.share_server,
354 share_export_location,
355 recreate=True
356 )
358 def _delete_share(self, ctx, share):
359 share_export_location = self._get_mount_point_name(share)
360 """Delete a share."""
361 try:
362 self._get_helper(share).remove_exports(
363 self.share_server, share_export_location)
364 except exception.ProcessExecutionError:
365 LOG.warning("Can't remove share %r", share['id'])
366 except exception.InvalidShare as exc:
367 LOG.warning(exc)
369 def update_access(self, context, share, access_rules, add_rules,
370 delete_rules, update_rules, share_server=None):
371 """Update access rules for given share.
373 This driver has two different behaviors according to parameters:
374 1. Recovery after error - 'access_rules' contains all access_rules,
375 'add_rules' and 'delete_rules' shall be empty. Previously existing
376 access rules are cleared and then added back according
377 to 'access_rules'.
379 2. Adding/Deleting of several access rules - 'access_rules' contains
380 all access_rules, 'add_rules' and 'delete_rules' contain rules which
381 should be added/deleted. Rules in 'access_rules' are ignored and
382 only rules from 'add_rules' and 'delete_rules' are applied.
384 :param context: Current context
385 :param share: Share model with share data.
386 :param access_rules: All access rules for given share
387 :param add_rules: Empty List or List of access rules which should be
388 added. access_rules already contains these rules.
389 :param delete_rules: Empty List or List of access rules which should be
390 removed. access_rules doesn't contain these rules.
391 :param update_rules: Empty List or List of access rules which should be
392 updated. access_rules already contains these rules.
393 :param share_server: None or Share server model
394 """
395 share_export_location = self._get_mount_point_name(share)
396 self._get_helper(share).update_access(self.share_server,
397 share_export_location,
398 access_rules,
399 add_rules=add_rules,
400 delete_rules=delete_rules)
402 def _get_helper(self, share):
403 if share['share_proto'].lower().startswith('nfs'):
404 return self._helpers['NFS']
405 elif share['share_proto'].lower().startswith('cifs'):
406 return self._helpers['CIFS']
407 else:
408 raise exception.InvalidShare(reason='Wrong share protocol')
410 def _mount_device(self, share_or_snapshot, device_name):
411 """Mount LV for share or snapshot and ignore if already mounted."""
412 mount_path = self._get_mount_path(share_or_snapshot)
413 self._execute('mkdir', '-p', mount_path)
414 try:
415 privsep_os.mount(device_name, mount_path)
416 privsep_os.chmod('777', mount_path)
417 except exception.ProcessExecutionError:
418 out, err = privsep_os.list_mounts()
419 if device_name in out: 419 ↛ 420line 419 didn't jump to line 420 because the condition on line 419 was never true
420 LOG.warning("%s is already mounted", device_name)
421 else:
422 raise
423 return mount_path
425 def _get_mount_path(self, share_or_snapshot):
426 """Returns path where share or snapshot is mounted."""
427 return os.path.join(self.configuration.share_mount_path,
428 share_or_snapshot['name'])
430 def _copy_volume(self, srcstr, deststr, size_in_g):
431 # Use O_DIRECT to avoid thrashing the system buffer cache
432 # Check whether O_DIRECT is supported
433 use_direct_io = (
434 privsep_os.is_data_definition_direct_io_supported(srcstr, deststr))
436 # Perform the copy
437 try:
438 privsep_os.data_definition(
439 srcstr, deststr, (size_in_g * 1024),
440 use_direct_io=use_direct_io)
441 except exception.ProcessExecutionError:
442 msg = _("Failed while copying from the snapshot to the share.")
443 raise exception.ShareBackendException(msg=msg)
445 def extend_share(self, share, new_size, share_server=None):
446 device_name = self._get_local_path(share)
447 self._extend_container(share, device_name, new_size)
449 def revert_to_snapshot(self, context, snapshot, share_access_rules,
450 snapshot_access_rules, share_server=None):
451 share = snapshot['share']
452 snapshot_export_location = self._get_mount_point_name(snapshot)
453 share_export_location = self._get_mount_point_name(share)
454 # Temporarily remove all access rules
455 self._get_helper(share).update_access(self.share_server,
456 snapshot_export_location,
457 [], [], [])
458 self._get_helper(share).update_access(self.share_server,
459 share_export_location,
460 [], [], [])
461 # Unmount the snapshot filesystem
462 self._unmount_device(snapshot)
463 # Unmount the share filesystem
464 self._unmount_device(share)
465 # Merge the snapshot LV back into the share, reverting it
466 try:
467 privsep_lvm.lvconvert(self.configuration.lvm_share_volume_group,
468 snapshot['name'])
469 except exception.ProcessExecutionError:
470 msg = _('Failed to revert the share to the given snapshot.')
471 raise exception.ShareBackendException(msg=msg)
473 # Now recreate the snapshot that was destroyed by the merge
474 self._create_snapshot(context, snapshot)
475 # At this point we can mount the share again
476 device_name = self._get_local_path(share)
477 self._mount_device(share, device_name)
478 # Also remount the snapshot
479 device_name = self._get_local_path(snapshot)
480 self._mount_device(snapshot, device_name)
481 share_export_location = self._get_mount_point_name(share)
482 snapshot_export_location = self._get_mount_point_name(share)
483 # Lastly we add all the access rules back
484 self._get_helper(share).update_access(self.share_server,
485 share_export_location,
486 share_access_rules,
487 [], [])
488 snapshot_access_rules, __, __ = share_utils.change_rules_to_readonly(
489 snapshot_access_rules, [], [])
490 self._get_helper(share).update_access(self.share_server,
491 snapshot_export_location,
492 snapshot_access_rules,
493 [], [])
495 def create_snapshot(self, context, snapshot, share_server=None):
496 self._create_snapshot(context, snapshot)
498 device_name = self._get_local_path(snapshot)
499 self._mount_device(snapshot, device_name)
501 helper = self._get_helper(snapshot['share'])
502 exports = helper.create_exports(self.share_server, snapshot['name'])
504 return {'export_locations': exports}
506 def delete_snapshot(self, context, snapshot, share_server=None):
507 self._unmount_device(snapshot, raise_if_missing=False)
509 super(LVMShareDriver, self).delete_snapshot(context, snapshot,
510 share_server)
512 def get_configured_ip_versions(self):
513 if self.configured_ip_version is None: 513 ↛ 523line 513 didn't jump to line 523 because the condition on line 513 was always true
514 try:
515 self.configured_ip_version = []
516 for ip in self.configuration.lvm_share_export_ips:
517 self.configured_ip_version.append(
518 ipaddress.ip_address(str(ip)).version)
519 except Exception:
520 message = (_("Invalid 'lvm_share_export_ips' option supplied "
521 "%s.") % self.configuration.lvm_share_export_ips)
522 raise exception.InvalidInput(reason=message)
523 return self.configured_ip_version
525 def snapshot_update_access(self, context, snapshot, access_rules,
526 add_rules, delete_rules, share_server=None):
527 """Update access rules for given snapshot.
529 This driver has two different behaviors according to parameters:
530 1. Recovery after error - 'access_rules' contains all access_rules,
531 'add_rules' and 'delete_rules' shall be empty. Previously existing
532 access rules are cleared and then added back according
533 to 'access_rules'.
535 2. Adding/Deleting of several access rules - 'access_rules' contains
536 all access_rules, 'add_rules' and 'delete_rules' contain rules which
537 should be added/deleted. Rules in 'access_rules' are ignored and
538 only rules from 'add_rules' and 'delete_rules' are applied.
540 :param context: Current context
541 :param snapshot: Snapshot model with snapshot data.
542 :param access_rules: All access rules for given snapshot
543 :param add_rules: Empty List or List of access rules which should be
544 added. access_rules already contains these rules.
545 :param delete_rules: Empty List or List of access rules which should be
546 removed. access_rules doesn't contain these rules.
547 :param share_server: None or Share server model
548 """
549 helper = self._get_helper(snapshot['share'])
550 access_rules, add_rules, delete_rules = (
551 share_utils.change_rules_to_readonly(
552 access_rules, add_rules, delete_rules)
553 )
555 helper.update_access(self.share_server,
556 snapshot['name'], access_rules,
557 add_rules=add_rules, delete_rules=delete_rules)
559 def update_share_usage_size(self, context, shares):
560 updated_shares = []
561 out, err = self._execute(
562 'df', '-l', '--output=target,used',
563 '--block-size=g')
564 gathered_at = timeutils.utcnow()
566 for share in shares:
567 try:
568 mount_path = self._get_mount_path(share)
569 if os.path.exists(mount_path):
570 used_size = (re.findall(
571 mount_path + r"\s*[0-9.]+G", out)[0].
572 split(' ')[-1][:-1])
573 updated_shares.append({'id': share['id'],
574 'used_size': used_size,
575 'gathered_at': gathered_at})
576 else:
577 raise exception.NotFound(
578 _("Share mount path %s could not be "
579 "found.") % mount_path)
580 except Exception:
581 LOG.exception("Failed to gather 'used_size' for share %s.",
582 share['id'])
584 return updated_shares
586 def get_backend_info(self, context):
587 return {
588 'export_ips': ','.join(self.share_server['public_addresses']),
589 'db_version': share_utils.get_recent_db_migration_id(),
590 }