Coverage for manila/tests/share/drivers/dummy.py: 0%
447 statements
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
1# Copyright 2016 Mirantis inc.
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
16"""
17Dummy share driver for testing Manila APIs and other interfaces.
19This driver simulates support of:
20- Both available driver modes: DHSS=True/False
21- NFS and CIFS protocols
22- IP access for NFS shares and USER access for CIFS shares
23- CIFS shares in DHSS=True driver mode
24- Creation and deletion of share snapshots
25- Share replication (readable)
26- Share migration
27- Consistency groups
28- Resize of a share (extend/shrink)
30"""
32import functools
33import time
35from oslo_config import cfg
36from oslo_log import log
37from oslo_serialization import jsonutils
38from oslo_utils import timeutils
40from manila.common import constants
41from manila import exception
42from manila.i18n import _
43from manila.keymgr import barbican as barbican_api
44from manila.share import configuration
45from manila.share import driver
46from manila.share.manager import share_manager_opts # noqa
47from manila.share import utils as share_utils
49LOG = log.getLogger(__name__)
# Configuration options understood by the dummy driver.  The delays simulate
# a real back end taking time to serve requests.
dummy_opts = [
    cfg.FloatOpt(
        "dummy_driver_default_driver_method_delay",
        help="Defines default time delay in seconds for each dummy driver "
             "method. To redefine some specific method delay use other "
             "'dummy_driver_driver_methods_delays' config opt. Optional.",
        default=2.0,
        min=0,
    ),
    cfg.DictOpt(
        "dummy_driver_driver_methods_delays",
        help="It is dictionary-like config option, that consists of "
             "driver method names as keys and integer/float values that are "
             "time delay in seconds. Optional.",
        # NOTE: DictOpt values read from config arrive as strings; the
        # slow_me_down decorator casts every value with float() anyway, so
        # the mixed string/float defaults below behave the same.
        default={
            "ensure_share": "1.05",
            "create_share": "3.98",
            "get_pool": "0.5",
            "do_setup": "0.05",
            "_get_pools_info": "0.1",
            "_update_share_stats": "0.3",
            "create_replica": "3.99",
            "delete_replica": "2.98",
            "promote_replica": "0.75",
            "update_replica_state": "0.85",
            "create_replicated_snapshot": "4.15",
            "delete_replicated_snapshot": "3.16",
            "update_replicated_snapshot": "1.17",
            "migration_start": 1.01,
            "migration_continue": 1.02,  # it will be called 2 times
            "migration_complete": 1.03,
            "migration_cancel": 1.04,
            "migration_get_progress": 1.05,
            "migration_check_compatibility": 0.05,
            "create_backup": "1.50",
            "restore_backup": "1.50",
            "update_share_network_subnet_from_metadata": "0.5",
        },
    ),
]

CONF = cfg.CONF
def slow_me_down(f):
    """Decorator that delays a driver method by its configured interval.

    The per-method delay is looked up in the
    'dummy_driver_driver_methods_delays' mapping by the wrapped function's
    name, falling back to 'dummy_driver_default_driver_method_delay'.
    """
    @functools.wraps(f)
    def wrapped_func(self, *args, **kwargs):
        delays = self.configuration.safe_get(
            "dummy_driver_driver_methods_delays")
        default_delay = self.configuration.safe_get(
            "dummy_driver_default_driver_method_delay")
        # Values may be strings (DictOpt) or numbers; float() accepts both.
        time.sleep(float(delays.get(f.__name__, default_delay)))
        return f(self, *args, **kwargs)
    return wrapped_func
def get_backend_configuration(backend_name):
    """Build a Configuration object for the named backend stanza.

    :param backend_name: name of the config stanza of the backend.
    :raises BadConfigurationException: if the stanza does not exist.
    :returns: a Configuration with driver, dummy, manager and SSH opts.
    """
    available_stanzas = CONF.list_all_sections()
    if backend_name not in available_stanzas:
        msg = _("Could not find backend stanza %(backend_name)s in "
                "configuration which is required for share replication and "
                "migration. Available stanzas are %(stanzas)s")
        raise exception.BadConfigurationException(
            reason=msg % {
                "stanzas": available_stanzas,
                "backend_name": backend_name,
            })

    backend_config = configuration.Configuration(
        driver.share_opts, config_group=backend_name)
    for extra_opts in (dummy_opts, share_manager_opts, driver.ssh_opts):
        backend_config.append_config_values(extra_opts)
    return backend_config
139class DummyDriver(driver.ShareDriver):
140 """Dummy share driver that implements all share driver interfaces."""
    def __init__(self, *args, **kwargs):
        """Do initialization."""
        # First positional argument declares DHSS support: the dummy driver
        # runs in both driver_handles_share_servers=False and =True modes.
        super(DummyDriver, self).__init__(
            [False, True], *args, config_opts=[dummy_opts], **kwargs)
        self._verify_configuration()
        # Key-value store used to persist fake provider locations/names.
        self.private_storage = kwargs.get('private_storage')
        self.backend_name = self.configuration.safe_get(
            "share_backend_name") or "DummyDriver"
        # Maps share / share-server ids to simulated progress (0-100).
        self.migration_progress = {}
        # Optional capabilities this dummy driver pretends to support.
        self.security_service_update_support = True
        self.network_allocation_update_support = True
        self.share_replicas_migration_support = True
        self.encryption_support = ['share_server']
156 def _verify_configuration(self):
157 allowed_driver_methods = [m for m in dir(self) if m[0] != '_']
158 allowed_driver_methods.extend([
159 "_setup_server",
160 "_teardown_server",
161 "_get_pools_info",
162 "_update_share_stats",
163 ])
164 disallowed_driver_methods = (
165 "get_admin_network_allocations_number",
166 "get_network_allocations_number",
167 "get_share_server_pools",
168 )
169 for k, v in self.configuration.safe_get(
170 "dummy_driver_driver_methods_delays").items():
171 if k not in allowed_driver_methods:
172 raise exception.BadConfigurationException(reason=(
173 "Dummy driver does not have '%s' method." % k
174 ))
175 elif k in disallowed_driver_methods:
176 raise exception.BadConfigurationException(reason=(
177 "Method '%s' does not support delaying." % k
178 ))
179 try:
180 float(v)
181 except (TypeError, ValueError):
182 raise exception.BadConfigurationException(reason=(
183 "Wrong value (%(v)s) for '%(k)s' dummy driver method time "
184 "delay is set in 'dummy_driver_driver_methods_delays' "
185 "config option." % {"k": k, "v": v}
186 ))
188 def _get_share_name(self, share):
189 mount_point_name = share.get('mount_point_name')
190 if mount_point_name is not None:
191 return mount_point_name
192 return "share_%(s_id)s_%(si_id)s" % {
193 "s_id": share["share_id"].replace("-", "_"),
194 "si_id": share["id"].replace("-", "_")}
196 def _get_snapshot_name(self, snapshot):
197 return "snapshot_%(s_id)s_%(si_id)s" % {
198 "s_id": snapshot["snapshot_id"].replace("-", "_"),
199 "si_id": snapshot["id"].replace("-", "_")}
201 def _get_export(self, mountpoint, ip, is_admin_only, preferred):
202 return {
203 "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint},
204 "metadata": {
205 "preferred": preferred,
206 },
207 "is_admin_only": is_admin_only,
208 }
210 def _get_subnet_allocations_from_backend_details(self, backend_details):
211 """Reads subnet_allocations info from backend details"""
212 # NOTE(sfernand): Ensure backward compatibility for share servers
213 # created prior to the addition of support to multiple subnets per AZ,
214 # by read ip information using the old format in case
215 # subnet_allocations does not exist.
216 if 'subnet_allocations' in backend_details:
217 subnet_allocations = jsonutils.loads(
218 backend_details['subnet_allocations'])
219 else:
220 subnet_allocations = [{
221 'primary_public_ip':
222 backend_details['primary_public_ip'],
223 'secondary_public_ip':
224 backend_details['secondary_public_ip']
225 }]
226 return subnet_allocations
228 def _generate_export_locations(self, mountpoint, share_server=None):
229 if share_server:
230 backend_details = share_server['backend_details']
231 subnet_allocations = (
232 self._get_subnet_allocations_from_backend_details(
233 backend_details))
234 service_ip = backend_details["service_ip"]
235 else:
236 subnet_allocations = [{
237 "primary_public_ip": "10.0.0.10",
238 "secondary_public_ip": "10.0.0.20",
239 }]
240 service_ip = "11.0.0.11"
242 export_locations = [
243 self._get_export(mountpoint, service_ip, True, False)]
244 for subnet_allocation in subnet_allocations:
245 export_locations.append(
246 self._get_export(
247 mountpoint, subnet_allocation["primary_public_ip"],
248 False, True))
249 export_locations.append(
250 self._get_export(
251 mountpoint, subnet_allocation["secondary_public_ip"],
252 False, False))
254 return export_locations
    def _create_share(self, context, share, share_server=None):
        """Record a fake share in private storage and build its exports.

        :raises InvalidShareAccess: if the protocol is not NFS or CIFS.
        :returns: list of export-location dicts.
        """
        share_proto = share["share_proto"]
        if share_proto not in ("NFS", "CIFS"):
            msg = _("Unsupported share protocol provided - %s.") % share_proto
            raise exception.InvalidShareAccess(reason=msg)

        encryption_key_ref = share.get('encryption_key_ref')
        if encryption_key_ref and context:
            # Resolve the Barbican secret href only to demonstrate the key
            # reference is usable; nothing is actually encrypted.
            encryption_key_href = barbican_api.get_secret_href(
                context, encryption_key_ref)
            LOG.debug("Generated encryption_key_href %s for share create "
                      "request.", encryption_key_href)

        share_name = self._get_share_name(share)
        mountpoint = "/path/to/fake/share/%s" % share_name
        # Remember the fake provider data so later calls (status queries,
        # migration) can find this share again.
        self.private_storage.update(
            share["id"], {
                "fake_provider_share_name": share_name,
                "fake_provider_location": mountpoint,
            }
        )
        return self._generate_export_locations(
            mountpoint, share_server=share_server)
    @slow_me_down
    def create_share(self, context, share, share_server=None):
        """Is called to create share."""
        # Returns the export locations produced by the shared helper.
        return self._create_share(context, share, share_server=share_server)
285 @slow_me_down
286 def create_share_from_snapshot(self, context, share, snapshot,
287 share_server=None, parent_share=None):
288 """Is called to create share from snapshot."""
289 export_locations = self._create_share(
290 context, share, share_server=share_server)
291 return {
292 'export_locations': export_locations,
293 'status': constants.STATUS_AVAILABLE
294 }
296 def _create_snapshot(self, snapshot, share_server=None):
297 snapshot_name = self._get_snapshot_name(snapshot)
298 mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name
299 self.private_storage.update(
300 snapshot["id"], {
301 "fake_provider_snapshot_name": snapshot_name,
302 "fake_provider_location": mountpoint,
303 }
304 )
305 return {
306 'fake_key1': 'fake_value1',
307 'fake_key2': 'fake_value2',
308 'fake_key3': 'fake_value3',
309 "provider_location": mountpoint,
310 "export_locations": self._generate_export_locations(
311 mountpoint, share_server=share_server)
312 }
    @slow_me_down
    def create_snapshot(self, context, snapshot, share_server=None):
        """Is called to create snapshot."""
        # Returns a model update with provider_location and export locations.
        return self._create_snapshot(snapshot, share_server)
    @slow_me_down
    def delete_share(self, context, share, share_server=None):
        """Is called to remove share."""
        # Forget the fake provider data recorded at creation time.
        self.private_storage.delete(share["id"])
    @slow_me_down
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Is called to remove snapshot."""
        LOG.debug('Deleting snapshot with following data: %s', snapshot)
        # Forget the fake provider data recorded at creation time.
        self.private_storage.delete(snapshot["id"])
330 @slow_me_down
331 def get_pool(self, share):
332 """Return pool name where the share resides on."""
333 pool_name = share_utils.extract_host(share["host"], level="pool")
334 return pool_name
    @slow_me_down
    def ensure_share(self, context, share, share_server=None):
        """Invoked to ensure that share is exported."""
        # Nothing to do: dummy exports always "exist".
340 @slow_me_down
341 def update_access(self, context, share, access_rules, add_rules,
342 delete_rules, update_rules, share_server=None):
343 """Update access rules for given share."""
344 for rule in add_rules + access_rules:
345 share_proto = share["share_proto"].lower()
346 access_type = rule["access_type"].lower()
347 if not (
348 (share_proto == "nfs" and access_type == "ip") or
349 (share_proto == "cifs" and access_type == "user")):
350 msg = _("Unsupported '%(access_type)s' access type provided "
351 "for '%(share_proto)s' share protocol.") % {
352 "access_type": access_type, "share_proto": share_proto}
353 raise exception.InvalidShareAccess(reason=msg)
355 @slow_me_down
356 def snapshot_update_access(self, context, snapshot, access_rules,
357 add_rules, delete_rules, share_server=None):
358 """Update access rules for given snapshot."""
359 self.update_access(context, snapshot['share'], access_rules,
360 add_rules, delete_rules, share_server)
    @slow_me_down
    def do_setup(self, context):
        """Any initialization the share driver does while starting."""
        # No back end to connect to; only the configured delay applies.
366 @slow_me_down
367 def manage_existing(self, share, driver_options):
368 """Brings an existing share under Manila management."""
369 new_export = share['export_location']
370 old_share_id = self._get_share_id_from_export(new_export)
371 old_export = self.private_storage.get(
372 old_share_id, key='export_location')
373 if old_export.split(":/")[-1] == new_export.split(":/")[-1]:
374 result = {
375 "size": 1,
376 "export_locations": self._create_share(None, share)
377 }
378 self.private_storage.delete(old_share_id)
379 return result
380 else:
381 msg = ("Invalid export specified, existing share %s"
382 " could not be found" % old_share_id)
383 raise exception.ShareBackendException(msg=msg)
    @slow_me_down
    def manage_existing_with_server(
            self, share, driver_options, share_server=None):
        # DHSS=True variant: the share server is irrelevant for the dummy
        # back end, so reuse the DHSS=False logic.
        return self.manage_existing(share, driver_options)
390 def _get_share_id_from_export(self, export_location):
391 values = export_location.split('share_')
392 if len(values) > 1:
393 return values[1][37:].replace("_", "-")
394 else:
395 return export_location
    @slow_me_down
    def unmanage(self, share):
        """Removes the specified share from Manila management."""
        # Keep the export location so the share can be re-managed later.
        self.private_storage.update(
            share['id'], {'export_location': share['export_location']})
    @slow_me_down
    def unmanage_with_server(self, share, share_server=None):
        # DHSS=True variant; the share server is irrelevant here.
        self.unmanage(share)
    @slow_me_down
    def manage_existing_snapshot_with_server(self, snapshot, driver_options,
                                             share_server=None):
        # DHSS=True variant; delegates to the DHSS=False implementation.
        return self.manage_existing_snapshot(snapshot, driver_options)
412 @slow_me_down
413 def manage_existing_snapshot(self, snapshot, driver_options):
414 """Brings an existing snapshot under Manila management."""
415 old_snap_id = self._get_snap_id_from_provider_location(
416 snapshot['provider_location'])
417 old_provider_location = self.private_storage.get(
418 old_snap_id, key='provider_location')
419 if old_provider_location == snapshot['provider_location']:
420 self._create_snapshot(snapshot)
421 self.private_storage.delete(old_snap_id)
422 return {"size": 1,
423 "provider_location": snapshot["provider_location"]}
424 else:
425 msg = ("Invalid provider location specified, existing snapshot %s"
426 " could not be found" % old_snap_id)
427 raise exception.ShareBackendException(msg=msg)
429 def _get_snap_id_from_provider_location(self, provider_location):
430 values = provider_location.split('snapshot_')
431 if len(values) > 1:
432 return values[1][37:].replace("_", "-")
433 else:
434 return provider_location
    @slow_me_down
    def unmanage_snapshot(self, snapshot):
        """Removes the specified snapshot from Manila management."""
        # Keep the provider location so the snapshot can be re-managed later.
        self.private_storage.update(
            snapshot['id'],
            {'provider_location': snapshot['provider_location']})
    @slow_me_down
    def unmanage_snapshot_with_server(self, snapshot, share_server=None):
        # DHSS=True variant; the share server is irrelevant here.
        self.unmanage_snapshot(snapshot)
    @slow_me_down
    def revert_to_snapshot(self, context, snapshot, share_access_rules,
                           snapshot_access_rules, share_server=None):
        """Reverts a share (in place) to the specified snapshot."""
        # No data to roll back; success is simulated by doing nothing.
    @slow_me_down
    def extend_share(self, share, new_size, share_server=None):
        """Extends size of existing share."""
        # Fake capacity is unlimited, so extending always succeeds silently.
    @slow_me_down
    def shrink_share(self, share, new_size, share_server=None):
        """Shrinks size of existing share."""
        # No real data exists, so shrinking always succeeds silently.
460 def get_network_allocations_number(self):
461 """Returns number of network allocations for creating VIFs."""
462 return 2
464 def get_admin_network_allocations_number(self):
465 return 1
467 @slow_me_down
468 def _setup_server(self, network_info, metadata=None):
469 """Sets up and configures share server with given network parameters.
471 Redefine it within share driver when it is going to handle share
472 servers.
473 """
474 common_net_info = network_info[0]
475 server_details = {
476 "service_ip": common_net_info[
477 "admin_network_allocations"][0]["ip_address"],
478 "username": "fake_username",
479 "server_id": common_net_info['server_id'],
480 }
482 subnet_allocations = []
483 for subnet_info in network_info:
484 subnet_allocations.append({
485 "primary_public_ip": subnet_info[
486 "network_allocations"][0]["ip_address"],
487 "secondary_public_ip": subnet_info[
488 "network_allocations"][1]["ip_address"]
489 })
491 server_details['subnet_allocations'] = jsonutils.dumps(
492 subnet_allocations)
493 return server_details
    @slow_me_down
    def _teardown_server(self, server_details, security_services=None):
        """Tears down share server."""
        # Nothing was actually provisioned, so there is nothing to clean up.
499 @slow_me_down
500 def _get_pools_info(self):
501 pools = [{
502 "pool_name": "fake_pool_for_%s" % self.backend_name,
503 "total_capacity_gb": 1230.0,
504 "free_capacity_gb": 1210.0,
505 "reserved_percentage":
506 self.configuration.reserved_share_percentage,
507 "reserved_snapshot_percentage":
508 self.configuration.reserved_share_from_snapshot_percentage,
509 "reserved_share_extend_percentage":
510 self.configuration.reserved_share_extend_percentage
511 }]
512 if self.configuration.replication_domain:
513 pools[0]["replication_type"] = "readable"
514 return pools
    @slow_me_down
    def _update_share_stats(self, data=None):
        """Retrieve stats info from share group."""
        # NOTE: the incoming 'data' argument is intentionally ignored and
        # the stats dict is rebuilt from scratch on every call.
        data = {
            "share_backend_name": self.backend_name,
            "storage_protocol": "NFS_CIFS",
            "reserved_percentage":
                self.configuration.reserved_share_percentage,
            "reserved_snapshot_percentage":
                self.configuration.reserved_share_from_snapshot_percentage,
            "reserved_share_extend_percentage":
                self.configuration.reserved_share_extend_percentage,
            "snapshot_support": True,
            "create_share_from_snapshot_support": True,
            "revert_to_snapshot_support": True,
            "mount_snapshot_support": True,
            "driver_name": "Dummy",
            "pools": self._get_pools_info(),
            "share_group_stats": {
                "consistent_snapshot_support": "pool",
            },
            'share_server_multiple_subnet_support': True,
            'mount_point_name_support': True,
        }
        # Replication is only advertised when a replication domain is set.
        if self.configuration.replication_domain:
            data["replication_type"] = "readable"
        super(DummyDriver, self)._update_share_stats(data)
544 def get_share_server_pools(self, share_server):
545 """Return list of pools related to a particular share server."""
546 return []
    @slow_me_down
    def create_consistency_group(self, context, cg_dict, share_server=None):
        """Create a consistency group."""
        # Nothing to provision; just record the simulated success.
        LOG.debug(
            "Successfully created dummy Consistency Group with ID: %s.",
            cg_dict["id"])
    @slow_me_down
    def delete_consistency_group(self, context, cg_dict, share_server=None):
        """Delete a consistency group."""
        # Nothing to tear down; just record the simulated success.
        LOG.debug(
            "Successfully deleted dummy consistency group with ID %s.",
            cg_dict["id"])
    @slow_me_down
    def create_cgsnapshot(self, context, snap_dict, share_server=None):
        """Create a consistency group snapshot."""
        LOG.debug("Successfully created CG snapshot %s.", snap_dict["id"])
        # No model updates for the group snapshot or its members.
        return None, None
    @slow_me_down
    def delete_cgsnapshot(self, context, snap_dict, share_server=None):
        """Delete a consistency group snapshot."""
        LOG.debug("Successfully deleted CG snapshot %s.", snap_dict["id"])
        # No model updates for the group snapshot or its members.
        return None, None
    @slow_me_down
    def create_consistency_group_from_cgsnapshot(
            self, context, cg_dict, cgsnapshot_dict, share_server=None):
        """Create a consistency group from a cgsnapshot."""
        LOG.debug(
            ("Successfully created dummy Consistency Group (%(cg_id)s) "
             "from CG snapshot (%(cg_snap_id)s)."),
            {"cg_id": cg_dict["id"], "cg_snap_id": cgsnapshot_dict["id"]})
        # No CG model update and no member share updates.
        return None, []
584 @slow_me_down
585 def create_replica(self, context, replica_list, new_replica,
586 access_rules, replica_snapshots, share_server=None):
587 """Replicate the active replica to a new replica on this backend."""
588 replica_name = self._get_share_name(new_replica)
589 mountpoint = "/path/to/fake/share/%s" % replica_name
590 self.private_storage.update(
591 new_replica["id"], {
592 "fake_provider_replica_name": replica_name,
593 "fake_provider_location": mountpoint,
594 }
595 )
596 return {
597 "export_locations": self._generate_export_locations(
598 mountpoint, share_server=share_server),
599 "replica_state": constants.REPLICA_STATE_IN_SYNC,
600 "access_rules_status": constants.STATUS_ACTIVE,
601 }
    @slow_me_down
    def delete_replica(self, context, replica_list, replica_snapshots,
                       replica, share_server=None):
        """Delete a replica."""
        # Forget the fake provider data recorded at replica creation.
        self.private_storage.delete(replica["id"])
609 @slow_me_down
610 def promote_replica(self, context, replica_list, replica, access_rules,
611 share_server=None, quiesce_wait_time=None):
612 """Promote a replica to 'active' replica state."""
613 return_replica_list = []
614 for r in replica_list:
615 if r["id"] == replica["id"]:
616 replica_state = constants.REPLICA_STATE_ACTIVE
617 else:
618 replica_state = constants.REPLICA_STATE_IN_SYNC
619 return_replica_list.append(
620 {"id": r["id"], "replica_state": replica_state})
621 return return_replica_list
    @slow_me_down
    def update_replica_state(self, context, replica_list, replica,
                             access_rules, replica_snapshots,
                             share_server=None):
        """Update the replica_state of a replica."""
        # Dummy replicas are always considered synchronized.
        return constants.REPLICA_STATE_IN_SYNC
630 @slow_me_down
631 def create_replicated_snapshot(self, context, replica_list,
632 replica_snapshots, share_server=None):
633 """Create a snapshot on active instance and update across the replicas.
635 """
636 return_replica_snapshots = []
637 for r in replica_snapshots:
638 return_replica_snapshots.append(
639 {"id": r["id"], "status": constants.STATUS_AVAILABLE})
640 return return_replica_snapshots
    @slow_me_down
    def revert_to_replicated_snapshot(self, context, active_replica,
                                      replica_list, active_replica_snapshot,
                                      replica_snapshots, share_access_rules,
                                      snapshot_access_rules,
                                      share_server=None):
        """Reverts a replicated share (in place) to the specified snapshot."""
        # No data to roll back; success is simulated by doing nothing.
650 @slow_me_down
651 def delete_replicated_snapshot(self, context, replica_list,
652 replica_snapshots, share_server=None):
653 """Delete a snapshot by deleting its instances across the replicas."""
654 return_replica_snapshots = []
655 for r in replica_snapshots:
656 return_replica_snapshots.append(
657 {"id": r["id"], "status": constants.STATUS_DELETED})
658 return return_replica_snapshots
    @slow_me_down
    def update_replicated_snapshot(self, context, replica_list,
                                   share_replica, replica_snapshots,
                                   replica_snapshot, share_server=None):
        """Update the status of a snapshot instance that lives on a replica."""
        # Dummy snapshot instances are always available.
        return {
            "id": replica_snapshot["id"], "status": constants.STATUS_AVAILABLE}
668 @slow_me_down
669 def migration_check_compatibility(
670 self, context, source_share, destination_share,
671 share_server=None, destination_share_server=None):
672 """Is called to test compatibility with destination backend."""
673 backend_name = share_utils.extract_host(
674 destination_share['host'], level='backend_name')
675 config = get_backend_configuration(backend_name)
676 compatible = 'Dummy' in config.share_driver
677 return {
678 'compatible': compatible,
679 'writable': compatible,
680 'preserve_metadata': compatible,
681 'nondisruptive': False,
682 'preserve_snapshots': compatible,
683 }
685 @slow_me_down
686 def migration_start(
687 self, context, source_share, destination_share, source_snapshots,
688 snapshot_mappings, share_server=None,
689 destination_share_server=None):
690 """Is called to perform 1st phase of driver migration of a given share.
692 """
693 LOG.debug(
694 "Migration of dummy share with ID '%s' has been started.",
695 source_share["id"])
696 self.migration_progress[source_share['share_id']] = 0
698 @slow_me_down
699 def migration_continue(
700 self, context, source_share, destination_share, source_snapshots,
701 snapshot_mappings, share_server=None,
702 destination_share_server=None):
704 if source_share["id"] not in self.migration_progress:
705 self.migration_progress[source_share["id"]] = 0
707 self.migration_progress[source_share["id"]] += 50
709 LOG.debug(
710 "Migration of dummy share with ID '%s' is continuing, %s.",
711 source_share["id"],
712 self.migration_progress[source_share["id"]])
714 return self.migration_progress[source_share["id"]] == 100
    @slow_me_down
    def migration_complete(
            self, context, source_share, destination_share, source_snapshots,
            snapshot_mappings, share_server=None,
            destination_share_server=None):
        """Is called to perform 2nd phase of driver migration of a given share.

        """
        # Recreate each source snapshot on the destination and collect the
        # resulting model updates keyed by destination snapshot instance id.
        snapshot_updates = {}
        for src_snap_ins, dest_snap_ins in snapshot_mappings.items():
            snapshot_updates[dest_snap_ins['id']] = self._create_snapshot(
                dest_snap_ins)
        return {
            'snapshot_updates': snapshot_updates,
            'export_locations': self._do_migration(
                source_share, destination_share, share_server)
        }
    def _do_migration(self, source_share_ref, dest_share_ref, share_server):
        """Move the private-storage bookkeeping from source to destination."""
        share_name = self._get_share_name(dest_share_ref)
        mountpoint = "/path/to/fake/share/%s" % share_name
        self.private_storage.delete(source_share_ref["id"])
        self.private_storage.update(
            dest_share_ref["id"], {
                "fake_provider_share_name": share_name,
                "fake_provider_location": mountpoint,
            }
        )
        LOG.debug(
            "Migration of dummy share with ID '%s' has been completed.",
            source_share_ref["id"])
        # Drop the progress entry; pop() tolerates a missing key.
        self.migration_progress.pop(source_share_ref["id"], None)

        return self._generate_export_locations(
            mountpoint, share_server=share_server)
    @slow_me_down
    def migration_cancel(
            self, context, source_share, destination_share, source_snapshots,
            snapshot_mappings, share_server=None,
            destination_share_server=None):
        """Is called to cancel driver migration."""
        LOG.debug(
            "Migration of dummy share with ID '%s' has been canceled.",
            source_share["id"])
        # Drop the progress entry; pop() tolerates a missing key.
        self.migration_progress.pop(source_share["id"], None)
763 @slow_me_down
764 def migration_get_progress(
765 self, context, source_share, destination_share, source_snapshots,
766 snapshot_mappings, share_server=None,
767 destination_share_server=None):
768 """Is called to get migration progress."""
769 # Simulate migration progress.
770 if source_share["id"] not in self.migration_progress:
771 self.migration_progress[source_share["id"]] = 0
772 total_progress = self.migration_progress[source_share["id"]]
773 LOG.debug("Progress of current dummy share migration "
774 "with ID '%(id)s' is %(progress)s.", {
775 "id": source_share["id"],
776 "progress": total_progress
777 })
778 return {"total_progress": total_progress}
780 def share_server_migration_check_compatibility(
781 self, context, share_server, dest_host, old_share_network,
782 new_share_network, shares_request_spec):
783 """Is called to check migration compatibility for a share server."""
784 backend_name = share_utils.extract_host(
785 dest_host, level='backend_name')
786 config = get_backend_configuration(backend_name)
787 compatible = 'Dummy' in config.share_driver
788 return {
789 'compatible': compatible,
790 'writable': compatible,
791 'preserve_snapshots': compatible,
792 'nondisruptive': False,
793 'share_network_id': new_share_network['id'],
794 'migration_cancel': compatible,
795 'migration_get_progress': compatible,
796 }
    @slow_me_down
    def share_server_migration_start(self, context, src_share_server,
                                     dest_share_server, shares, snapshots):
        """Is called to perform 1st phase of migration of a share server."""
        LOG.debug(
            "Migration of dummy share server with ID '%s' has been started.",
            src_share_server["id"])
        # Start tracking simulated progress for this server.
        self.migration_progress[src_share_server['id']] = 0
807 @slow_me_down
808 def share_server_migration_continue(self, context, src_share_server,
809 dest_share_server, shares, snapshots):
810 """Is called to continue the migration of a share server."""
811 if src_share_server["id"] not in self.migration_progress:
812 self.migration_progress[src_share_server["id"]] = 0
814 self.migration_progress[src_share_server["id"]] += 50
816 LOG.debug(
817 "Migration of dummy share server with ID '%s' is continuing, %s.",
818 src_share_server["id"],
819 self.migration_progress[src_share_server["id"]])
821 return self.migration_progress[src_share_server["id"]] >= 100
    @slow_me_down
    def share_server_migration_complete(self, context, source_share_server,
                                        dest_share_server, shares, snapshots,
                                        new_network_allocations):
        """Is called to complete the migration of a share server."""
        # Build fresh export locations for every migrated share, pointing
        # at the destination share server, and record the destination pool.
        shares_updates = {}
        pools = self._get_pools_info()
        for instance in shares:
            share_name = self._get_share_name(instance)
            mountpoint = "/path/to/fake/share/%s" % share_name
            export_locations = self._generate_export_locations(
                mountpoint, share_server=dest_share_server)
            dest_pool = pools[0]['pool_name']
            shares_updates.update(
                {instance['id']: {'export_locations': export_locations,
                                  'pool_name': dest_pool}}
            )

        # Same for snapshots: new provider location and export locations.
        snapshot_updates = {}
        for instance in snapshots:
            snapshot_name = self._get_snapshot_name(instance)
            mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name
            snap_export_locations = self._generate_export_locations(
                mountpoint, share_server=dest_share_server)
            snapshot_updates.update(
                {instance['id']: {
                    'provider_location': mountpoint,
                    'export_locations': snap_export_locations}}
            )

        LOG.debug(
            "Migration of dummy share server with ID '%s' has been completed.",
            source_share_server["id"])
        # Drop the progress entry; pop() tolerates a missing key.
        self.migration_progress.pop(source_share_server["id"], None)

        return {
            'share_updates': shares_updates,
            'snapshot_updates': snapshot_updates,
        }
    @slow_me_down
    def share_server_migration_cancel(self, context, src_share_server,
                                      dest_share_server, shares, snapshots):
        """Is called to cancel a share server migration."""
        LOG.debug(
            "Migration of dummy share server with ID '%s' has been canceled.",
            src_share_server["id"])
        # Drop the progress entry; pop() tolerates a missing key.
        self.migration_progress.pop(src_share_server["id"], None)
873 @slow_me_down
874 def share_server_migration_get_progress(self, context, src_share_server,
875 dest_share_server, shares,
876 snapshots):
877 """Is called to get share server migration progress."""
878 if src_share_server["id"] not in self.migration_progress:
879 self.migration_progress[src_share_server["id"]] = 0
880 total_progress = self.migration_progress[src_share_server["id"]]
881 LOG.debug("Progress of current dummy share server migration "
882 "with ID '%(id)s' is %(progress)s.", {
883 "id": src_share_server["id"],
884 "progress": total_progress
885 })
886 return {"total_progress": total_progress}
888 def update_share_usage_size(self, context, shares):
889 share_updates = []
890 gathered_at = timeutils.utcnow()
891 for s in shares:
892 share_updates.append({'id': s['id'],
893 'used_size': 1,
894 'gathered_at': gathered_at})
895 return share_updates
    @slow_me_down
    def get_share_server_network_info(
            self, context, share_server, identifier, driver_options):
        """Return every IP address used by the share server being managed."""
        try:
            server_details = self.private_storage.get(identifier)
        except Exception:
            msg = ("Unable to find share server %s in "
                   "private storage." % identifier)
            raise exception.ShareBackendException(msg=msg)

        # Service (admin) IP first, then all tenant-facing subnet IPs.
        ips = [server_details['service_ip']]

        subnet_allocations = (
            self._get_subnet_allocations_from_backend_details(server_details))

        for subnet_allocation in subnet_allocations:
            ips += list(subnet_allocation.values())
        return ips
    @slow_me_down
    def manage_server(self, context, share_server, identifier, driver_options):
        """Bring an existing share server under Manila management."""
        server_details = self.private_storage.get(identifier)
        # The allocations are adopted by Manila now, so the copy kept in
        # private storage at unmanage time is no longer needed.
        self.private_storage.delete(identifier)
        return identifier, server_details
922 def unmanage_server(self, server_details, security_services=None):
923 server_details = server_details or {}
924 if not server_details or 'server_id' not in server_details:
925 # This share server doesn't have any network details. Since it's
926 # just being cleaned up, we'll log a warning and return without
927 # errors.
928 LOG.warning("Share server does not have network information. "
929 "It is being unmanaged, but cannot be re-managed "
930 "without first creating network allocations in this "
931 "driver's private storage.")
932 return
933 self.private_storage.update(server_details['server_id'],
934 server_details)
936 def get_share_status(self, share, share_server=None):
937 return {
938 'status': constants.STATUS_AVAILABLE,
939 'export_locations': self.private_storage.get(share['id'],
940 key='export_location')
941 }
943 @slow_me_down
944 def update_share_server_security_service(self, context, share_server,
945 network_info, share_instances,
946 share_instance_rules,
947 new_security_service,
948 current_security_service=None):
949 if current_security_service:
950 msg = _("Replacing security service %(cur_sec_serv_id)s by "
951 "security service %(new_sec_serv_id)s on share server "
952 "%(server_id)s."
953 ) % {
954 'cur_sec_serv_id': current_security_service['id'],
955 'new_sec_serv_id': new_security_service['id'],
956 'server_id': share_server['id']
957 }
958 else:
959 msg = _("Adding security service %(sec_serv_id)s on share server "
960 "%(server_id)s."
961 ) % {
962 'sec_serv_id': new_security_service['id'],
963 'server_id': share_server['id']
964 }
966 LOG.debug(msg)
968 def check_update_share_server_security_service(
969 self, context, share_server, network_info, share_instances,
970 share_instance_rules, new_security_service,
971 current_security_service=None):
972 return True
974 def check_update_share_server_network_allocations(
975 self, context, share_server, current_network_allocations,
976 new_share_network_subnet, security_services, share_instances,
977 share_instances_rules):
979 LOG.debug("Share server %(server)s can be updated with allocations "
980 "from new subnet.", {'server': share_server['id']})
981 return True
983 def update_share_server_network_allocations(
984 self, context, share_server, current_network_allocations,
985 new_network_allocations, security_services, shares, snapshots):
987 backend_details = share_server['backend_details']
988 subnet_allocations = (
989 self._get_subnet_allocations_from_backend_details(backend_details))
991 subnet_allocations.append({
992 'primary_public_ip': new_network_allocations[
993 'network_allocations'][0]['ip_address'],
994 'secondary_public_ip': new_network_allocations[
995 'network_allocations'][1]['ip_address'],
996 })
997 new_server = {
998 "backend_details": {
999 "subnet_allocations": jsonutils.dumps(subnet_allocations),
1000 "service_ip": backend_details["service_ip"],
1001 }
1002 }
1003 shares_updates = {}
1004 for instance in shares:
1006 share_name = self._get_share_name(instance)
1007 mountpoint = "/path/to/fake/share/%s" % share_name
1008 export_locations = self._generate_export_locations(
1009 mountpoint, share_server=new_server)
1010 shares_updates.update(
1011 {instance['id']: export_locations}
1012 )
1014 snapshot_updates = {}
1015 for instance in snapshots:
1016 snapshot_name = self._get_snapshot_name(instance)
1017 mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name
1018 snap_export_locations = self._generate_export_locations(
1019 mountpoint, share_server=new_server)
1020 snapshot_updates.update(
1021 {instance['id']: {
1022 'provider_location': mountpoint,
1023 'export_locations': snap_export_locations}}
1024 )
1026 LOG.debug(
1027 "Network update allocations of dummy share server with ID '%s' "
1028 "has been completed.", share_server["id"])
1029 return {
1030 "share_updates": shares_updates,
1031 "snapshot_updates": snapshot_updates,
1032 "server_details": {
1033 "subnet_allocations": (
1034 new_server["backend_details"]["subnet_allocations"])
1035 },
1036 }
1038 @slow_me_down
1039 def create_backup(self, context, share_instance, backup,
1040 share_server=None):
1041 LOG.debug("Created backup %(backup)s of share %(share)s "
1042 "using dummy driver.",
1043 {'backup': backup['id'],
1044 'share': share_instance['share_id']})
1046 def create_backup_continue(self, context, share_instance, backup,
1047 share_server=None):
1048 LOG.debug("Continue backup %(backup)s of share %(share)s "
1049 "using dummy driver.",
1050 {'backup': backup['id'],
1051 'share': share_instance['share_id']})
1052 return {'total_progress': '100'}
1054 def delete_backup(self, context, backup, share_instance,
1055 share_server=None):
1056 LOG.debug("Deleted backup '%s' using dummy driver.", backup['id'])
1058 @slow_me_down
1059 def restore_backup(self, context, backup, share_instance,
1060 share_server=None):
1061 LOG.debug("Restored backup %(backup)s into share %(share)s "
1062 "using dummy driver.",
1063 {'backup': backup['id'],
1064 'share': share_instance['share_id']})
1066 def restore_backup_continue(self, context, backup, share_instance,
1067 share_server=None):
1068 LOG.debug("Continue restore of backup %(backup)s into share "
1069 "%(share)s using dummy driver.",
1070 {'backup': backup['id'],
1071 'share': share_instance['share_id']})
1072 return {'total_progress': '100'}
1074 def update_share_from_metadata(self, context, share_instance, metadata,
1075 share_server=None):
1076 LOG.debug("Updated share %(share)s. Metadata %(metadata)s "
1077 "applied successfully.",
1078 {'share': share_instance['share_id'],
1079 'metadata': metadata})
1081 @slow_me_down
1082 def update_share_network_subnet_from_metadata(self, context,
1083 share_network,
1084 share_network_subnet,
1085 share_servers, metadata):
1086 LOG.debug("Updated share network subnet %(sn_sub)s. Metadata "
1087 "%(metadata)s applied successfully.",
1088 {'sn_sub': share_network_subnet['id'],
1089 'metadata': metadata})