Coverage for manila/share/drivers/inspur/instorage/instorage.py: 94%
303 statements
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
1# Copyright 2019 Inspur Corp.
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
15"""
16Driver for Inspur InStorage
17"""
19import ipaddress
20import itertools
22from oslo_config import cfg
23from oslo_log import log
24from oslo_utils import units
26from manila import coordination
27from manila import exception
28from manila.i18n import _
29from manila.share import driver
30from manila.share import utils as share_utils
32from manila.share.drivers.inspur.instorage.cli_helper import InStorageSSH
33from manila.share.drivers.inspur.instorage.cli_helper import SSHRunner
# Driver-specific configuration options. They are registered with
# oslo.config below and appended to the driver's configuration in
# InStorageShareDriver.__init__.
instorage_opts = [
    cfg.HostAddressOpt(
        'instorage_nas_ip',
        required=True,
        help='IP address for the InStorage.'
    ),
    cfg.PortOpt(
        'instorage_nas_port',
        default=22,
        help='Port number for the InStorage.'
    ),
    cfg.StrOpt(
        'instorage_nas_login',
        required=True,
        help='Username for the InStorage.'
    ),
    cfg.StrOpt(
        'instorage_nas_password',
        required=True,
        secret=True,
        help='Password for the InStorage.'
    ),
    cfg.ListOpt(
        'instorage_nas_pools',
        required=True,
        help='The Storage Pools Manila should use, a comma separated list.'
    )
]
# Register the driver options on the global CONF object so they are
# validated and available once this module is imported.
CONF = cfg.CONF
CONF.register_opts(instorage_opts)
# Module-level logger, following the OpenStack convention.
LOG = log.getLogger(__name__)
class InStorageShareDriver(driver.ShareDriver):
    """Inspur InStorage NAS driver. Allows for NFS and CIFS NAS.

    .. code::none

        Version history:
        1.0.0 - Initial driver.
            Driver support:
                share create/delete
                extend size
                update_access
                protocol: NFS/CIFS
    """

    VENDOR = 'INSPUR'
    VERSION = '1.0.0'
    PROTOCOL = 'NFS_CIFS'

    def __init__(self, *args, **kwargs):
        super(InStorageShareDriver, self).__init__(False, *args, **kwargs)
        self.configuration.append_config_values(instorage_opts)

        self.backend_name = self.configuration.safe_get('share_backend_name')
        self.backend_pools = self.configuration.instorage_nas_pools

        # Use the configured SSH port rather than a hard-coded 22, so a
        # non-default instorage_nas_port actually takes effect.
        self.ssh_runner = SSHRunner(**{
            'host': self.configuration.instorage_nas_ip,
            'port': self.configuration.instorage_nas_port,
            'login': self.configuration.instorage_nas_login,
            'password': self.configuration.instorage_nas_password
        })

        self.assistant = InStorageAssistant(self.ssh_runner)

    def check_for_setup_error(self):
        """Validate that the backend has usable nodes and pools.

        :raises ShareBackendException: if no node has a NAS port IP.
        :raises InvalidParameterValue: if a configured pool is missing
            on the storage system.
        """
        nodes = self.assistant.get_nodes_info()
        if len(nodes) == 0:
            msg = _('No valid node, be sure the NAS Port IP is configured')
            raise exception.ShareBackendException(msg=msg)

        pools = self.assistant.get_available_pools()
        not_exist = set(self.backend_pools).difference(set(pools))
        if not_exist:
            msg = _('Pool %s not exist on the storage system') % not_exist
            raise exception.InvalidParameterValue(msg)

    def _update_share_stats(self, **kwargs):
        """Retrieve share stats information."""

        try:
            stats = {
                'share_backend_name': self.backend_name,
                'vendor_name': self.VENDOR,
                'driver_version': self.VERSION,
                # Keep the reported protocol consistent with the class
                # constant instead of duplicating the literal.
                'storage_protocol': self.PROTOCOL,
                'reserved_percentage':
                    self.configuration.reserved_share_percentage,
                'reserved_snapshot_percentage': (
                    self.configuration.reserved_share_from_snapshot_percentage
                    or self.configuration.reserved_share_percentage),
                'reserved_share_extend_percentage': (
                    self.configuration.reserved_share_extend_percentage
                    or self.configuration.reserved_share_percentage),
                'max_over_subscription_ratio':
                    self.configuration.max_over_subscription_ratio,
                'snapshot_support': False,
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'qos': False,
                'total_capacity_gb': 0.0,
                'free_capacity_gb': 0.0,
                'pools': []
            }

            pools = self.assistant.get_pools_attr(self.backend_pools)
            total_capacity_gb = 0
            free_capacity_gb = 0
            for pool in pools.values():
                total_capacity_gb += pool['total_capacity_gb']
                free_capacity_gb += pool['free_capacity_gb']
                stats['pools'].append(pool)

            stats['total_capacity_gb'] = total_capacity_gb
            stats['free_capacity_gb'] = free_capacity_gb

            LOG.debug('share status %s', stats)

            super(InStorageShareDriver, self)._update_share_stats(stats)
        except Exception:
            msg = _('Unexpected error while trying to get the '
                    'usage stats from array.')
            LOG.exception(msg)
            raise

    @staticmethod
    def generate_share_name(share):
        # Generate a name with id of the share as base, and do follows:
        # 1. Remove the '-' in the id string.
        # 2. Transform all alpha to lower case.
        # 3. If the first char of the id is a num,
        #    transform it to an Upper case alpha start from 'A',
        #    such as '0' -> 'A', '1' -> 'B'.
        # e.g.
        # generate_share_name({
        #     'id': '46CF5E85-D618-4023-8727-6A1EA9292954',
        #     ...
        # })
        # returns 'E6cf5e85d618402387276a1ea9292954'
        name = share['id'].replace('-', '').lower()
        if name[0] in '0123456789':
            name = chr(ord('A') + ord(name[0]) - ord('0')) + name[1:]
        return name

    def get_network_allocations_number(self):
        """Get the number of network interfaces to be created."""

        return 0

    def create_share(self, context, share, share_server=None):
        """Create a new share instance.

        :return: the export locations of the new share.
        """
        share_name = self.generate_share_name(share)
        share_size = share['size']
        share_proto = share['share_proto']

        # The pool to place the share in is encoded in the host field.
        pool_name = share_utils.extract_host(share['host'], level='pool')

        self.assistant.create_share(
            share_name,
            pool_name,
            share_size,
            share_proto
        )

        return self.assistant.get_export_locations(share_name, share_proto)

    def delete_share(self, context, share, share_server=None):
        """Delete the given share instance."""
        share_name = self.generate_share_name(share)
        share_proto = share['share_proto']

        self.assistant.delete_share(share_name, share_proto)

    def extend_share(self, share, new_size, share_server=None):
        """Extend the share instance's size to new size."""
        share_name = self.generate_share_name(share)

        self.assistant.extend_share(share_name, new_size)

    def ensure_share(self, context, share, share_server=None):
        """Ensure that the share instance is exported."""
        share_name = self.generate_share_name(share)
        share_proto = share['share_proto']

        return self.assistant.get_export_locations(share_name, share_proto)

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Update the share instance's access rule."""
        share_name = self.generate_share_name(share)
        share_proto = share['share_proto']

        # Serialize concurrent access updates on the same share with a
        # per-share distributed lock.
        @coordination.synchronized('inspur-instorage-access-' + share_name)
        def _update_access(name, proto, rules, add_rules, delete_rules):
            self.assistant.update_access(
                name, proto, rules, add_rules, delete_rules
            )

        _update_access(
            share_name, share_proto, access_rules, add_rules, delete_rules
        )
class InStorageAssistant(object):
    """Helper that maps share operations onto InStorage CLI calls."""

    # Rendered as <ip>/<netmask>:<rights>:<all_squash>:<root_squash>
    NFS_CLIENT_SPEC_PATTERN = (
        '%(ip)s/%(mask)s:%(rights)s:%(all_squash)s:%(root_squash)s'
    )

    # Rendered as <user type>:<user name>:<rights>
    CIFS_CLIENT_RIGHT_PATTERN = (
        '%(type)s:%(name)s:%(rights)s'
    )

    def __init__(self, ssh_runner):
        self.ssh = InStorageSSH(ssh_runner)

    @staticmethod
    def handle_keyerror(cmd, out):
        """Raise a backend error for CLI output missing an expected key."""
        msg = (_('Could not find key in output of command %(cmd)s: %(out)s.')
               % {'out': out, 'cmd': cmd})
        raise exception.ShareBackendException(msg=msg)

    def size_to_gb(self, size):
        """Convert a CLI size string such as '10.00GB' to whole GiB.

        MB values are rounded up to the next GiB; unrecognized units
        yield 0.
        """
        new_size = 0

        if 'P' in size:
            new_size = int(float(size.rstrip('PB')) * units.Mi)
        elif 'T' in size:
            new_size = int(float(size.rstrip('TB')) * units.Ki)
        elif 'G' in size:
            new_size = int(float(size.rstrip('GB')) * 1)
        elif 'M' in size:
            mb_size = float(size.rstrip('MB'))
            # Round up so a partially used GiB still counts as one.
            new_size = int((mb_size + units.Ki - 1) / units.Ki)

        return new_size

    def get_available_pools(self):
        """Return the names of all NAS pools on the backend."""
        nas_pools = self.ssh.lsnaspool()
        return [pool['pool_name'] for pool in nas_pools]

    def get_pools_attr(self, backend_pools):
        """Build the per-pool stats dict for the pools Manila manages.

        :param backend_pools: pool names the driver is configured to use.
        :return: dict mapping pool name to its capacity/capability dict.
        """
        pools = {}
        fs_attr = self.ssh.lsfs()
        nas_pools = self.ssh.lsnaspool()
        for pool_attr in nas_pools:
            pool_name = pool_attr['pool_name']
            if pool_name not in backend_pools:
                continue

            # Sum the capacity already allocated to file systems in this
            # pool. (The used capacity of each fs is not needed for the
            # reported stats, so it is not computed.)
            total_allocated_capacity = 0
            for fs in fs_attr:
                if fs['pool_name'] != pool_name:
                    continue
                total_allocated_capacity += self.size_to_gb(
                    fs['total_capacity'])

            available = self.size_to_gb(pool_attr['available_capacity'])

            pool = {
                'pool_name': pool_name,
                'total_capacity_gb': total_allocated_capacity + available,
                'free_capacity_gb': available,
                'allocated_capacity_gb': total_allocated_capacity,
                'reserved_percentage': 0,
                'reserved_snapshot_percentage': 0,
                'reserved_share_extend_percentage': 0,
                'qos': False,
                'dedupe': False,
                'compression': False,
                'thin_provisioning': False,
                'max_over_subscription_ratio': 0
            }

            pools[pool_name] = pool

        return pools

    def get_nodes_info(self):
        """Return a dictionary containing information of system's nodes."""
        nodes = {}
        resp = self.ssh.lsnasportip()
        for port in resp:
            try:
                # Port is invalid if it has no IP configured.
                if port['ip'] == '':
                    continue

                node_name = port['node_name']
                if node_name not in nodes:
                    nodes[node_name] = {}

                node = nodes[node_name]
                node[port['id']] = port
            except KeyError:
                self.handle_keyerror('lsnasportip', port)

        return nodes

    @staticmethod
    def get_fsname_by_name(name):
        # Backend file system names are limited to 32 characters.
        return ('%(fsname)s' % {'fsname': name})[0:32]

    @staticmethod
    def get_dirname_by_name(name):
        # Backend directory names are limited to 32 characters.
        return ('%(dirname)s' % {'dirname': name})[0:32]

    def get_dirpath_by_name(self, name):
        """Return the backend directory path used for the given share."""
        fsname = self.get_fsname_by_name(name)
        dirname = self.get_dirname_by_name(name)

        return '/fs/%(fsname)s/%(dirname)s' % {
            'fsname': fsname, 'dirname': dirname
        }

    def create_share(self, name, pool, size, proto):
        """Create a share with given info."""

        # use one available node as the primary node
        nodes = self.get_nodes_info()
        if len(nodes) == 0:
            msg = _('No valid node, be sure the NAS Port IP is configured')
            raise exception.ShareBackendException(msg=msg)

        node_name = [key for key in nodes.keys()][0]

        # first create the file system on which share will be created
        fsname = self.get_fsname_by_name(name)
        self.ssh.addfs(fsname, pool, size, node_name)

        # then create the directory used for the share
        dirpath = self.get_dirpath_by_name(name)
        self.ssh.addnasdir(dirpath)

        # For CIFS, we need to create a CIFS share.
        # For NAS, the share is automatically added when the first
        # 'access spec' is added on it.
        if proto == 'CIFS':
            self.ssh.addcifs(name, dirpath)

    def check_share_exist(self, name):
        """Check whether the specified share exist on backend."""

        fsname = self.get_fsname_by_name(name)
        for fs in self.ssh.lsfs():
            if fs['fs_name'] == fsname:
                return True
        return False

    def delete_share(self, name, proto):
        """Delete the given share."""

        if not self.check_share_exist(name):
            LOG.warning('Share %s does not exist on the backend.', name)
            return

        # For CIFS, we have to delete the share first.
        # For NAS, when the last client access spec is removed from
        # it, the share is automatically deleted.
        if proto == 'CIFS':
            self.ssh.rmcifs(name)

        # then delete the directory
        dirpath = self.get_dirpath_by_name(name)
        self.ssh.rmnasdir(dirpath)

        # at last delete the file system
        fsname = self.get_fsname_by_name(name)
        self.ssh.rmfs(fsname)

    def extend_share(self, name, new_size):
        """Extend a given share to a new size.

        :param name: the name of the share.
        :param new_size: the new size the share should be.
        :return:
        """
        # first get the original capacity
        old_size = None
        fsname = self.get_fsname_by_name(name)
        for fs in self.ssh.lsfs():
            if fs['fs_name'] == fsname:
                old_size = self.size_to_gb(fs['total_capacity'])
                break

        if old_size is None:
            msg = _('share %s is not available') % name
            raise exception.ShareBackendException(msg=msg)

        LOG.debug('Extend fs %s from %dGB to %dGB', fsname, old_size, new_size)
        # The CLI takes the size delta, not the absolute new size.
        self.ssh.expandfs(fsname, new_size - old_size)

    def get_export_locations(self, name, share_proto):
        """Get the export locations of a given share.

        :param name: the name of the share.
        :param share_proto: the protocol of the share.
        :return: a list of export locations.
        """

        if share_proto == 'NFS':
            dirpath = self.get_dirpath_by_name(name)
            pattern = '%(ip)s:' + dirpath
        elif share_proto == 'CIFS':
            pattern = '\\\\%(ip)s\\' + name
        else:
            msg = _('share protocol %s is not supported') % share_proto
            raise exception.ShareBackendException(msg=msg)

        # we need get the node so that we know which port ip we can use
        node_name = None
        fsname = self.get_fsname_by_name(name)
        for node in self.ssh.lsnode():
            for fs in self.ssh.lsfs(node['name']):
                if fs['fs_name'] == fsname:
                    node_name = node['name']
                    break
            if node_name:
                break

        if node_name is None:
            msg = _('share %s is not available') % name
            raise exception.ShareBackendException(msg=msg)

        # Export through every configured port IP on the owning node.
        locations = []
        ports = self.ssh.lsnasportip()
        for port in ports:
            if port['node_name'] == node_name and port['ip'] != '':
                location = pattern % {'ip': port['ip']}

                locations.append({
                    'path': location,
                    'is_admin_only': False,
                    'metadata': {}
                })

        return locations

    def classify_nfs_client_spec(self, client_spec, dirpath):
        """Diff wanted client specs against those configured on backend.

        :return: (specs to add, specs to delete) as lists.
        """
        nfslist = self.ssh.lsnfslist(dirpath)
        if len(nfslist):
            nfsinfo = self.ssh.lsnfsinfo(dirpath)
            spec_set = set([
                self.NFS_CLIENT_SPEC_PATTERN % i for i in nfsinfo
            ])
        else:
            spec_set = set()

        client_spec_set = set(client_spec)

        del_spec = spec_set.difference(client_spec_set)
        add_spec = client_spec_set.difference(spec_set)

        return list(add_spec), list(del_spec)

    def access_rule_to_client_spec(self, access_rule):
        """Render a Manila IP access rule as an NFS client spec string.

        :raises ShareBackendException: for non-ip or non-IPv4 rules.
        """
        if access_rule['access_type'] != 'ip':
            msg = _('only ip access type is supported when using NFS protocol')
            raise exception.ShareBackendException(msg=msg)

        network = ipaddress.ip_network(str(access_rule['access_to']))
        if network.version != 4:
            msg = _('only IPV4 is accepted when using NFS protocol')
            raise exception.ShareBackendException(msg=msg)

        client_spec = self.NFS_CLIENT_SPEC_PATTERN % {
            'ip': str(network.network_address),
            'mask': str(network.netmask),
            'rights': access_rule['access_level'],
            'all_squash': 'all_squash',
            'root_squash': 'root_squash'
        }

        return client_spec

    def update_nfs_access(self, share_name, access_rules, add_rules,
                          delete_rules):
        """Update a NFS share's access rule."""

        dirpath = self.get_dirpath_by_name(share_name)
        if add_rules or delete_rules:
            add_spec = [
                self.access_rule_to_client_spec(r) for r in add_rules
            ]
            del_spec = [
                self.access_rule_to_client_spec(r) for r in delete_rules
            ]

            # Only delete specs that actually exist on the backend:
            # classifying against an empty wanted-list reports every
            # currently configured spec as deletable.
            _, can_del_spec = self.classify_nfs_client_spec(
                [], dirpath
            )
            to_del_set = set(del_spec)
            can_del_set = set(can_del_spec)
            will_del_set = to_del_set.intersection(can_del_set)
            del_spec = list(will_del_set)
        else:
            # Full recalculation: make the backend match access_rules.
            access_spec = [
                self.access_rule_to_client_spec(r) for r in access_rules
            ]

            add_spec, del_spec = self.classify_nfs_client_spec(
                access_spec, dirpath
            )

        for spec in del_spec:
            self.ssh.rmnfsclient(dirpath, spec)
        for spec in add_spec:
            self.ssh.addnfsclient(dirpath, spec)

    def classify_cifs_rights(self, access_rights, share_name):
        """Diff wanted CIFS rights against those configured on backend.

        :return: (rights to add, rights to delete) as lists.
        """
        cifsinfo = self.ssh.lscifsinfo(share_name)
        rights_set = set([
            self.CIFS_CLIENT_RIGHT_PATTERN % i for i in cifsinfo
        ])
        access_rights_set = set(access_rights)

        del_rights = rights_set.difference(access_rights_set)
        add_rights = access_rights_set.difference(rights_set)

        return list(add_rights), list(del_rights)

    def access_rule_to_rights(self, access_rule):
        """Render a Manila user access rule as a CIFS rights string.

        :raises ShareBackendException: for non-user rules.
        """
        if access_rule['access_type'] != 'user':
            msg = _('only user access type is supported'
                    ' when using CIFS protocol')
            raise exception.ShareBackendException(msg=msg)

        rights = self.CIFS_CLIENT_RIGHT_PATTERN % {
            'type': 'LU',
            'name': access_rule['access_to'],
            'rights': access_rule['access_level']
        }

        return rights

    def update_cifs_access(self, share_name, access_rules, add_rules,
                           delete_rules):
        """Update a CIFS share's access rule."""

        if add_rules or delete_rules:
            add_rights = [
                self.access_rule_to_rights(r) for r in add_rules
            ]
            del_rights = [
                self.access_rule_to_rights(r) for r in delete_rules
            ]
        else:
            # Full recalculation: make the backend match access_rules.
            access_rights = [
                self.access_rule_to_rights(r) for r in access_rules
            ]

            add_rights, del_rights = self.classify_cifs_rights(
                access_rights, share_name
            )

        for rights in del_rights:
            self.ssh.rmcifsuser(share_name, rights)
        for rights in add_rights:
            self.ssh.addcifsuser(share_name, rights)

    @staticmethod
    def check_access_type(access_type, *rules):
        """Return True iff every rule in all given lists has access_type."""
        rule_chain = itertools.chain(*rules)
        if all([r['access_type'] == access_type for r in rule_chain]):
            return True
        else:
            return False

    def update_access(self, share_name, share_proto,
                      access_rules, add_rules, delete_rules):
        """Dispatch an access update to the protocol-specific handler.

        :raises InvalidShareAccess: when a rule's type does not match
            the protocol (CIFS needs 'user', NFS needs 'ip').
        :raises ShareBackendException: for unsupported protocols.
        """
        if share_proto == 'CIFS':
            if self.check_access_type('user', access_rules,
                                      add_rules, delete_rules):
                self.update_cifs_access(share_name, access_rules,
                                        add_rules, delete_rules)
            else:
                msg = _("Only %s access type allowed.") % "user"
                raise exception.InvalidShareAccess(reason=msg)
        elif share_proto == 'NFS':
            if self.check_access_type('ip', access_rules,
                                      add_rules, delete_rules):
                self.update_nfs_access(share_name, access_rules,
                                       add_rules, delete_rules)
            else:
                msg = _("Only %s access type allowed.") % "ip"
                raise exception.InvalidShareAccess(reason=msg)
        else:
            msg = _('share protocol %s is not supported') % share_proto
            raise exception.ShareBackendException(msg=msg)