Coverage for manila/share/drivers/nexenta/ns5/nexenta_nas.py: 89%

291 statements  

« prev     ^ index     » next       coverage.py v7.11.0, created at 2026-02-18 22:19 +0000

1# Copyright 2019 Nexenta by DDN, Inc. 

2# All Rights Reserved. 

3# 

4# Licensed under the Apache License, Version 2.0 (the "License"); you may 

5# not use this file except in compliance with the License. You may obtain 

6# a copy of the License at 

7# 

8# http://www.apache.org/licenses/LICENSE-2.0 

9# 

10# Unless required by applicable law or agreed to in writing, software 

11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

13# License for the specific language governing permissions and limitations 

14# under the License. 

15 

16import posixpath 

17 

18from oslo_log import log 

19from oslo_utils import units 

20 

21from manila.common import constants as common 

22from manila import exception 

23from manila.i18n import _ 

24from manila.share import driver 

25from manila.share.drivers.nexenta.ns5 import jsonrpc 

26from manila.share.drivers.nexenta import options 

27from manila.share.drivers.nexenta import utils 

28 

# Driver version reported in share stats.
VERSION = '1.1'
LOG = log.getLogger(__name__)
# ZFS quotas do not take metadata into account, so quotas/reservations
# are padded by this factor when converting requested GiB to bytes.
ZFS_MULTIPLIER = 1.1

32 

33 

class NexentaNasDriver(driver.ShareDriver):
    """Nexenta Share Driver.

    Executes commands relating to Shares.
    API version history:
        1.0 - Initial version.
        1.1 - Failover support.
            - Unshare filesystem completely after last securityContext
              is removed.
            - Moved all http/url code to jsonrpc.
            - Manage existing support.
            - Revert to snapshot support.
    """

    # Prefix used by the generic driver machinery for config/option lookup.
    driver_prefix = 'nexenta'

49 

50 def __init__(self, *args, **kwargs): 

51 """Do initialization.""" 

52 LOG.debug('Initializing Nexenta driver.') 

53 super(NexentaNasDriver, self).__init__(False, *args, **kwargs) 

54 self.configuration = kwargs.get('configuration') 

55 if self.configuration: 55 ↛ 63line 55 didn't jump to line 63 because the condition on line 55 was always true

56 self.configuration.append_config_values( 

57 options.nexenta_connection_opts) 

58 self.configuration.append_config_values( 

59 options.nexenta_nfs_opts) 

60 self.configuration.append_config_values( 

61 options.nexenta_dataset_opts) 

62 else: 

63 raise exception.BadConfigurationException( 

64 reason=_('Nexenta configuration missing.')) 

65 

66 self.nef = None 

67 self.verify_ssl = self.configuration.nexenta_ssl_cert_verify 

68 self.nas_host = self.configuration.nexenta_nas_host 

69 self.nef_port = self.configuration.nexenta_rest_port 

70 self.nef_user = self.configuration.nexenta_user 

71 self.nef_password = self.configuration.nexenta_password 

72 

73 self.pool_name = self.configuration.nexenta_pool 

74 self.parent_fs = self.configuration.nexenta_folder 

75 

76 self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base 

77 self.dataset_compression = ( 

78 self.configuration.nexenta_dataset_compression) 

79 self.provisioned_capacity = 0 

80 

81 @property 

82 def storage_protocol(self): 

83 protocol = '' 

84 if self.configuration.nexenta_nfs: 84 ↛ 87line 84 didn't jump to line 87 because the condition on line 84 was always true

85 protocol = 'NFS' 

86 else: 

87 msg = _('At least 1 storage protocol must be enabled.') 

88 raise exception.NexentaException(msg) 

89 return protocol 

90 

    @property
    def root_path(self):
        """ZFS path of the parent filesystem holding all shares
        (pool/folder)."""
        return posixpath.join(self.pool_name, self.parent_fs)

94 

95 @property 

96 def share_backend_name(self): 

97 if not hasattr(self, '_share_backend_name'): 97 ↛ 104line 97 didn't jump to line 104 because the condition on line 97 was always true

98 self._share_backend_name = None 

99 if self.configuration: 99 ↛ 102line 99 didn't jump to line 102 because the condition on line 99 was always true

100 self._share_backend_name = self.configuration.safe_get( 

101 'share_backend_name') 

102 if not self._share_backend_name: 102 ↛ 103line 102 didn't jump to line 103 because the condition on line 102 was never true

103 self._share_backend_name = 'NexentaStor5' 

104 return self._share_backend_name 

105 

    def do_setup(self, context):
        """Create the NEF REST API proxy used for all appliance calls.

        :param context: Manila request context (unused here).
        """
        self.nef = jsonrpc.NefProxy(self.storage_protocol,
                                    self.root_path,
                                    self.configuration)

110 

    def check_for_setup_error(self):
        """Check root filesystem, NFS service and NFS share.

        :raises jsonrpc.NefException: if the root filesystem is not
            usable or the NFS service is not running on the appliance.
        """
        filesystem = self.nef.filesystems.get(self.root_path)
        # A 'none' mount point means the dataset has no usable mount.
        if filesystem['mountPoint'] == 'none':
            message = (_('NFS root filesystem %(path)s is not writable')
                       % {'path': filesystem['mountPoint']})
            raise jsonrpc.NefException(code='ENOENT', message=message)
        if not filesystem['isMounted']:
            message = (_('NFS root filesystem %(path)s is not mounted')
                       % {'path': filesystem['mountPoint']})
            raise jsonrpc.NefException(code='ENOTDIR', message=message)
        # Turn off dataset flags the driver requires to be disabled,
        # issuing the API call only if something actually changes.
        payload = {}
        if filesystem['nonBlockingMandatoryMode']:
            payload['nonBlockingMandatoryMode'] = False
        if filesystem['smartCompression']:
            payload['smartCompression'] = False
        if payload:
            self.nef.filesystems.set(self.root_path, payload)
        service = self.nef.services.get('nfs')
        if service['state'] != 'online':
            message = (_('NFS server service is not online: %(state)s')
                       % {'state': service['state']})
            raise jsonrpc.NefException(code='ESRCH', message=message)
        # Seed the provisioned-capacity accounting from the root quota.
        self._get_provisioned_capacity()

135 

136 def _get_provisioned_capacity(self): 

137 payload = {'fields': 'referencedQuotaSize'} 

138 self.provisioned_capacity += self.nef.filesystems.get( 

139 self.root_path, payload)['referencedQuotaSize'] 

140 

    def ensure_share(self, context, share, share_server=None):
        """No-op; kept to satisfy the ShareDriver interface."""
        pass

143 

144 def create_share(self, context, share, share_server=None): 

145 """Create a share.""" 

146 LOG.debug('Creating share: %s.', self._get_share_name(share)) 

147 dataset_path = self._get_dataset_path(share) 

148 size = int(share['size'] * units.Gi * ZFS_MULTIPLIER) 

149 payload = { 

150 'recordSize': self.configuration.nexenta_dataset_record_size, 

151 'compressionMode': self.dataset_compression, 

152 'path': dataset_path, 

153 'referencedQuotaSize': size, 

154 'nonBlockingMandatoryMode': False 

155 } 

156 if not self.configuration.nexenta_thin_provisioning: 156 ↛ 158line 156 didn't jump to line 158 because the condition on line 156 was always true

157 payload['referencedReservationSize'] = size 

158 self.nef.filesystems.create(payload) 

159 

160 try: 

161 mount_path = self._mount_filesystem(share) 

162 except jsonrpc.NefException as create_error: 

163 try: 

164 payload = {'force': True} 

165 self.nef.filesystems.delete(dataset_path, payload) 

166 except jsonrpc.NefException as delete_error: 

167 LOG.debug('Failed to delete share %(path)s: %(error)s', 

168 {'path': dataset_path, 'error': delete_error}) 

169 raise create_error 

170 

171 self.provisioned_capacity += share['size'] 

172 location = { 

173 'path': mount_path, 

174 'id': self._get_share_name(share) 

175 } 

176 return [location] 

177 

178 def _mount_filesystem(self, share): 

179 """Ensure that filesystem is activated and mounted on the host.""" 

180 dataset_path = self._get_dataset_path(share) 

181 payload = {'fields': 'mountPoint,isMounted'} 

182 filesystem = self.nef.filesystems.get(dataset_path, payload) 

183 if filesystem['mountPoint'] == 'none': 

184 payload = {'datasetName': dataset_path} 

185 self.nef.hpr.activate(payload) 

186 filesystem = self.nef.filesystems.get(dataset_path, payload) 

187 elif not filesystem['isMounted']: 187 ↛ 189line 187 didn't jump to line 189 because the condition on line 187 was always true

188 self.nef.filesystems.mount(dataset_path) 

189 return '%s:%s' % (self.nas_host, filesystem['mountPoint']) 

190 

191 def create_share_from_snapshot(self, context, share, snapshot, 

192 share_server=None, parent_share=None): 

193 """Is called to create share from snapshot.""" 

194 snapshot_path = self._get_snapshot_path(snapshot) 

195 LOG.debug('Creating share from snapshot %s.', snapshot_path) 

196 clone_path = self._get_dataset_path(share) 

197 size = int(share['size'] * units.Gi * ZFS_MULTIPLIER) 

198 payload = { 

199 'targetPath': clone_path, 

200 'referencedQuotaSize': size, 

201 'recordSize': self.configuration.nexenta_dataset_record_size, 

202 'compressionMode': self.dataset_compression, 

203 'nonBlockingMandatoryMode': False 

204 } 

205 if not self.configuration.nexenta_thin_provisioning: 205 ↛ 207line 205 didn't jump to line 207 because the condition on line 205 was always true

206 payload['referencedReservationSize'] = size 

207 self.nef.snapshots.clone(snapshot_path, payload) 

208 self._remount_filesystem(clone_path) 

209 self.provisioned_capacity += share['size'] 

210 try: 

211 mount_path = self._mount_filesystem(share) 

212 except jsonrpc.NefException as create_error: 

213 try: 

214 payload = {'force': True} 

215 self.nef.filesystems.delete(clone_path, payload) 

216 except jsonrpc.NefException as delete_error: 

217 LOG.debug('Failed to delete share %(path)s: %(error)s', 

218 {'path': clone_path, 'error': delete_error}) 

219 raise create_error 

220 

221 location = { 

222 'path': mount_path, 

223 'id': self._get_share_name(share) 

224 } 

225 return [location] 

226 

    def _remount_filesystem(self, clone_path):
        """Workaround for NEF bug: cloned share has offline NFS status"""
        # The unmount/mount cycle order is significant; do not reorder.
        self.nef.filesystems.unmount(clone_path)
        self.nef.filesystems.mount(clone_path)

231 

232 def _get_dataset_path(self, share): 

233 share_name = self._get_share_name(share) 

234 return posixpath.join(self.root_path, share_name) 

235 

236 def _get_share_name(self, share): 

237 """Get share name with share name prefix.""" 

238 return ('%(prefix)s%(share_id)s' % { 

239 'prefix': self.configuration.nexenta_share_name_prefix, 

240 'share_id': share['share_id']}) 

241 

242 def _get_snapshot_path(self, snapshot): 

243 """Return ZFS snapshot path for the snapshot.""" 

244 snapshot_id = ( 

245 snapshot['snapshot_id'] or snapshot['share_group_snapshot_id']) 

246 share = snapshot.get('share') or snapshot.get('share_instance') 

247 fs_path = self._get_dataset_path(share) 

248 return '%s@snapshot-%s' % (fs_path, snapshot_id) 

249 

    def delete_share(self, context, share, share_server=None):
        """Delete a share.

        If the first delete fails with EEXIST (the filesystem still has
        dependent clones), the clone of the newest snapshot is promoted
        so dependencies move to it, and the delete is retried.
        """
        LOG.debug('Deleting share: %s.', self._get_share_name(share))
        share_path = self._get_dataset_path(share)
        delete_payload = {'force': True, 'snapshots': True}
        try:
            self.nef.filesystems.delete(share_path, delete_payload)
        except jsonrpc.NefException as error:
            if error.code != 'EEXIST':
                raise error
            # Map each snapshot's creationTxg to its first clone so we
            # can pick the clone of the most recent snapshot below.
            snapshots_tree = {}
            snapshots_payload = {'parent': share_path, 'fields': 'path'}
            snapshots = self.nef.snapshots.list(snapshots_payload)
            for snapshot in snapshots:
                clones_payload = {'fields': 'clones,creationTxg'}
                data = self.nef.snapshots.get(snapshot['path'], clones_payload)
                if data['clones']:
                    snapshots_tree[data['creationTxg']] = data['clones'][0]
            if snapshots_tree:
                # Promote the clone keyed by the greatest creationTxg.
                clone_path = snapshots_tree[max(snapshots_tree)]
                self.nef.filesystems.promote(clone_path)
            # Retry now that the blocking clone has been promoted away.
            self.nef.filesystems.delete(share_path, delete_payload)
        self.provisioned_capacity -= share['size']

273 

274 def extend_share(self, share, new_size, share_server=None): 

275 """Extends a share.""" 

276 LOG.debug( 

277 'Extending share: %(name)s to %(size)sG.', ( 

278 {'name': self._get_share_name(share), 'size': new_size})) 

279 self._set_quota(share, new_size) 

280 if not self.configuration.nexenta_thin_provisioning: 280 ↛ 282line 280 didn't jump to line 282 because the condition on line 280 was always true

281 self._set_reservation(share, new_size) 

282 self.provisioned_capacity += (new_size - share['size']) 

283 

284 def shrink_share(self, share, new_size, share_server=None): 

285 """Shrinks size of existing share.""" 

286 LOG.debug( 

287 'Shrinking share: %(name)s to %(size)sG.', { 

288 'name': self._get_share_name(share), 'size': new_size}) 

289 share_path = self._get_dataset_path(share) 

290 share_data = self.nef.filesystems.get(share_path) 

291 used = share_data['bytesUsedBySelf'] / units.Gi 

292 if used > new_size: 292 ↛ 293line 292 didn't jump to line 293 because the condition on line 292 was never true

293 raise exception.ShareShrinkingPossibleDataLoss( 

294 share_id=self._get_share_name(share)) 

295 if not self.configuration.nexenta_thin_provisioning: 295 ↛ 297line 295 didn't jump to line 297 because the condition on line 295 was always true

296 self._set_reservation(share, new_size) 

297 self._set_quota(share, new_size) 

298 self.provisioned_capacity += (share['size'] - new_size) 

299 

300 def create_snapshot(self, context, snapshot, share_server=None): 

301 """Create a snapshot.""" 

302 snapshot_path = self._get_snapshot_path(snapshot) 

303 LOG.debug('Creating snapshot: %s.', snapshot_path) 

304 payload = {'path': snapshot_path} 

305 self.nef.snapshots.create(payload) 

306 

307 def delete_snapshot(self, context, snapshot, share_server=None): 

308 """Deletes a snapshot. 

309 

310 :param snapshot: snapshot reference 

311 """ 

312 snapshot_path = self._get_snapshot_path(snapshot) 

313 LOG.debug('Deleting snapshot: %s.', snapshot_path) 

314 payload = {'defer': True} 

315 self.nef.snapshots.delete(snapshot_path, payload) 

316 

317 def revert_to_snapshot(self, context, snapshot, share_access_rules, 

318 snapshot_access_rules, share_server=None): 

319 """Reverts a share (in place) to the specified snapshot. 

320 

321 Does not delete the share snapshot. The share and snapshot must both 

322 be 'available' for the restore to be attempted. The snapshot must be 

323 the most recent one taken by Manila; the API layer performs this check 

324 so the driver doesn't have to. 

325 

326 The share must be reverted in place to the contents of the snapshot. 

327 Application admins should quiesce or otherwise prepare the application 

328 for the shared file system contents to change suddenly. 

329 

330 :param context: Current context 

331 :param snapshot: The snapshot to be restored 

332 :param share_access_rules: List of all access rules for the affected 

333 share 

334 :param snapshot_access_rules: List of all access rules for the affected 

335 snapshot 

336 :param share_server: Optional -- Share server model or None 

337 """ 

338 snapshot_path = self._get_snapshot_path(snapshot).split('@')[1] 

339 LOG.debug('Reverting to snapshot: %s.', snapshot_path) 

340 share_path = self._get_dataset_path(snapshot['share']) 

341 payload = {'snapshot': snapshot_path} 

342 self.nef.filesystems.rollback(share_path, payload) 

343 

344 def manage_existing(self, share, driver_options): 

345 """Brings an existing share under Manila management. 

346 

347 If the provided share is not valid, then raise a 

348 ManageInvalidShare exception, specifying a reason for the failure. 

349 

350 If the provided share is not in a state that can be managed, such as 

351 being replicated on the backend, the driver *MUST* raise 

352 ManageInvalidShare exception with an appropriate message. 

353 

354 The share has a share_type, and the driver can inspect that and 

355 compare against the properties of the referenced backend share. 

356 If they are incompatible, raise a 

357 ManageExistingShareTypeMismatch, specifying a reason for the failure. 

358 

359 :param share: Share model 

360 :param driver_options: Driver-specific options provided by admin. 

361 :return: share_update dictionary with required key 'size', 

362 which should contain size of the share. 

363 """ 

364 LOG.debug('Manage share %s.', self._get_share_name(share)) 

365 export_path = share['export_locations'][0]['path'] 

366 

367 # check that filesystem with provided export exists. 

368 fs_path = export_path.split(':/')[1] 

369 fs_data = self.nef.filesystems.get(fs_path) 

370 

371 if not fs_data: 371 ↛ 373line 371 didn't jump to line 373 because the condition on line 371 was never true

372 # wrong export path, raise exception. 

373 msg = _('Share %s does not exist on Nexenta Store appliance, ' 

374 'cannot manage.') % export_path 

375 raise exception.NexentaException(msg) 

376 

377 # get dataset properties. 

378 if fs_data['referencedQuotaSize']: 378 ↛ 381line 378 didn't jump to line 381 because the condition on line 378 was always true

379 size = (fs_data['referencedQuotaSize'] / units.Gi) + 1 

380 else: 

381 size = fs_data['bytesReferenced'] / units.Gi + 1 

382 # rename filesystem on appliance to correlate with manila ID. 

383 new_path = '%s/%s' % (self.root_path, self._get_share_name(share)) 

384 self.nef.filesystems.rename(fs_path, {'newPath': new_path}) 

385 # make sure quotas and reservations are correct. 

386 if not self.configuration.nexenta_thin_provisioning: 386 ↛ 388line 386 didn't jump to line 388 because the condition on line 386 was always true

387 self._set_reservation(share, size) 

388 self._set_quota(share, size) 

389 

390 return {'size': size, 'export_locations': [{ 

391 'path': '%s:/%s' % (self.nas_host, new_path) 

392 }]} 

393 

394 def update_access(self, context, share, access_rules, add_rules, 

395 delete_rules, update_rules, share_server=None): 

396 """Update access rules for given share. 

397 

398 Using access_rules list for both adding and deleting rules. 

399 :param context: The `context.RequestContext` object for the request 

400 :param share: Share that will have its access rules updated. 

401 :param access_rules: All access rules for given share. This list 

402 is enough to update the access rules for given share. 

403 :param add_rules: Empty List or List of access rules which should be 

404 added. access_rules already contains these rules. Not used by 

405 this driver. 

406 :param delete_rules: Empty List or List of access rules which should be 

407 removed. access_rules doesn't contain these rules. Not used by 

408 this driver. 

409 :param update_rules: Empty List or List of access rules which should be 

410 updated. access_rules already contains these rules. 

411 :param share_server: Data structure with share server information. 

412 Not used by this driver. 

413 """ 

414 LOG.debug('Updating access to share %(id)s with following access ' 

415 'rules: %(rules)s', { 

416 'id': self._get_share_name(share), 

417 'rules': [( 

418 rule.get('access_type'), rule.get('access_level'), 

419 rule.get('access_to')) for rule in access_rules]}) 

420 rw_list = [] 

421 ro_list = [] 

422 update_dict = {} 

423 if share['share_proto'] == 'NFS': 423 ↛ 441line 423 didn't jump to line 441 because the condition on line 423 was always true

424 for rule in access_rules: 

425 if rule['access_type'].lower() != 'ip': 

426 msg = _( 

427 'Only IP access control type is supported for NFS.') 

428 LOG.warning(msg) 

429 update_dict[rule['access_id']] = { 

430 'state': 'error', 

431 } 

432 else: 

433 update_dict[rule['access_id']] = { 

434 'state': 'active', 

435 } 

436 if rule['access_level'] == common.ACCESS_LEVEL_RW: 

437 rw_list.append(rule['access_to']) 

438 else: 

439 ro_list.append(rule['access_to']) 

440 self._update_nfs_access(share, rw_list, ro_list) 

441 return update_dict 

442 

    def _update_nfs_access(self, share, rw_list, ro_list):
        """Replace the NFS security contexts of a share with rw/ro lists.

        :param share: share model
        :param rw_list: addresses ('addr' or 'addr/mask') with read-write
            access
        :param ro_list: addresses with read-only access
        """
        # Define allowed security context types to be able to tell whether
        # the 'security_contexts' dict contains any rules at all
        context_types = {'none', 'root', 'readOnlyList', 'readWriteList'}

        security_contexts = {'securityModes': ['sys']}

        def add_sc(addr_list, sc_type):
            # Translate 'address[/mask]' strings into NEF rule entries and
            # attach them to security_contexts under sc_type.
            if sc_type not in context_types:
                return

            rule_list = []

            for addr in addr_list:
                address_mask = addr.strip().split('/', 1)
                address = address_mask[0]
                ls = {"allow": True, "etype": "fqdn", "entity": address}
                if len(address_mask) == 2:
                    mask = int(address_mask[1])
                    # NOTE(review): masks 31 and 32 fall through and are
                    # passed as plain 'fqdn' entities -- confirm intended.
                    if 0 <= mask < 31:
                        ls['mask'] = mask
                        ls['etype'] = 'network'
                rule_list.append(ls)

            # Context type with no addresses will result in an API error
            if rule_list:
                security_contexts[sc_type] = rule_list

        add_sc(rw_list, 'readWriteList')
        add_sc(ro_list, 'readOnlyList')
        payload = {'securityContexts': [security_contexts]}
        share_path = self._get_dataset_path(share)
        if self.nef.nfs.list({'filesystem': share_path}):
            # Share already exists: if no access rules remain, unshare
            # the filesystem completely; otherwise replace its contexts.
            if not set(security_contexts.keys()) & context_types:
                self.nef.nfs.delete(share_path)
            else:
                self.nef.nfs.set(share_path, payload)
        else:
            payload['filesystem'] = share_path
            self.nef.nfs.create(payload)
        # Grant everyone@ full inheritable ZFS ACL permissions; per-host
        # restrictions are enforced by the NFS security contexts above.
        payload = {
            'flags': ['file_inherit', 'dir_inherit'],
            'permissions': ['full_set'],
            'principal': 'everyone@',
            'type': 'allow'
        }
        self.nef.filesystems.acl(share_path, payload)

490 

491 def _set_quota(self, share, new_size): 

492 quota = int(new_size * units.Gi * ZFS_MULTIPLIER) 

493 share_path = self._get_dataset_path(share) 

494 payload = {'referencedQuotaSize': quota} 

495 LOG.debug('Setting quota for dataset %s.', share_path) 

496 self.nef.filesystems.set(share_path, payload) 

497 

498 def _set_reservation(self, share, new_size): 

499 res_size = int(new_size * units.Gi * ZFS_MULTIPLIER) 

500 share_path = self._get_dataset_path(share) 

501 payload = {'referencedReservationSize': res_size} 

502 self.nef.filesystems.set(share_path, payload) 

503 

504 def _update_share_stats(self, data=None): 

505 super(NexentaNasDriver, self)._update_share_stats() 

506 total, free, allocated = self._get_capacity_info() 

507 compression = not self.dataset_compression == 'off' 

508 data = { 

509 'vendor_name': 'Nexenta', 

510 'storage_protocol': self.storage_protocol, 

511 'share_backend_name': self.share_backend_name, 

512 'nfs_mount_point_base': self.nfs_mount_point_base, 

513 'driver_version': VERSION, 

514 'snapshot_support': True, 

515 'create_share_from_snapshot_support': True, 

516 'revert_to_snapshot_support': True, 

517 'pools': [{ 

518 'pool_name': self.pool_name, 

519 'compression': compression, 

520 'total_capacity_gb': total, 

521 'free_capacity_gb': free, 

522 'reserved_percentage': ( 

523 self.configuration.reserved_share_percentage), 

524 'reserved_snapshot_percentage': 

525 (self.configuration.reserved_share_from_snapshot_percentage 

526 or self.configuration.reserved_share_percentage), 

527 'reserved_share_extend_percentage': 

528 (self.configuration.reserved_share_extend_percentage 

529 or self.configuration.reserved_share_percentage), 

530 'max_over_subscription_ratio': ( 

531 self.configuration.safe_get( 

532 'max_over_subscription_ratio')), 

533 'thin_provisioning': 

534 self.configuration.nexenta_thin_provisioning, 

535 'provisioned_capacity_gb': self.provisioned_capacity, 

536 }], 

537 } 

538 self._stats.update(data) 

539 

540 def _get_capacity_info(self): 

541 """Calculate available space on the NFS share.""" 

542 data = self.nef.filesystems.get(self.root_path) 

543 free = int(utils.bytes_to_gb(data['bytesAvailable'])) 

544 allocated = int(utils.bytes_to_gb(data['bytesUsed'])) 

545 total = free + allocated 

546 return total, free, allocated