Coverage for manila/share/drivers/hitachi/hsp/driver.py: 99%

173 statements  

« prev     ^ index     » next       coverage.py v7.11.0, created at 2026-02-18 22:19 +0000

1# Copyright (c) 2016 Hitachi Data Systems, Inc. 

2# All Rights Reserved. 

3# 

4# Licensed under the Apache License, Version 2.0 (the "License"); you may 

5# not use this file except in compliance with the License. You may obtain 

6# a copy of the License at 

7# 

8# http://www.apache.org/licenses/LICENSE-2.0 

9# 

10# Unless required by applicable law or agreed to in writing, software 

11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

13# License for the specific language governing permissions and limitations 

14# under the License. 

15 

16from oslo_config import cfg 

17from oslo_log import log 

18from oslo_utils import excutils 

19from oslo_utils import units 

20 

21from manila.common import constants 

22from manila import exception 

23from manila.i18n import _ 

24from manila.share import driver 

25from manila.share.drivers.hitachi.hsp import rest 

26 

27LOG = log.getLogger(__name__) 

28 

29hitachi_hsp_opts = [ 

30 cfg.HostAddressOpt('hitachi_hsp_host', 

31 required=True, 

32 help="HSP management host for communication between " 

33 "Manila controller and HSP."), 

34 cfg.StrOpt('hitachi_hsp_username', 

35 required=True, 

36 help="HSP username to perform tasks such as create filesystems" 

37 " and shares."), 

38 cfg.StrOpt('hitachi_hsp_password', 

39 required=True, 

40 secret=True, 

41 help="HSP password for the username provided."), 

42] 

43 

44 

class HitachiHSPDriver(driver.ShareDriver):
    """Manila HSP Driver implementation.

    Drives a Hitachi Scale-out Platform (HSP) backend through its REST
    management interface (``rest.HSPRestBackend``).  Supports NFS shares
    only, one backend filesystem per share.

    1.0.0 - Initial Version.
    """

    def __init__(self, *args, **kwargs):
        # The leading [False] tells the base ShareDriver that this driver
        # does not manage share servers (driver_handles_share_servers=False);
        # hitachi_hsp_opts are registered so safe_get() below can read them.
        super(HitachiHSPDriver, self).__init__(
            [False], *args, config_opts=[hitachi_hsp_opts], **kwargs)

        # Key/value store used by manage_existing()/unmanage() to remember
        # the filesystem's original backend name.
        self.private_storage = kwargs.get('private_storage')

        self.backend_name = self.configuration.safe_get('share_backend_name')
        # Management host; also used to build export paths ("host:/share").
        self.hsp_host = self.configuration.safe_get('hitachi_hsp_host')

        # REST client used for every backend operation.
        self.hsp = rest.HSPRestBackend(
            self.hsp_host,
            self.configuration.safe_get('hitachi_hsp_username'),
            self.configuration.safe_get('hitachi_hsp_password')
        )
66 def _update_share_stats(self, data=None): 

67 LOG.debug("Updating Backend Capability Information - Hitachi HSP.") 

68 

69 reserved = self.configuration.safe_get('reserved_share_percentage') 

70 reserved_snapshot = (self.configuration.safe_get( 

71 'reserved_share_from_snapshot_percentage') or 

72 self.configuration.safe_get('reserved_share_percentage')) 

73 reserved_share_extend = (self.configuration.safe_get( 

74 'reserved_share_extend_percentage') or 

75 self.configuration.safe_get('reserved_share_percentage')) 

76 max_over_subscription_ratio = self.configuration.safe_get( 

77 'max_over_subscription_ratio') 

78 hsp_cluster = self.hsp.get_cluster() 

79 

80 total_space = hsp_cluster['properties']['total-storage-capacity'] 

81 free_space = hsp_cluster['properties']['total-storage-available'] 

82 

83 data = { 

84 'share_backend_name': self.backend_name, 

85 'vendor_name': 'Hitachi', 

86 'driver_version': '1.0.0', 

87 'storage_protocol': 'NFS', 

88 'pools': [{ 

89 'reserved_percentage': reserved, 

90 'reserved_snapshot_percentage': reserved_snapshot, 

91 'reserved_share_extend_percentage': reserved_share_extend, 

92 'pool_name': 'HSP', 

93 'thin_provisioning': True, 

94 'total_capacity_gb': total_space / units.Gi, 

95 'free_capacity_gb': free_space / units.Gi, 

96 'max_over_subscription_ratio': max_over_subscription_ratio, 

97 'qos': False, 

98 'dedupe': False, 

99 'compression': False, 

100 }], 

101 } 

102 

103 LOG.info("Hitachi HSP Capabilities: %(data)s.", 

104 {'data': data}) 

105 super(HitachiHSPDriver, self)._update_share_stats(data) 

106 

107 def create_share(self, context, share, share_server=None): 

108 LOG.debug("Creating share in HSP: %(shr)s", {'shr': share['id']}) 

109 

110 if share['share_proto'].lower() != 'nfs': 

111 msg = _("Only NFS protocol is currently supported.") 

112 raise exception.InvalidShare(reason=msg) 

113 

114 self.hsp.add_file_system(share['id'], share['size'] * units.Gi) 

115 filesystem_id = self.hsp.get_file_system(share['id'])['id'] 

116 

117 try: 

118 self.hsp.add_share(share['id'], filesystem_id) 

119 except exception.HSPBackendException: 

120 with excutils.save_and_reraise_exception(): 

121 self.hsp.delete_file_system(filesystem_id) 

122 msg = ("Could not create share %s on HSP.") 

123 LOG.exception(msg, share['id']) 

124 

125 uri = self.hsp_host + ':/' + share['id'] 

126 

127 LOG.debug("Share created successfully on path: %(uri)s.", 

128 {'uri': uri}) 

129 return [{ 

130 "path": uri, 

131 "metadata": {}, 

132 "is_admin_only": False, 

133 }] 

134 

135 def delete_share(self, context, share, share_server=None): 

136 LOG.debug("Deleting share in HSP: %(shr)s.", {'shr': share['id']}) 

137 

138 filesystem_id = hsp_share_id = None 

139 

140 try: 

141 filesystem_id = self.hsp.get_file_system(share['id'])['id'] 

142 hsp_share_id = self.hsp.get_share(filesystem_id)['id'] 

143 except exception.HSPItemNotFoundException: 

144 LOG.info("Share %(shr)s already removed from backend.", 

145 {'shr': share['id']}) 

146 

147 if hsp_share_id: 

148 # Clean all rules from share before deleting it 

149 current_rules = self.hsp.get_access_rules(hsp_share_id) 

150 for rule in current_rules: 

151 try: 

152 self.hsp.delete_access_rule(hsp_share_id, 

153 rule['name']) 

154 except exception.HSPBackendException as e: 

155 if 'No matching access rule found.' in e.msg: 

156 LOG.debug("Rule %(rule)s already deleted in " 

157 "backend.", {'rule': rule['name']}) 

158 else: 

159 raise 

160 

161 self.hsp.delete_share(hsp_share_id) 

162 

163 if filesystem_id: 

164 self.hsp.delete_file_system(filesystem_id) 

165 

166 LOG.debug("Export and share successfully deleted: %(shr)s.", 

167 {'shr': share['id']}) 

168 

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Synchronize the share's NFS access rules with the backend.

        Two modes, following the manila update_access contract:

        * Recovery mode -- when both ``add_rules`` and ``delete_rules`` are
          empty, ``access_rules`` is the complete desired rule set; it is
          diffed against what HSP currently has and only the differences
          are applied.
        * Incremental mode -- otherwise, ``delete_rules`` are removed and
          ``add_rules`` are added individually.

        Only IP-type rules are supported: non-IP entries are skipped on
        delete but raise InvalidShareAccess on add/recovery.

        :raises exception.ShareResourceNotFound: if the share's filesystem
            or export no longer exists on the backend.
        :raises exception.InvalidShareAccess: for non-IP access types.
        """
        LOG.debug("Updating access rules for share: %(shr)s.",
                  {'shr': share['id']})

        try:
            filesystem_id = self.hsp.get_file_system(share['id'])['id']
            hsp_share_id = self.hsp.get_share(filesystem_id)['id']
        except exception.HSPItemNotFoundException:
            raise exception.ShareResourceNotFound(share_id=share['id'])

        if not (add_rules or delete_rules):
            # Recovery mode
            current_rules = self.hsp.get_access_rules(hsp_share_id)

            # Indexing the rules for faster searching:
            # maps host-specification -> read-write flag.
            hsp_rules_dict = {
                rule['host-specification']: rule['read-write']
                for rule in current_rules
            }

            manila_rules_dict = {}

            for rule in access_rules:
                if rule['access_type'].lower() != 'ip':
                    msg = _("Only IP access type currently supported.")
                    raise exception.InvalidShareAccess(reason=msg)

                access_to = rule['access_to']
                is_rw = rule['access_level'] == constants.ACCESS_LEVEL_RW
                manila_rules_dict[access_to] = is_rw

            # Remove the rules that exist on HSP but not on manila
            remove_rules = self._get_complement(hsp_rules_dict,
                                                manila_rules_dict)

            # Add the rules that exist on manila but not on HSP
            add_rules = self._get_complement(manila_rules_dict, hsp_rules_dict)

            # Deletions first, so an RW<->RO level change is applied as
            # delete-then-add of the same host.  Each entry is a
            # (host, is_rw) tuple produced by _get_complement().
            for rule in remove_rules:
                rule_name = self._get_hsp_rule_name(hsp_share_id, rule[0])
                self.hsp.delete_access_rule(hsp_share_id, rule_name)

            for rule in add_rules:
                self.hsp.add_access_rule(hsp_share_id, rule[0], rule[1])
        else:
            for rule in delete_rules:
                if rule['access_type'].lower() != 'ip':
                    continue

                # get the real rule name in HSP
                rule_name = self._get_hsp_rule_name(hsp_share_id,
                                                    rule['access_to'])
                try:
                    self.hsp.delete_access_rule(hsp_share_id,
                                                rule_name)
                except exception.HSPBackendException as e:
                    # Already gone on the backend: treat as success.
                    if 'No matching access rule found.' in e.msg:
                        LOG.debug("Rule %(rule)s already deleted in "
                                  "backend.", {'rule': rule['access_to']})
                    else:
                        raise

            for rule in add_rules:
                if rule['access_type'].lower() != 'ip':
                    msg = _("Only IP access type currently supported.")
                    raise exception.InvalidShareAccess(reason=msg)

                try:
                    self.hsp.add_access_rule(
                        hsp_share_id, rule['access_to'],
                        (rule['access_level'] == constants.ACCESS_LEVEL_RW))
                except exception.HSPBackendException as e:
                    # Duplicate rule: backend is already in desired state.
                    if 'Duplicate NFS access rule exists' in e.msg:
                        LOG.debug("Rule %(rule)s already exists in "
                                  "backend.", {'rule': rule['access_to']})
                    else:
                        raise

        LOG.debug("Successfully updated share %(shr)s rules.",
                  {'shr': share['id']})
252 def _get_hsp_rule_name(self, share_id, host_to): 

253 rule_name = share_id + host_to 

254 all_rules = self.hsp.get_access_rules(share_id) 

255 for rule in all_rules: 

256 # check if this rule has other name in HSP 

257 if rule['host-specification'] == host_to: 257 ↛ 255line 257 didn't jump to line 255 because the condition on line 257 was always true

258 rule_name = rule['name'] 

259 break 

260 

261 return rule_name 

262 

263 def _get_complement(self, rules_a, rules_b): 

264 """Returns the rules of list A that are not on list B""" 

265 complement = [] 

266 for rule, is_rw in rules_a.items(): 

267 if rule not in rules_b or rules_b[rule] != is_rw: 267 ↛ 266line 267 didn't jump to line 266 because the condition on line 267 was always true

268 complement.append((rule, is_rw)) 

269 

270 return complement 

271 

272 def extend_share(self, share, new_size, share_server=None): 

273 LOG.debug("Extending share in HSP: %(shr_id)s.", 

274 {'shr_id': share['id']}) 

275 

276 old_size = share['size'] 

277 hsp_cluster = self.hsp.get_cluster() 

278 free_space = hsp_cluster['properties']['total-storage-available'] 

279 free_space = free_space / units.Gi 

280 

281 if (new_size - old_size) < free_space: 

282 filesystem_id = self.hsp.get_file_system(share['id'])['id'] 

283 self.hsp.resize_file_system(filesystem_id, new_size * units.Gi) 

284 else: 

285 msg = (_("Share %s cannot be extended due to insufficient space.") 

286 % share['id']) 

287 raise exception.HSPBackendException(msg=msg) 

288 

289 LOG.info("Share %(shr_id)s successfully extended to " 

290 "%(shr_size)sG.", 

291 {'shr_id': share['id'], 

292 'shr_size': new_size}) 

293 

294 def shrink_share(self, share, new_size, share_server=None): 

295 LOG.debug("Shrinking share in HSP: %(shr_id)s.", 

296 {'shr_id': share['id']}) 

297 

298 file_system = self.hsp.get_file_system(share['id']) 

299 usage = file_system['properties']['used-capacity'] / units.Gi 

300 

301 LOG.debug("Usage for share %(shr_id)s in HSP: %(usage)sG.", 

302 {'shr_id': share['id'], 'usage': usage}) 

303 

304 if new_size > usage: 

305 self.hsp.resize_file_system(file_system['id'], new_size * units.Gi) 

306 else: 

307 raise exception.ShareShrinkingPossibleDataLoss( 

308 share_id=share['id']) 

309 

310 LOG.info("Share %(shr_id)s successfully shrunk to " 

311 "%(shr_size)sG.", 

312 {'shr_id': share['id'], 

313 'shr_size': new_size}) 

314 

315 def manage_existing(self, share, driver_options): 

316 LOG.debug("Managing share in HSP: %(shr_id)s.", 

317 {'shr_id': share['id']}) 

318 

319 ip, share_name = share['export_locations'][0]['path'].split(':') 

320 

321 try: 

322 hsp_share = self.hsp.get_share(name=share_name.strip('/')) 

323 except exception.HSPItemNotFoundException: 

324 msg = _("The share %s trying to be managed was not found on " 

325 "backend.") % share['id'] 

326 raise exception.ManageInvalidShare(reason=msg) 

327 

328 self.hsp.rename_file_system(hsp_share['properties']['file-system-id'], 

329 share['id']) 

330 

331 original_name = hsp_share['properties']['file-system-name'] 

332 private_storage_content = { 

333 'old_name': original_name, 

334 'new_name': share['id'], 

335 } 

336 self.private_storage.update(share['id'], private_storage_content) 

337 

338 LOG.debug("Filesystem %(original_name)s was renamed to %(name)s.", 

339 {'original_name': original_name, 

340 'name': share['id']}) 

341 

342 file_system = self.hsp.get_file_system(share['id']) 

343 

344 LOG.info("Share %(shr_path)s was successfully managed with ID " 

345 "%(shr_id)s.", 

346 {'shr_path': share['export_locations'][0]['path'], 

347 'shr_id': share['id']}) 

348 

349 export_locations = [{ 

350 "path": share['export_locations'][0]['path'], 

351 "metadata": {}, 

352 "is_admin_only": False, 

353 }] 

354 

355 return {'size': file_system['properties']['quota'] / units.Gi, 

356 'export_locations': export_locations} 

357 

358 def unmanage(self, share): 

359 original_name = self.private_storage.get(share['id'], 'old_name') 

360 

361 LOG.debug("Filesystem %(name)s that was originally named " 

362 "%(original_name)s will no longer be managed.", 

363 {'original_name': original_name, 

364 'name': share['id']}) 

365 

366 self.private_storage.delete(share['id']) 

367 

368 LOG.info("The share with current path %(shr_path)s and ID " 

369 "%(shr_id)s is no longer being managed.", 

370 {'shr_path': share['export_locations'][0]['path'], 

371 'shr_id': share['id']}) 

372 

373 def get_default_filter_function(self): 

374 return "share.size >= 128"