Coverage for manila/share/drivers/container/storage_helper.py: 93%

121 statements  

« prev     ^ index     » next       coverage.py v7.11.0, created at 2026-02-18 22:19 +0000

1# Copyright (c) 2016 Mirantis, Inc. 

2# All Rights Reserved. 

3# 

4# Licensed under the Apache License, Version 2.0 (the "License"); you may 

5# not use this file except in compliance with the License. You may obtain 

6# a copy of the License at 

7# 

8# http://www.apache.org/licenses/LICENSE-2.0 

9# 

10# Unless required by applicable law or agreed to in writing, software 

11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

13# License for the specific language governing permissions and limitations 

14# under the License. 

15 

16import os 

17import re 

18 

19from oslo_config import cfg 

20from oslo_log import log 

21 

22from manila import exception 

23from manila.i18n import _ 

24from manila.share import driver 

25from manila.share import utils as share_utils 

26 

# Global oslo.config object shared with the rest of manila.
CONF = cfg.CONF

# Options for the LVM-backed container storage helper.  The volume group
# itself is NOT created by manila; the operator must provision it up front.
lv_opts = [
    cfg.StrOpt("container_volume_group",
               default="manila_docker_volumes",
               help="LVM volume group to use for volumes. This volume group "
                    "must be created by the cloud administrator independently "
                    "from manila operations."),
]

# Register at import time so the option is visible even before an
# LVMHelper instance appends it to a per-backend configuration.
CONF.register_opts(lv_opts)
LOG = log.getLogger(__name__)

39 

40 

class LVMHelper(driver.ExecuteMixin):
    """Storage helper backing container shares with LVM logical volumes.

    Every share is provisioned as an ext4-formatted logical volume inside
    the volume group named by the ``container_volume_group`` option.  All
    LVM commands are run as root through the ``ExecuteMixin`` executor.
    """

    def __init__(self, *args, **kwargs):
        # The per-backend configuration is mandatory: every operation needs
        # the volume group name from it.
        self.configuration = kwargs.pop("configuration", None)
        if self.configuration is None:
            raise exception.ManilaException(_("LVMHelper called without "
                                              "supplying configuration."))
        self.configuration.append_config_values(lv_opts)
        super(LVMHelper, self).__init__(*args, **kwargs)
        self.init_execute_mixin()

    def get_share_server_pools(self, share_server=None):
        """Return capacity information for the backing volume group.

        :param share_server: unused, accepted for driver interface parity.
        :returns: single-element list describing the configured volume
            group as a pool (total/free capacity in GiB).
        :raises exception.ShareBackendException: if ``vgs`` reports an
            error on stderr.
        """
        out, err = self._execute('vgs',
                                 self.configuration.container_volume_group,
                                 '--options', 'vg_size,vg_free',
                                 '--noheadings',
                                 '--units', 'g',
                                 run_as_root=True)
        if err:
            msg = _("Unable to gather size of the volume group %(vg)s to be "
                    "used by the driver. Error: %(err)s")
            raise exception.ShareBackendException(
                msg % {'vg': self.configuration.container_volume_group,
                       'err': err})

        # ``vgs`` prints "<vg_size> <vg_free>".  Sort numerically so the
        # smaller value (free space never exceeds total size) is reliably
        # the free capacity.  A plain lexicographic sort misorders values
        # such as "9.00" and "10.00", swapping total and free.
        (free_size, total_size) = sorted(re.findall(r"\d+\.\d+|\d+", out),
                                         key=float)
        return [{
            'pool_name': self.configuration.container_volume_group,
            'total_capacity_gb': float(total_size),
            'free_capacity_gb': float(free_size),
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
        }, ]

    def _get_lv_device(self, share_name):
        """Return the device path of the share's logical volume."""
        return os.path.join("/dev", self.configuration.container_volume_group,
                            share_name)

    def _get_lv_folder(self, share_name):
        """Return the host mount point for the share's logical volume.

        NOTE(review): ``container_volume_mount_path`` is not registered in
        this module's ``lv_opts`` — presumably it is registered by the
        container driver itself; confirm before relying on it here.
        """
        return os.path.join(self.configuration.container_volume_mount_path,
                            share_name)

    def provide_storage(self, share_name, size):
        """Create an ext4-formatted logical volume of *size* GiB."""
        self._execute("lvcreate", "-p", "rw", "-L",
                      str(size) + "G", "-n", share_name,
                      self.configuration.container_volume_group,
                      run_as_root=True)
        self._execute("mkfs.ext4", self._get_lv_device(share_name),
                      run_as_root=True)

    def _try_to_unmount_device(self, device):
        # NOTE(ganso): We invoke this method to be sure volume was unmounted,
        # and we swallow the exception in case it fails to.
        try:
            self._execute("umount", device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.warning("Failed to umount helper directory %(device)s due to "
                        "%(reason)s.", {'device': device, 'reason': e})

    def remove_storage(self, share_name):
        """Unmount and delete the share's logical volume (best effort)."""
        device = self._get_lv_device(share_name)
        self._try_to_unmount_device(device)

        # (aovchinnikov): bug 1621784 manifests itself in jamming logical
        # volumes, so try removing once and issue warning until it is fixed.
        try:
            self._execute("lvremove", "-f", "--autobackup", "n",
                          device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.warning("Failed to remove logical volume %(device)s due to "
                        "%(reason)s.", {'device': device, 'reason': e})

    def rename_storage(self, share_name, new_share_name):
        """Rename the share's logical volume.

        Unlike :meth:`remove_storage`, a rename failure is re-raised so
        the caller can react to the inconsistent state.
        """
        old_device = self._get_lv_device(share_name)
        new_device = self._get_lv_device(new_share_name)

        self._try_to_unmount_device(old_device)

        try:
            self._execute("lvrename", "--autobackup", "n",
                          old_device, new_device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = ("Failed to rename logical volume %(device)s due to "
                   "%(reason)s." % {'device': old_device, 'reason': e})
            LOG.exception(msg)
            raise

    def extend_share(self, share_name, new_size, share_server=None):
        """Grow the logical volume and its ext4 filesystem to *new_size* GiB."""
        lv_device = self._get_lv_device(share_name)
        # NOTE(review): for lvextend "-n" presumably means --nofsck; the
        # filesystem check is performed explicitly below — confirm against
        # the lvextend man page for the target distribution.
        cmd = ('lvextend', '-L', '%sG' % new_size, '-n', lv_device)
        self._execute(*cmd, run_as_root=True)
        self._execute("e2fsck", "-f", "-y", lv_device, run_as_root=True)
        self._execute('resize2fs', lv_device, run_as_root=True)

    def get_size(self, share_name):
        """Return the LV size in GiB as reported by ``lvs`` (stdout string)."""
        device = self._get_lv_device(share_name)
        size = self._execute(
            "lvs", "-o", "lv_size", "--noheadings", "--nosuffix",
            "--units", "g", device, run_as_root=True)
        LOG.debug("Found size %(size)s for LVM device "
                  "%(lvm)s.", {'size': size[0], 'lvm': share_name})
        # _execute returns (stdout, stderr); callers get the raw stdout.
        return size[0]

    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
        """Checks compatibility between self.host and destination host."""
        # They must be in same vg and host
        compatible = False
        destination_host = destination_share['host']
        source_host = source_share['host']
        destination_vg = share_utils.extract_host(
            destination_host, level='pool')
        source_vg = share_utils.extract_host(
            source_host, level='pool')

        if destination_vg != source_vg:
            msg = ("Cannot migrate share %(shr)s between "
                   "%(src)s and %(dest)s, they must be in the same volume "
                   "group.")
            msg_args = {
                'shr': source_share['id'],
                'src': source_share['host'],
                'dest': destination_host,
            }
            # Not inside an except block, so LOG.error is the right call;
            # LOG.exception here would log a spurious empty traceback.
            LOG.error(msg, msg_args)
        else:
            compatible = True

        compatibility = {
            'compatible': compatible,
            'writable': True,
            'nondisruptive': False,
            'preserve_metadata': True,
            'preserve_snapshots': False,
        }

        return compatibility

    def migration_start(self, context, source_share, destination_share,
                        source_snapshots, snapshot_mappings,
                        share_server=None, destination_share_server=None):
        """Starts the migration of the share from one host to another."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to copy the data between the volumes.
        return

    def migration_continue(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Check the progress of the migration."""
        return True

    def migration_get_progress(self, context, source_share,
                               destination_share, source_snapshots,
                               snapshot_mappings, share_server=None,
                               destination_share_server=None):
        """Return detailed progress of the migration in progress."""
        return {
            'total_progress': 100,
        }

    def migration_cancel(self, context, source_share, destination_share,
                         source_snapshots, snapshot_mappings,
                         share_server=None, destination_share_server=None):
        """Abort an ongoing migration."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to cancel the copy of the data.
        return

    def migration_complete(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Completes by removing the source local volume."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to remove source lv.
        return

    def share_server_migration_check_compatibility(
            self, context, share_server, dest_host, old_share_network,
            new_share_network, shares_request_spec):
        """Is called to check migration compatibility for a share server."""
        not_compatible = {
            'compatible': False,
            'writable': None,
            'nondisruptive': None,
            'preserve_snapshots': None,
            'migration_cancel': None,
            'migration_get_progress': None,
        }

        dest_backend_name = share_utils.extract_host(dest_host,
                                                     level='backend_name')
        source_backend_name = share_utils.extract_host(share_server['host'],
                                                       level='backend_name')
        if dest_backend_name == source_backend_name:
            msg = _("Cannot perform server migration %(server)s within the "
                    "same backend. Please choose a destination host different "
                    "from the source.")
            msg_args = {
                'server': share_server['id'],
            }
            LOG.error(msg, msg_args)
            return not_compatible

        # The container backend has only one pool, gets its pool name from the
        # first instance.
        first_share = shares_request_spec['shares_req_spec'][0]
        source_host = first_share['share_instance_properties']['host']
        source_vg = share_utils.extract_host(
            source_host, level='pool')
        dest_vg = share_utils.extract_host(
            dest_host, level='pool')
        if dest_vg and dest_vg != source_vg:
            msg = ("Cannot migrate share server %(server)s between %(src)s "
                   "and %(dest)s. They must be in the same volume group.")
            msg_args = {
                'server': share_server['id'],
                'src': source_host,
                'dest': dest_host,
            }
            LOG.error(msg, msg_args)
            return not_compatible

        # NOTE(felipe_rodrigues): it is not required to check the capacity,
        # because it is migrating in the same volume group.

        return {
            'compatible': True,
            'writable': True,
            'nondisruptive': False,
            'preserve_snapshots': False,
            'migration_cancel': True,
            'migration_get_progress': True,
        }

    def share_server_migration_start(self, context, src_share_server,
                                     dest_share_server, shares, snapshots):
        """Is called to perform 1st phase of migration of a share server."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to copy the data between the volumes.
        return

    def share_server_migration_continue(self, context, src_share_server,
                                        dest_share_server, shares, snapshots):
        """Check the progress of the migration."""
        return True

    def share_server_migration_complete(self, context, source_share_server,
                                        dest_share_server, shares, snapshots,
                                        new_network_allocations):
        """Completes by removing the source local volume."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to remove source lv.
        return

    def share_server_migration_cancel(self, context, src_share_server,
                                      dest_share_server, shares, snapshots):
        """Abort an ongoing migration."""

        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to cancel the copy of the data.
        return

    def share_server_migration_get_progress(self, context, src_share_server,
                                            dest_share_server, shares,
                                            snapshots):
        """Return detailed progress of the server migration in progress."""

        return {
            'total_progress': 100,
        }

    def get_share_pool_name(self, share_id):
        """Return the pool name where the share is allocated"""

        return self.configuration.container_volume_group