Coverage for manila/tests/db/migrations/alembic/migrations_data_checks.py: 98%
1536 statements
« prev ^ index » next coverage.py v7.11.0, created at 2026-02-18 22:19 +0000
1# Copyright 2015 Mirantis inc.
2# All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"); you may
5# not use this file except in compliance with the License. You may obtain
6# a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13# License for the specific language governing permissions and limitations
14# under the License.
16"""
17Tests data for database migrations.
19All database migrations with data manipulation
20(like moving data from column to the table) should have data check class:
22@map_to_migration('1f0bd302c1a6') # Revision of checked db migration
23class FooMigrationChecks(BaseMigrationChecks):
24 def setup_upgrade_data(self, conn):
25 ...
27 def check_upgrade(self, conn, data):
28 ...
30 def check_downgrade(self, conn):
31 ...
33See BaseMigrationChecks class for more information.
34"""
36import abc
37import copy
38import datetime
40from oslo_db import exception as oslo_db_exc
41from oslo_utils import uuidutils
42from sqlalchemy import exc as sa_exc
44from manila.common import constants
45from manila.db.migrations import utils
class DbMigrationsData(object):
    """Dispatcher that routes migration-test hooks to registered checks.

    The migration test framework looks up methods named like
    '_pre_upgrade_<revision>', '_check_<revision>' and
    '_post_downgrade_<revision>'.  __getattr__ resolves such names to the
    corresponding method of the BaseMigrationChecks instance registered
    for that revision via @map_to_migration.
    """

    # revision string -> BaseMigrationChecks instance (filled by decorator).
    migration_mappings = {}

    # Hook prefix -> name of the checks-object method to delegate to.
    methods_mapping = {
        'pre': 'setup_upgrade_data',
        'check': 'check_upgrade',
        'post': 'check_downgrade',
    }

    def __getattr__(self, item):
        tokens = item.split('_')

        # Names of interest look like '_<hook>_..._<revision>': a leading
        # underscore, a known hook keyword, and at least one more token.
        looks_like_hook = (
            len(tokens) > 2 and tokens[0] == ''
            and tokens[1] in self.methods_mapping
        )
        if not looks_like_hook:
            return super(DbMigrationsData, self).__getattribute__(item)

        checks = self.migration_mappings.get(tokens[-1])
        if checks is None:
            # No checks registered for this revision.
            raise AttributeError

        # Give the checks object access to this test case's assertions.
        checks.set_test_case(self)

        return getattr(checks, self.methods_mapping[tokens[1]])
def map_to_migration(revision):
    """Class decorator binding a checks class to an alembic *revision*.

    An instance of the decorated BaseMigrationChecks subclass is stored in
    DbMigrationsData.migration_mappings under the revision string.
    """
    def register(check_cls):
        instance = check_cls()
        DbMigrationsData.migration_mappings[revision] = instance
        return check_cls
    return register
class BaseMigrationChecks(metaclass=abc.ABCMeta):
    """Contract for data checks attached to a single DB migration.

    Subclasses are registered with @map_to_migration and receive the
    running test case through set_test_case(), so the check methods can
    use its unittest assertion helpers.
    """

    def __init__(self):
        # Populated later via set_test_case(); None until then.
        self.test_case = None

    def set_test_case(self, test_case):
        """Remember the test case whose assertions the checks will use."""
        self.test_case = test_case

    @abc.abstractmethod
    def setup_upgrade_data(self, conn):
        """Insert fixture rows before the migration under test runs.

        :param conn: SQLAlchemy conn
        :return: any data which will be passed to 'check_upgrade' as 'data' arg
        """

    @abc.abstractmethod
    def check_upgrade(self, conn, data):
        """Assert on the database state after the upgrade has been applied.

        To perform assertions use the 'self.test_case' instance property:
            self.test_case.assertTrue(True)

        :param conn: SQLAlchemy conn
        :param data: data returned by 'setup_upgrade_data'
        """

    @abc.abstractmethod
    def check_downgrade(self, conn):
        """Assert on the database state after the downgrade has been applied.

        To perform assertions use the 'self.test_case' instance property:
            self.test_case.assertTrue(True)

        :param conn: SQLAlchemy conn
        """
def fake_share(**kwargs):
    """Build a fake 'shares' table row as a plain dict.

    Any keyword argument overrides the corresponding default value.
    """
    values = dict(
        id=uuidutils.generate_uuid(),
        display_name='fake_share',
        display_description='my fake share',
        snapshot_id=uuidutils.generate_uuid(),
        is_public=True,
        size=1,
        deleted='False',
        share_proto='fake_proto',
        user_id=uuidutils.generate_uuid(),
        project_id=uuidutils.generate_uuid(),
        snapshot_support=True,
        task_state=None,
    )
    values.update(kwargs)
    return values
def fake_instance(share_id=None, **kwargs):
    """Build a fake 'share_instances' table row as a plain dict.

    :param share_id: parent share id; a fresh UUID is generated when omitted
    :param kwargs: overrides for any default column value
    """
    when = datetime.datetime(2015, 8, 10, 0, 5, 58)
    values = dict(
        id=uuidutils.generate_uuid(),
        share_id=share_id or uuidutils.generate_uuid(),
        deleted='False',
        host='openstack@BackendZ#PoolA',
        status='available',
        scheduled_at=when,
        launched_at=when,
        terminated_at=None,
        access_rules_status='active',
    )
    values.update(kwargs)
    return values
@map_to_migration('38e632621e5a')
class ShareTypeMigrationChecks(BaseMigrationChecks):
    """Checks migration of volume_type* tables into share_type* tables."""

    def _get_fake_data(self):
        """Build two fake volume types, each with two extra specs.

        Also records the volume type ids in self.share_type_ids so the
        check methods can locate the migrated rows afterwards.
        """
        extra_specs = []
        self.share_type_ids = []
        volume_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-A',
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'vol-type-B',
            },
        ]
        for idx, volume_type in enumerate(volume_types):
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'foo',
                'value': 'bar%s' % idx,
                'deleted': False,
            })
            extra_specs.append({
                'volume_type_id': volume_type['id'],
                'key': 'xyzzy',
                'value': 'spoon_%s' % idx,
                'deleted': False,
            })
            self.share_type_ids.append(volume_type['id'])
        return volume_types, extra_specs

    def setup_upgrade_data(self, conn):
        """Insert the fake volume types and their extra specs."""
        (self.volume_types, self.extra_specs) = self._get_fake_data()

        volume_types_table = utils.load_table('volume_types', conn)
        conn.execute(volume_types_table.insert().values(self.volume_types))
        extra_specs_table = utils.load_table('volume_type_extra_specs',
                                             conn)
        conn.execute(extra_specs_table.insert().values(self.extra_specs))

    def check_upgrade(self, conn, data):
        """share_type* tables exist, volume_type* are gone, data migrated."""
        # Verify table transformations
        share_types_table = utils.load_table('share_types', conn)
        share_types_specs_table = utils.load_table(
            'share_type_extra_specs', conn)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_types', conn)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'volume_type_extra_specs', conn)

        # Verify presence of data
        share_type_ids = [
            st._mapping['id'] for st in conn.execute(
                share_types_table.select()
            )
            if st._mapping['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(share_type_ids))
        extra_specs = [
            {
                'type': es._mapping['share_type_id'],
                'key': es._mapping['spec_key']}
            for es in conn.execute(
                share_types_specs_table.select()
            )
            if es._mapping['share_type_id'] in self.share_type_ids
        ]
        # Two specs were created for each of the two types.
        self.test_case.assertEqual(4, len(extra_specs))

    def check_downgrade(self, conn):
        """volume_type* tables are restored with the same data."""
        # Verify table transformations
        volume_types_table = utils.load_table('volume_types', conn)
        volume_types_specs_table = utils.load_table(
            'volume_type_extra_specs', conn)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_types', conn)
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'share_type_extra_specs', conn)

        # Verify presence of data
        volume_type_ids = [
            vt._mapping['id']
            for vt in conn.execute(volume_types_table.select())
            if vt._mapping['id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(sorted(self.share_type_ids),
                                   sorted(volume_type_ids))
        extra_specs = [
            {'type': es._mapping['volume_type_id'], 'key': es._mapping['key']}
            for es in conn.execute(volume_types_specs_table.select())
            if es._mapping['volume_type_id'] in self.share_type_ids
        ]
        self.test_case.assertEqual(4, len(extra_specs))
@map_to_migration('5077ffcc5f1c')
class ShareInstanceMigrationChecks(BaseMigrationChecks):
    """Checks moving share/snapshot data into the new *_instances tables."""

    def _prepare_fake_data(self):
        """Store fixture rows (share, snapshot, export location) on self."""
        time = datetime.datetime(2017, 1, 12, 12, 12, 12)
        self.share = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'fake_status',
            'scheduled_at': time,
            'launched_at': time,
            'terminated_at': time,
            'availability_zone': 'fake_az'}
        self.share_snapshot = {
            'id': uuidutils.generate_uuid(),
            'status': 'fake_status',
            'share_id': self.share['id'],
            'progress': 'fake_progress'}
        self.share_export_location = {
            'id': 1001,
            'share_id': self.share['id']}

    def setup_upgrade_data(self, conn):
        """Insert the share, snapshot and export-location fixtures."""
        self._prepare_fake_data()
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(self.share))
        snapshot_table = utils.load_table('share_snapshots', conn)
        conn.execute(snapshot_table.insert().values(self.share_snapshot))
        el_table = utils.load_table('share_export_locations', conn)
        conn.execute(el_table.insert().values(self.share_export_location))

    def check_upgrade(self, conn, data):
        """Instance tables exist and carry the data moved off the parents."""
        share_table = utils.load_table('shares', conn)
        s_instance_table = utils.load_table('share_instances', conn)
        ss_instance_table = utils.load_table('share_snapshot_instances',
                                             conn)
        snapshot_table = utils.load_table('share_snapshots', conn)
        instance_el_table = utils.load_table('share_instance_export_locations',
                                             conn)
        # Check shares table: the moved columns must no longer exist.
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = conn.execute(share_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))

        # Check share instance table: one instance per fixture share,
        # carrying the values that used to live on the share itself.
        s_instance_record = conn.execute(s_instance_table.select().where(
            s_instance_table.c.share_id == self.share['id'])).first()
        self.test_case.assertIsNotNone(s_instance_record)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'availability_zone']:
            self.test_case.assertEqual(self.share[column],
                                       s_instance_record._mapping[column])

        # Check snapshot table: moved columns must no longer exist.
        for column in ['status', 'progress']:
            rows = conn.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertFalse(hasattr(row, column))

        # Check snapshot instance table: linked to the share instance and
        # carrying the snapshot's former status/progress.
        ss_instance_record = conn.execute(ss_instance_table.select().where(
            ss_instance_table.c.snapshot_id == self.share_snapshot['id'])
        ).first()
        self.test_case.assertEqual(
            s_instance_record._mapping['id'],
            ss_instance_record._mapping['share_instance_id'])
        for column in ['status', 'progress']:
            self.test_case.assertEqual(self.share_snapshot[column],
                                       ss_instance_record._mapping[column])

        # Check share export location table: replaced by per-instance table.
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_export_locations', conn)

        # Check share instance export location table
        el_record = conn.execute(instance_el_table.select().where(
            instance_el_table.c.share_instance_id ==
            s_instance_record._mapping['id'])
        ).first()
        self.test_case.assertIsNotNone(el_record)
        self.test_case.assertTrue(hasattr(el_record, 'share_instance_id'))
        self.test_case.assertFalse(hasattr(el_record, 'share_id'))

    def check_downgrade(self, conn):
        """Instance tables are dropped and parent tables regain columns."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_snapshot_instances', conn)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instances', conn)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_instance_export_locations', conn)
        share_table = utils.load_table('shares', conn)
        snapshot_table = utils.load_table('share_snapshots', conn)
        share_el_table = utils.load_table('share_export_locations',
                                          conn)
        for column in ['host', 'status', 'scheduled_at', 'launched_at',
                       'terminated_at', 'share_network_id', 'share_server_id',
                       'availability_zone']:
            rows = conn.execute(share_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))

        for column in ['status', 'progress']:
            rows = conn.execute(snapshot_table.select())
            for row in rows:
                self.test_case.assertTrue(hasattr(row, column))
        rows = conn.execute(share_el_table.select())
        for row in rows:
            self.test_case.assertFalse(hasattr(row, 'share_instance_id'))
            self.test_case.assertTrue(
                hasattr(row, 'share_id'))
@map_to_migration('1f0bd302c1a6')
class AvailabilityZoneMigrationChecks(BaseMigrationChecks):
    """Checks extraction of AZ names into the availability_zones table."""

    # AZ names used by the service fixtures below.
    valid_az_names = ('az1', 'az2')

    def _get_service_data(self, options):
        """Return a service row merging common defaults with *options*."""
        base_dict = {
            'binary': 'manila-share',
            'topic': 'share',
            'disabled': False,
            'report_count': '100',
        }
        base_dict.update(options)
        return base_dict

    def setup_upgrade_data(self, conn):
        """Insert services that reference AZs via the legacy string column."""
        service_fixture = [
            self._get_service_data(
                {'deleted': 0, 'host': 'fake1', 'availability_zone': 'az1'}
            ),
            self._get_service_data(
                {'deleted': 0, 'host': 'fake2', 'availability_zone': 'az1'}
            ),
            self._get_service_data(
                {'deleted': 1, 'host': 'fake3', 'availability_zone': 'az2'}
            ),
        ]

        services_table = utils.load_table('services', conn)

        for fixture in service_fixture:
            conn.execute(services_table.insert().values(fixture))

    def check_upgrade(self, conn, _):
        """AZ rows get UUID ids; services reference them by id."""
        az_table = utils.load_table('availability_zones', conn)

        for az in conn.execute(az_table.select()):
            self.test_case.assertTrue(uuidutils.is_uuid_like(az.id))
            self.test_case.assertIn(az.name, self.valid_az_names)
            self.test_case.assertEqual('False', az.deleted)

        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(service.availability_zone_id)
            )

    def check_downgrade(self, conn):
        """Services get their plain availability_zone string back."""
        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertIn(
                service.availability_zone, self.valid_az_names
            )
@map_to_migration('dda6de06349')
class ShareInstanceExportLocationMetadataChecks(BaseMigrationChecks):
    """Checks new export-location columns and the metadata table."""

    el_table_name = 'share_instance_export_locations'
    elm_table_name = 'share_instance_export_locations_metadata'

    def setup_upgrade_data(self, conn):
        """Create shares, their instances and two export locations."""
        # Setup shares
        share_fixture = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}]
        share_table = utils.load_table('shares', conn)
        for fixture in share_fixture:
            conn.execute(share_table.insert().values(fixture))

        # Setup share instances
        si_fixture = [
            {'id': 'foo_share_instance_id_oof',
             'share_id': share_fixture[0]['id']},
            {'id': 'bar_share_instance_id_rab',
             'share_id': share_fixture[1]['id']},
        ]
        si_table = utils.load_table('share_instances', conn)
        for fixture in si_fixture:
            conn.execute(si_table.insert().values(fixture))

        # Setup export locations
        el_fixture = [
            {'id': 1, 'path': '/1', 'share_instance_id': si_fixture[0]['id']},
            {'id': 2, 'path': '/2', 'share_instance_id': si_fixture[1]['id']},
        ]
        el_table = utils.load_table(self.el_table_name, conn)
        for fixture in el_fixture:
            conn.execute(el_table.insert().values(fixture))

    def check_upgrade(self, conn, data):
        """New columns exist with defaults; metadata rows can be written."""
        el_table = utils.load_table(
            'share_instance_export_locations', conn)
        for el in conn.execute(el_table.select()):
            # The migration must add both columns with expected defaults.
            self.test_case.assertTrue(hasattr(el, 'is_admin_only'))
            self.test_case.assertTrue(hasattr(el, 'uuid'))
            self.test_case.assertEqual(False, el.is_admin_only)
            self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid))

        # Write export location metadata
        el_metadata = [
            {'key': 'foo_key', 'value': 'foo_value', 'export_location_id': 1},
            {'key': 'bar_key', 'value': 'bar_value', 'export_location_id': 2},
        ]
        elm_table = utils.load_table(self.elm_table_name, conn)
        conn.execute(elm_table.insert().values(el_metadata))

        # Verify values of written metadata
        for el_meta_datum in el_metadata:
            el_id = el_meta_datum['export_location_id']
            records = conn.execute(elm_table.select().where(
                elm_table.c.export_location_id == el_id))
            self.test_case.assertEqual(1, records.rowcount)
            record = records.first()

            # The metadata table carries exactly this set of columns.
            expected_keys = (
                'id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
                'export_location_id', 'key', 'value',
            )
            self.test_case.assertEqual(
                len(expected_keys), len(record._mapping.keys()))
            for key in expected_keys:
                self.test_case.assertIn(key, record._mapping.keys())

            for k, v in el_meta_datum.items():
                self.test_case.assertTrue(hasattr(record, k))
                self.test_case.assertEqual(v, getattr(record, k))

    def check_downgrade(self, conn):
        """New columns and the metadata table must be gone after downgrade."""
        el_table = utils.load_table(
            'share_instance_export_locations', conn)
        for el in conn.execute(el_table.select()):
            self.test_case.assertFalse(hasattr(el, 'is_admin_only'))
            self.test_case.assertFalse(hasattr(el, 'uuid'))
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.elm_table_name, conn)
@map_to_migration('344c1ac4747f')
class AccessRulesStatusMigrationChecks(BaseMigrationChecks):
    """Checks moving per-rule 'state' into per-instance access_rules_status.

    Upgrade aggregates each instance's rule states into a single
    'access_rules_status' column; downgrade restores a per-rule 'state'
    column from the instance-level value.
    """

    def _get_instance_data(self, data):
        """Return a shallow copy of *data* (placeholder for defaults)."""
        base_dict = {}
        base_dict.update(data)
        return base_dict

    def setup_upgrade_data(self, conn):
        """Create one share, four instances and their access rules."""
        share_table = utils.load_table('shares', conn)

        share = {
            'id': 1,
            'share_proto': "NFS",
            'size': 0,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake',
        }

        conn.execute(share_table.insert().values(share))

        # Instance 1: healthy rules plus one being deleted.
        rules1 = [
            {'id': 'r1', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r2', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting',
             'deleted': 'False'},
        ]
        # Instance 2: one rule in 'error'.
        rules2 = [
            {'id': 'r4', 'share_instance_id': 2, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r5', 'share_instance_id': 2, 'state': 'error',
             'deleted': 'False'},
        ]

        # Instance 3: a 'new' (not yet applied) rule.
        rules3 = [
            {'id': 'r6', 'share_instance_id': 3, 'state': 'new',
             'deleted': 'False'},
        ]

        instance_fixtures = [
            {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1,
             'status': 'available', 'rules': rules1},
            {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1,
             'status': 'available', 'rules': rules2},
            {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1,
             'status': 'available', 'rules': rules3},
            {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1,
             'status': 'deleting', 'rules': []},
        ]

        share_instances_table = utils.load_table('share_instances', conn)
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', conn)

        for fixture in instance_fixtures:
            rules = fixture.pop('rules')
            conn.execute(share_instances_table.insert().values(fixture))

            for rule in rules:
                conn.execute(share_instances_rules_table.insert().values(rule))

    def check_upgrade(self, conn, _):
        """Each instance's access_rules_status reflects its rules' states."""
        instances_table = utils.load_table('share_instances', conn)

        # Expected status keyed by instance id.
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'out_of_sync',
            '4': None,
        }

        # NOTE: the previous code used the Python 'in' operator directly on
        # the SQLAlchemy Column ('instances_table.c.id in valid_statuses'),
        # which is not a SQL filter and matched nothing, so no instance was
        # ever checked.  Use the SQL IN operator and count iterations so the
        # assertions cannot silently no-op again.
        instances = conn.execute(instances_table.select().where(
            instances_table.c.id.in_(valid_statuses.keys())))

        checked = 0
        for instance in instances:
            checked += 1
            self.test_case.assertEqual(
                valid_statuses[str(instance._mapping['id'])],
                instance._mapping['access_rules_status'])
        self.test_case.assertEqual(len(valid_statuses), checked)

    def check_downgrade(self, conn):
        """Each rule's restored 'state' matches its instance's status."""
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', conn)

        # NOTE: the previous filter compared rule ids against instance ids
        # ('1'..'4') while the fixture rule ids are 'r1'..'r6', so no rows
        # were ever selected or verified.  Filter by the real rule ids.
        rule_ids = ('r1', 'r2', 'r3', 'r4', 'r5', 'r6')
        share_instance_rules_to_check = conn.execute(
            share_instances_rules_table.select().where(
                share_instances_rules_table.c.id.in_(rule_ids)))

        # Expected rule state, keyed by the owning share instance id.
        valid_statuses = {
            '1': 'active',
            '2': 'error',
            '3': 'error',
            '4': None,
        }

        checked = 0
        for rule in share_instance_rules_to_check:
            checked += 1
            valid_state = valid_statuses[
                str(rule._mapping['share_instance_id'])]
            self.test_case.assertEqual(valid_state, rule._mapping['state'])
        self.test_case.assertEqual(len(rule_ids), checked)
@map_to_migration('293fac1130ca')
class ShareReplicationMigrationChecks(BaseMigrationChecks):
    """Checks replication columns on shares and share_instances."""

    valid_share_display_names = ('FAKE_SHARE_1', 'FAKE_SHARE_2',
                                 'FAKE_SHARE_3')
    # NOTE: rebound as an instance attribute in setup_upgrade_data.
    valid_share_ids = []
    valid_replication_types = ('writable', 'readable', 'dr')

    def _load_tables_and_get_data(self, conn):
        """Fetch the fixture shares and their instances from the DB."""
        share_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)

        shares = conn.execute(
            share_table.select().where(share_table.c.id.in_(
                self.valid_share_ids))
        ).fetchall()
        share_instances = conn.execute(share_instances_table.select().where(
            share_instances_table.c.share_id.in_(self.valid_share_ids))
        ).fetchall()

        return shares, share_instances

    def setup_upgrade_data(self, conn):
        """Create three shares, each with one instance."""
        shares_data = []
        instances_data = []
        self.valid_share_ids = []

        for share_display_name in self.valid_share_display_names:
            share_ref = fake_share(display_name=share_display_name)
            shares_data.append(share_ref)
            instances_data.append(fake_instance(share_id=share_ref['id']))

        shares_table = utils.load_table('shares', conn)

        for share in shares_data:
            self.valid_share_ids.append(share['id'])
            conn.execute(shares_table.insert().values(share))

        shares_instances_table = utils.load_table('share_instances', conn)

        for share_instance in instances_data:
            conn.execute(
                shares_instances_table.insert().values(share_instance))

    def check_upgrade(self, conn, _):
        """New replication columns exist and no fixture data was lost."""
        shares, share_instances = self._load_tables_and_get_data(conn)
        share_ids = [share._mapping['id'] for share in shares]
        share_instance_share_ids = [share_instance._mapping['share_id'] for
                                    share_instance in share_instances]

        # Assert no data is lost
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)

        for share in shares:
            self.test_case.assertIn(share._mapping['display_name'],
                                    self.valid_share_display_names)
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertTrue(hasattr(share, 'replication_type'))

        for share_instance in share_instances:
            self.test_case.assertTrue(hasattr(share_instance, 'replica_state'))

    def check_downgrade(self, conn):
        """Replication columns are dropped and fixture data survives."""
        shares, share_instances = self._load_tables_and_get_data(conn)
        share_ids = [share._mapping['id'] for share in shares]
        share_instance_share_ids = [share_instance._mapping['share_id'] for
                                    share_instance in share_instances]
        # Assert no data is lost
        for sid in self.valid_share_ids:
            self.test_case.assertIn(sid, share_ids)
            self.test_case.assertIn(sid, share_instance_share_ids)

        for share in shares:
            self.test_case.assertEqual('False', share.deleted)
            self.test_case.assertIn(share.display_name,
                                    self.valid_share_display_names)
            self.test_case.assertFalse(hasattr(share, 'replication_type'))

        for share_instance in share_instances:
            self.test_case.assertEqual('False', share_instance.deleted)
            self.test_case.assertIn(share_instance.share_id,
                                    self.valid_share_ids)
            self.test_case.assertFalse(
                hasattr(share_instance, 'replica_state'))
@map_to_migration('5155c7077f99')
class NetworkAllocationsNewLabelColumnChecks(BaseMigrationChecks):
    """Checks 'label' and related new columns on network_allocations."""

    table_name = 'network_allocations'
    ids = ['fake_network_allocation_id_%d' % i for i in (1, 2, 3)]

    def setup_upgrade_data(self, conn):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'foo_share_server_id'

        # Create share network
        share_network_data = {
            'id': 'foo_share_network_id',
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

        # Create network allocations
        network_allocations = [
            {'id': self.ids[0],
             'share_server_id': share_server_id,
             'ip_address': '1.1.1.1'},
            {'id': self.ids[1],
             'share_server_id': share_server_id,
             'ip_address': '2.2.2.2'},
        ]
        na_table = utils.load_table(self.table_name, conn)
        for network_allocation in network_allocations:
            conn.execute(na_table.insert().values(network_allocation))

    def check_upgrade(self, conn, data):
        """Existing rows default to label 'user'; 'admin' rows can be added."""
        na_table = utils.load_table(self.table_name, conn)
        for na in conn.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual(na.label, 'user')

        # Create admin network allocation
        # NOTE(review): 'na' below is the last row of the loop above, so
        # this relies on at least one allocation existing — confirm intended.
        network_allocations = [
            {'id': self.ids[2],
             'share_server_id': na.share_server_id,
             'ip_address': '3.3.3.3',
             'label': 'admin',
             'network_type': 'vlan',
             'segmentation_id': 1005,
             'ip_version': 4,
             'cidr': '240.0.0.0/16'},
        ]
        conn.execute(na_table.insert().values(network_allocations))

        # Select admin network allocations
        for na in conn.execute(
                na_table.select().where(na_table.c.label == 'admin')):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual('admin', na.label)
            for col_name in ('network_type', 'segmentation_id', 'ip_version',
                             'cidr'):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))

    def check_downgrade(self, conn):
        """All label-related columns are dropped again; rows survive."""
        na_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(na_table.select())
        self.test_case.assertTrue(db_result.rowcount >= len(self.ids))
        for na in db_result:
            for col_name in ('label', 'network_type', 'segmentation_id',
                             'ip_version', 'cidr'):
                self.test_case.assertFalse(hasattr(na, col_name))
@map_to_migration('eb6d5544cbbd')
class ShareSnapshotInstanceNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Checks the 'provider_location' column on share_snapshot_instances."""

    table_name = 'share_snapshot_instances'

    def setup_upgrade_data(self, conn):
        """Create a share, its instance, a snapshot and a snapshot instance."""
        # Setup shares
        share_data = {'id': 'new_share_id'}
        s_table = utils.load_table('shares', conn)
        conn.execute(s_table.insert().values(share_data))

        # Setup share instances
        share_instance_data = {
            'id': 'new_share_instance_id',
            'share_id': share_data['id']
        }
        si_table = utils.load_table('share_instances', conn)
        conn.execute(si_table.insert().values(share_instance_data))

        # Setup share snapshots
        share_snapshot_data = {
            'id': 'new_snapshot_id',
            'share_id': share_data['id']}
        snap_table = utils.load_table('share_snapshots', conn)
        conn.execute(snap_table.insert().values(share_snapshot_data))

        # Setup snapshot instances
        snapshot_instance_data = {
            'id': 'new_snapshot_instance_id',
            'snapshot_id': share_snapshot_data['id'],
            'share_instance_id': share_instance_data['id']
        }
        snap_i_table = utils.load_table('share_snapshot_instances', conn)
        conn.execute(snap_i_table.insert().values(snapshot_instance_data))

    def check_upgrade(self, conn, data):
        """The new column is present and the fixture row is intact."""
        ss_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(ss_table.select().where(
            ss_table.c.id == 'new_snapshot_instance_id'))
        self.test_case.assertTrue(db_result.rowcount > 0)
        for ss in db_result:
            self.test_case.assertTrue(hasattr(ss, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id)

    def check_downgrade(self, conn):
        """The column is dropped and the fixture row is intact."""
        ss_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(ss_table.select().where(
            ss_table.c.id == 'new_snapshot_instance_id'))
        self.test_case.assertTrue(db_result.rowcount > 0)
        for ss in db_result:
            self.test_case.assertFalse(hasattr(ss, 'provider_location'))
            self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id)
@map_to_migration('221a83cfd85b')
class ShareNetworksFieldLengthChecks(BaseMigrationChecks):
    """Checks widening of user_id/project_id columns from 36 to 255 chars."""

    def setup_upgrade_data(self, conn):
        """Insert a share network and a security service fixture."""
        user_id = '123456789123456789'
        project_id = 'project_id'

        # Create share network data
        share_network_data = {
            'id': 'foo_share_network_id_2',
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create security_service data
        security_services_data = {
            'id': 'foo_security_services_id',
            'type': 'foo_type',
            'project_id': project_id
        }
        ss_table = utils.load_table('security_services', conn)
        conn.execute(ss_table.insert().values(security_services_data))

    def _check_length_for_table_columns(self, table_name, conn,
                                        cols, length):
        """Assert each column named in *cols* has the given type length."""
        table = utils.load_table(table_name, conn)
        db_result = conn.execute(table.select())
        # Rows must still be present after the schema change.
        self.test_case.assertTrue(db_result.rowcount > 0)

        for col in cols:
            self.test_case.assertEqual(table.columns.get(col).type.length,
                                       length)

    def check_upgrade(self, conn, data):
        """Columns are widened to 255 characters."""
        self._check_length_for_table_columns('share_networks', conn,
                                             ('user_id', 'project_id'), 255)

        self._check_length_for_table_columns('security_services', conn,
                                             ('project_id',), 255)

    def check_downgrade(self, conn):
        """Columns shrink back to 36 characters."""
        self._check_length_for_table_columns('share_networks', conn,
                                             ('user_id', 'project_id'), 36)

        self._check_length_for_table_columns('security_services', conn,
                                             ('project_id',), 36)
@map_to_migration('fdfb668d19e1')
class NewGatewayColumnChecks(BaseMigrationChecks):
    """Checks the new 'gateway' column on allocations and share networks."""

    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    na_ids = ['network_allocation_id_fake_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, conn):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo'

        # Create share network
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

        # Create network allocations
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, conn)
        conn.execute(na_table.insert().values(network_allocations))

    def check_upgrade(self, conn, data):
        """Both tables gained 'gateway'; rows carrying it can be written."""
        na_table = utils.load_table(self.na_table_name, conn)
        for na in conn.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'gateway'))

        # Create network allocation
        # NOTE(review): 'na' below is the last row of the loop above, so
        # this relies on at least one allocation existing — confirm intended.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
            },
        ]
        conn.execute(na_table.insert().values(network_allocations))

        # Select network allocations with gateway info
        for na in conn.execute(
                na_table.select().where(na_table.c.gateway == '3.3.3.1')):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
            self.test_case.assertEqual(network_allocations[0]['gateway'],
                                       getattr(na, 'gateway'))

        sn_table = utils.load_table(self.sn_table_name, conn)
        for sn in conn.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))

        # Create share network
        # NOTE(review): same post-loop reliance on 'sn' as on 'na' above.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo',
            },
        ]
        conn.execute(sn_table.insert().values(share_networks))

        # Select share network
        for sn in conn.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo')):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
            self.test_case.assertEqual(share_networks[0]['gateway'],
                                       getattr(sn, 'gateway'))

    def check_downgrade(self, conn):
        """'gateway' is dropped from both tables; rows survive."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, conn)
            db_result = conn.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'gateway'))
@map_to_migration('e8ea58723178')
class RemoveHostFromDriverPrivateDataChecks(BaseMigrationChecks):
    """Check removal of the 'host' column from drivers_private_data."""

    table_name = 'drivers_private_data'
    host_column_name = 'host'

    def setup_upgrade_data(self, conn):
        """Insert one private-data row that still carries a host value."""
        fixture = {
            'created_at': datetime.datetime(2016, 7, 14, 22, 31, 22),
            'deleted': 0,
            'host': 'host1',
            'entity_uuid': 'entity_uuid1',
            'key': 'key1',
            'value': 'value1'
        }
        table = utils.load_table(self.table_name, conn)
        conn.execute(table.insert().values(fixture))

    def check_upgrade(self, conn, data):
        """The 'host' column must be absent after the upgrade."""
        table = utils.load_table(self.table_name, conn)
        for row in conn.execute(table.select()):
            self.test_case.assertFalse(hasattr(row, self.host_column_name))

    def check_downgrade(self, conn):
        """Downgrade restores the column, defaulting it to 'unknown'."""
        table = utils.load_table(self.table_name, conn)
        for row in conn.execute(table.select()):
            self.test_case.assertTrue(hasattr(row, self.host_column_name))
            self.test_case.assertEqual(
                'unknown', row._mapping[self.host_column_name])
@map_to_migration('493eaffd79e1')
class NewMTUColumnChecks(BaseMigrationChecks):
    """Verify addition/removal of the 'mtu' column.

    The migration adds an 'mtu' column to both the 'network_allocations'
    and 'share_networks' tables; the downgrade removes it again.
    """

    na_table_name = 'network_allocations'
    sn_table_name = 'share_networks'
    # Fixture IDs inserted by setup_upgrade_data/check_upgrade.
    na_ids = ['network_allocation_id_fake_3_%d' % i for i in (1, 2, 3)]
    sn_ids = ['share_network_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, conn):
        """Create a share network, a share server and two allocations."""
        user_id = 'user_id'
        project_id = 'project_id'
        share_server_id = 'share_server_id_foo_2'

        # Create share network
        share_network_data = {
            'id': self.sn_ids[0],
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table(self.sn_table_name, conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create share server
        share_server_data = {
            'id': share_server_id,
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

        # Create network allocations (no mtu yet - column does not exist)
        network_allocations = [
            {
                'id': self.na_ids[0],
                'share_server_id': share_server_id,
                'ip_address': '1.1.1.1',
            },
            {
                'id': self.na_ids[1],
                'share_server_id': share_server_id,
                'ip_address': '2.2.2.2',
            },
        ]
        na_table = utils.load_table(self.na_table_name, conn)
        conn.execute(na_table.insert().values(network_allocations))

    def check_upgrade(self, conn, data):
        """All rows must expose 'mtu' and the column must accept values."""
        na_table = utils.load_table(self.na_table_name, conn)
        for na in conn.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))

        # Create network allocation that populates the new column.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
                'mtu': 1509,
            },
        ]
        conn.execute(na_table.insert().values(network_allocations))

        # Select network allocations with mtu info.  NOTE: 'mtu' is an
        # integer column, so compare with an int, not the string '1509'.
        for na in conn.execute(
                na_table.select().where(na_table.c.mtu == 1509)):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            self.test_case.assertEqual(network_allocations[0]['mtu'],
                                       getattr(na, 'mtu'))

        # Select all entries and check for the value
        for na in conn.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'mtu'))
            if na._mapping['id'] == self.na_ids[2]:
                self.test_case.assertEqual(network_allocations[0]['mtu'],
                                           getattr(na, 'mtu'))
            else:
                # Pre-existing rows get NULL for the new column.
                self.test_case.assertIsNone(na._mapping['mtu'])

        sn_table = utils.load_table(self.sn_table_name, conn)
        for sn in conn.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))

        # Create share network with the new column populated.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo_2',
                'mtu': 1509,
            },
        ]
        conn.execute(sn_table.insert().values(share_networks))

        # Select share network with MTU set
        for sn in conn.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo_2')):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            self.test_case.assertEqual(share_networks[0]['mtu'],
                                       getattr(sn, 'mtu'))

        # Select all entries and check for the value.  NOTE: compare
        # against the share-network fixture, not the allocation fixture
        # that had been copy-pasted here (both happen to be 1509).
        for sn in conn.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'mtu'))
            if sn._mapping['id'] == self.sn_ids[1]:
                self.test_case.assertEqual(share_networks[0]['mtu'],
                                           getattr(sn, 'mtu'))
            else:
                self.test_case.assertIsNone(sn._mapping['mtu'])

    def check_downgrade(self, conn):
        """All fixture rows remain, but the 'mtu' column is gone."""
        for table_name, ids in ((self.na_table_name, self.na_ids),
                                (self.sn_table_name, self.sn_ids)):
            table = utils.load_table(table_name, conn)
            db_result = conn.execute(table.select())
            self.test_case.assertTrue(db_result.rowcount >= len(ids))
            for record in db_result:
                self.test_case.assertFalse(hasattr(record, 'mtu'))
@map_to_migration('63809d875e32')
class AddAccessKeyToShareAccessMapping(BaseMigrationChecks):
    """Check the 'access_key' column added to the share access table."""

    table_name = 'share_access_map'
    access_key_column_name = 'access_key'

    def setup_upgrade_data(self, conn):
        """Build a share -> instance -> access-rule fixture chain."""
        share_fixture = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "CEPHFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        shares = utils.load_table('shares', conn)
        conn.execute(shares.insert().values(share_fixture))

        instance_fixture = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_fixture['id'],
            'status': 'available',
            'access_rules_status': 'active'
        }
        instances = utils.load_table('share_instances', conn)
        conn.execute(instances.insert().values(instance_fixture))

        access_fixture = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_fixture['id'],
            'access_type': 'cephx',
            'access_to': 'alice',
            'deleted': 'False'
        }
        access_map = utils.load_table(self.table_name, conn)
        conn.execute(access_map.insert().values(access_fixture))

        instance_access_fixture = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': instance_fixture['id'],
            'access_id': access_fixture['id'],
            'deleted': 'False'
        }
        instance_access_map = utils.load_table(
            'share_instance_access_map', conn)
        conn.execute(instance_access_map.insert().values(
            instance_access_fixture))

    def check_upgrade(self, conn, data):
        """After upgrade every access-rule row exposes the new column."""
        access_map = utils.load_table(self.table_name, conn)
        for row in conn.execute(access_map.select()):
            self.test_case.assertTrue(
                hasattr(row, self.access_key_column_name))

    def check_downgrade(self, conn):
        """After downgrade the column must be gone again."""
        access_map = utils.load_table(self.table_name, conn)
        for row in conn.execute(access_map.select()):
            self.test_case.assertFalse(
                hasattr(row, self.access_key_column_name))
@map_to_migration('48a7beae3117')
class MoveShareTypeIdToInstancesCheck(BaseMigrationChecks):
    """Check moving 'share_type_id' from shares to share instances."""

    some_shares = [
        {
            'id': 's1',
            'share_type_id': 't1',
        },
        {
            'id': 's2',
            'share_type_id': 't2',
        },
        {
            'id': 's3',
            'share_type_id': 't3',
        },
    ]

    share_ids = [x['id'] for x in some_shares]

    some_instances = [
        {
            'id': 'i1',
            'share_id': 's3',
        },
        {
            'id': 'i2',
            'share_id': 's2',
        },
        {
            'id': 'i3',
            'share_id': 's2',
        },
        {
            'id': 'i4',
            'share_id': 's1',
        },
    ]

    instance_ids = [x['id'] for x in some_instances]

    some_share_types = [
        {'id': 't1'},
        {'id': 't2'},
        {'id': 't3'},
    ]

    def setup_upgrade_data(self, conn):
        """Insert share types, typed shares and typeless instances."""
        shares_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)
        share_types_table = utils.load_table('share_types', conn)

        for stype in self.some_share_types:
            conn.execute(share_types_table.insert().values(stype))

        for share in self.some_shares:
            conn.execute(shares_table.insert().values(share))

        for instance in self.some_instances:
            conn.execute(share_instances_table.insert().values(instance))

    def check_upgrade(self, conn, data):
        """Each instance inherited its parent share's share_type_id."""
        shares_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)

        # NOTE: must use the SQL IN operator (.in_()); the Python 'in'
        # operator on a SQLAlchemy column evaluates to a constant False,
        # which silently matched no rows and skipped all assertions.
        for instance in conn.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            share = conn.execute(shares_table.select().where(
                instance._mapping['share_id'] == shares_table.c.id)).first()
            expected_share = next(
                (x for x in self.some_shares
                 if share._mapping['id'] == x['id']), None)
            self.test_case.assertEqual(expected_share['share_type_id'],
                                       instance._mapping['share_type_id'])

        # The column must be gone from the shares table (query shares,
        # not share instances as the original copy-paste did).
        for share in conn.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertFalse(hasattr(share, 'share_type_id'))

    def check_downgrade(self, conn):
        """share_type_id is back on shares and gone from instances."""
        shares_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)

        for instance in conn.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertFalse(hasattr(instance, 'share_type_id'))

        for share in conn.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            expected_share = next(
                (x for x in self.some_shares
                 if share._mapping['id'] == x['id']), None)
            self.test_case.assertEqual(expected_share['share_type_id'],
                                       share._mapping['share_type_id'])
@map_to_migration('3e7d62517afa')
class CreateFromSnapshotExtraSpecAndShareColumn(BaseMigrationChecks):
    """Check the create_share_from_snapshot_support column and extra spec.

    The upgrade adds a 'create_share_from_snapshot_support' column to the
    shares table and a matching extra spec on each share type, both seeded
    from the existing snapshot-support values; the downgrade removes them.
    """

    # New attribute/spec introduced by the migration under test.
    expected_attr = constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT
    # Pre-existing attribute the new one is seeded from.
    snap_support_attr = constants.ExtraSpecs.SNAPSHOT_SUPPORT

    def _get_fake_data(self):
        """Build two share types (with specs), shares and instances.

        The first type/share pair has snapshot_support=False, the second
        True, so value propagation can be verified for both settings.
        """
        extra_specs = []
        shares = []
        share_instances = []
        share_types = [
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-1',
                'is_public': False,
            },
            {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': 'share-type-2',
                'is_public': True,
            },
        ]
        snapshot_support = (False, True)
        dhss = ('True', 'False')
        for idx, share_type in enumerate(share_types):
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snapshot_support[idx],
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': share_type['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss[idx],
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snapshot_support[idx])
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=share_type['id'])
            )

        return share_types, extra_specs, shares, share_instances

    def setup_upgrade_data(self, conn):
        """Persist the generated fake types, specs, shares and instances."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()

        share_types_table = utils.load_table('share_types', conn)
        conn.execute(share_types_table.insert().values(self.share_types))
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             conn)
        conn.execute(extra_specs_table.insert().values(self.extra_specs))
        shares_table = utils.load_table('shares', conn)
        conn.execute(shares_table.insert().values(self.shares))
        share_instances_table = utils.load_table('share_instances', conn)
        conn.execute(
            share_instances_table.insert().values(self.share_instances))

    def check_upgrade(self, conn, data):
        """Verify new column/spec exist and mirror snapshot support."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', conn)
        share_types_table = utils.load_table('share_types', conn)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             conn)

        # Pre-existing Shares must be present
        shares_in_db = conn.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s._mapping['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)

        # new shares attr must match snapshot support
        for share in shares_in_db:
            self.test_case.assertTrue(hasattr(share, self.expected_attr))
            self.test_case.assertEqual(share._mapping[self.snap_support_attr],
                                       share._mapping[self.expected_attr])

        # Pre-existing Share types must be present
        share_types_in_db = (
            conn.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s._mapping['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)

        # Pre-existing extra specs must be present
        extra_specs_in_db = (
            conn.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))

        # New Extra spec for share types must match snapshot support
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x._mapping['spec_key'] == self.expected_attr
                              and x._mapping['share_type_id'] == share_type_id]
            snapshot_support_spec = [
                x for x in extra_specs_in_db
                if x._mapping['spec_key'] == self.snap_support_attr
                and x._mapping['share_type_id'] == share_type_id]
            # Exactly one of each spec per share type, with equal values.
            self.test_case.assertEqual(1, len(new_extra_spec))
            self.test_case.assertEqual(1, len(snapshot_support_spec))
            self.test_case.assertEqual(
                snapshot_support_spec[0]._mapping['spec_value'],
                new_extra_spec[0]._mapping['spec_value'])

    def check_downgrade(self, conn):
        """Verify the column and the extra spec were removed again."""
        share_type_ids = [st['id'] for st in self.share_types]
        share_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', conn)
        share_types_table = utils.load_table('share_types', conn)
        extra_specs_table = utils.load_table('share_type_extra_specs',
                                             conn)

        # Pre-existing Shares must be present
        shares_in_db = conn.execute(shares_table.select()).fetchall()
        share_ids_in_db = [s._mapping['id'] for s in shares_in_db]
        self.test_case.assertTrue(len(share_ids_in_db) > 1)
        for share_id in share_ids:
            self.test_case.assertIn(share_id, share_ids_in_db)

        # Shares should have no attr to create share from snapshot
        for share in shares_in_db:
            self.test_case.assertFalse(hasattr(share, self.expected_attr))

        # Pre-existing Share types must be present
        share_types_in_db = (
            conn.execute(share_types_table.select()).fetchall())
        share_type_ids_in_db = [s._mapping['id'] for s in share_types_in_db]
        for share_type_id in share_type_ids:
            self.test_case.assertIn(share_type_id, share_type_ids_in_db)

        # Pre-existing extra specs must be present
        extra_specs_in_db = (
            conn.execute(extra_specs_table.select().where(
                extra_specs_table.c.deleted == 0)).fetchall())
        self.test_case.assertGreaterEqual(len(extra_specs_in_db),
                                          len(self.extra_specs))

        # Share types must not have create share from snapshot extra spec
        for share_type_id in share_type_ids:
            new_extra_spec = [x for x in extra_specs_in_db
                              if x._mapping['spec_key'] == self.expected_attr
                              and x._mapping['share_type_id'] == share_type_id]
            self.test_case.assertEqual(0, len(new_extra_spec))
@map_to_migration('87ce15c59bbe')
class RevertToSnapshotShareColumn(BaseMigrationChecks):
    """Check the 'revert_to_snapshot_support' column added to shares."""

    expected_attr = constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT

    def _get_fake_data(self):
        """Build two share types with specs, plus shares and instances."""
        share_types = []
        extra_specs = []
        shares = []
        share_instances = []
        # (name, is_public, snapshot_support, dhss) per share type.
        type_params = (
            ('revert-1', False, False, 'True'),
            ('revert-2', True, True, 'False'),
        )
        for name, is_public, snap_support, dhss_value in type_params:
            stype = {
                'id': uuidutils.generate_uuid(),
                'deleted': 'False',
                'name': name,
                'is_public': is_public,
            }
            share_types.append(stype)
            extra_specs.append({
                'share_type_id': stype['id'],
                'spec_key': 'snapshot_support',
                'spec_value': snap_support,
                'deleted': 0,
            })
            extra_specs.append({
                'share_type_id': stype['id'],
                'spec_key': 'driver_handles_share_servers',
                'spec_value': dhss_value,
                'deleted': 0,
            })
            share = fake_share(snapshot_support=snap_support)
            shares.append(share)
            share_instances.append(
                fake_instance(share_id=share['id'],
                              share_type_id=stype['id']))

        return share_types, extra_specs, shares, share_instances

    def setup_upgrade_data(self, conn):
        """Persist the fake share types, specs, shares and instances."""
        (self.share_types, self.extra_specs, self.shares,
         self.share_instances) = self._get_fake_data()

        conn.execute(utils.load_table('share_types', conn)
                     .insert().values(self.share_types))
        conn.execute(utils.load_table('share_type_extra_specs', conn)
                     .insert().values(self.extra_specs))
        conn.execute(utils.load_table('shares', conn)
                     .insert().values(self.shares))
        conn.execute(utils.load_table('share_instances', conn)
                     .insert().values(self.share_instances))

    def check_upgrade(self, conn, data):
        """New attribute exists on every share and defaults to False."""
        expected_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', conn)

        # All fixture shares must still be present after the upgrade.
        rows = conn.execute(shares_table.select().where(
            shares_table.c.deleted == 'False')).fetchall()
        found_ids = [r._mapping['id'] for r in rows]
        self.test_case.assertTrue(len(found_ids) > 1)
        for share_id in expected_ids:
            self.test_case.assertIn(share_id, found_ids)

        # Each of them carries the new attribute, defaulted to False.
        for row in rows:
            self.test_case.assertTrue(hasattr(row, self.expected_attr))
            self.test_case.assertEqual(
                False, row._mapping[self.expected_attr])

    def check_downgrade(self, conn):
        """Attribute is removed again while all shares are preserved."""
        expected_ids = [s['id'] for s in self.shares]
        shares_table = utils.load_table('shares', conn)

        rows = conn.execute(shares_table.select()).fetchall()
        found_ids = [r._mapping['id'] for r in rows]
        self.test_case.assertTrue(len(found_ids) > 1)
        for share_id in expected_ids:
            self.test_case.assertIn(share_id, found_ids)

        for row in rows:
            self.test_case.assertFalse(hasattr(row, self.expected_attr))
@map_to_migration('95e3cf760840')
class RemoveNovaNetIdColumnFromShareNetworks(BaseMigrationChecks):
    """Check removal of the legacy 'nova_net_id' share-network column."""

    table_name = 'share_networks'
    nova_net_column_name = 'nova_net_id'

    def setup_upgrade_data(self, conn):
        """Insert a share network that still references a nova network."""
        fixture = {
            'id': 'foo_share_network_id_3',
            'user_id': 'user_id',
            'project_id': 'project_id',
            'nova_net_id': 'foo_nova_net_id',
        }
        table = utils.load_table(self.table_name, conn)
        conn.execute(table.insert().values(fixture))

    def check_upgrade(self, conn, data):
        """After upgrade no row may expose the dropped column."""
        table = utils.load_table(self.table_name, conn)
        rows = conn.execute(table.select())
        self.test_case.assertGreater(rows.rowcount, 0)
        for row in rows:
            self.test_case.assertFalse(
                hasattr(row, self.nova_net_column_name))

    def check_downgrade(self, conn):
        """Downgrade restores the column with a NULL value."""
        table = utils.load_table(self.table_name, conn)
        rows = conn.execute(table.select())
        self.test_case.assertGreater(rows.rowcount, 0)
        for row in rows:
            self.test_case.assertTrue(
                hasattr(row, self.nova_net_column_name))
            self.test_case.assertIsNone(
                row._mapping[self.nova_net_column_name])
@map_to_migration('54667b9cade7')
class RestoreStateToShareInstanceAccessMap(BaseMigrationChecks):
    """Check restoring a per-rule 'state' on share_instance_access_map.

    The upgrade adds a 'state' column to the instance/access mapping table
    and translates the legacy per-instance 'access_rules_status' values;
    the downgrade drops the column and maps the statuses back.
    """

    # Post-upgrade instance access_rules_status -> expected per-rule state.
    new_instance_mapping_state = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.SHARE_INSTANCE_RULES_SYNCING:
            constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.STATUS_OUT_OF_SYNC: constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        'updating_multiple': constants.ACCESS_STATE_QUEUED_TO_APPLY,
        constants.SHARE_INSTANCE_RULES_ERROR: constants.ACCESS_STATE_ERROR,
    }

    # Legacy access_rules_status -> value expected after the upgrade.
    # NOTE(review): defined but not referenced in the visible checks below;
    # presumably kept for documentation of the migration's status mapping.
    new_access_rules_status = {
        constants.STATUS_ACTIVE: constants.STATUS_ACTIVE,
        constants.STATUS_OUT_OF_SYNC: constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating': constants.SHARE_INSTANCE_RULES_SYNCING,
        'updating_multiple': constants.SHARE_INSTANCE_RULES_SYNCING,
        constants.SHARE_INSTANCE_RULES_ERROR:
            constants.SHARE_INSTANCE_RULES_ERROR,
    }

    @staticmethod
    def generate_share_instance(sid, access_rules_status):
        """Return a share-instance fixture dict for share *sid*."""
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': sid,
            'status': constants.STATUS_AVAILABLE,
            'access_rules_status': access_rules_status
        }
        return share_instance_data

    @staticmethod
    def generate_share_instance_access_map(share_access_data_id,
                                           share_instance_id):
        """Return an instance <-> access-rule mapping fixture dict."""
        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_id,
            'access_id': share_access_data_id,
            'deleted': 'False'
        }
        return share_instance_access_data

    def setup_upgrade_data(self, conn):
        """Create one share, five instances (one per legacy status), one
        access rule, and a mapping row for every instance."""
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': 'fake',
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share_data))

        share_instances = [
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ACTIVE),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_OUT_OF_SYNC),
            self.generate_share_instance(
                share_data['id'], constants.STATUS_ERROR),
            self.generate_share_instance(
                share_data['id'], 'updating'),
            self.generate_share_instance(
                share_data['id'], 'updating_multiple'),
        ]
        # Remember the two transitional-status instances so the checks can
        # assert their statuses were rewritten by the migration.
        self.updating_share_instance = share_instances[3]
        self.updating_multiple_share_instance = share_instances[4]

        share_instance_table = utils.load_table('share_instances', conn)
        for share_instance_data in share_instances:
            conn.execute(
                share_instance_table.insert().values(share_instance_data))

        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_data['id'],
            'access_type': 'fake',
            'access_to': 'alice',
            'deleted': 'False'
        }
        share_access_table = utils.load_table('share_access_map', conn)
        conn.execute(share_access_table.insert().values(share_access_data))

        share_instance_access_data = []
        for share_instance in share_instances:
            sia_map = self.generate_share_instance_access_map(
                share_access_data['id'], share_instance['id'])
            share_instance_access_data.append(sia_map)

        share_instance_access_table = utils.load_table(
            'share_instance_access_map', conn)
        for sia_map in share_instance_access_data:
            conn.execute(share_instance_access_table.insert().values(sia_map))

    def check_upgrade(self, conn, data):
        """Each mapping got a 'state' consistent with its instance, and
        transitional instance statuses were rewritten."""
        share_instance_table = utils.load_table('share_instances', conn)
        sia_table = utils.load_table('share_instance_access_map', conn)

        for rule in conn.execute(sia_table.select()):
            self.test_case.assertTrue(hasattr(rule, 'state'))
            # Look up the instance this rule belongs to and verify the
            # rule state matches the instance's (post-upgrade) status.
            correlated_share_instances = conn.execute(
                share_instance_table.select().where(
                    share_instance_table.c.id ==
                    rule._mapping['share_instance_id']))
            access_rules_status = getattr(correlated_share_instances.first(),
                                          'access_rules_status')
            self.test_case.assertEqual(
                self.new_instance_mapping_state[access_rules_status],
                rule._mapping['state'])

        for instance in conn.execute(share_instance_table.select()):
            # Retired statuses must no longer appear anywhere.
            self.test_case.assertTrue(instance._mapping['access_rules_status']
                                      not in ('updating',
                                              'updating_multiple',
                                              constants.STATUS_OUT_OF_SYNC))
            if instance._mapping['id'] in (
                self.updating_share_instance['id'],
                self.updating_multiple_share_instance['id']
            ):
                self.test_case.assertEqual(
                    constants.SHARE_INSTANCE_RULES_SYNCING,
                    instance._mapping['access_rules_status'])

    def check_downgrade(self, conn):
        """The 'state' column is gone and statuses were mapped back."""
        share_instance_table = utils.load_table('share_instances', conn)
        sia_table = utils.load_table('share_instance_access_map', conn)
        for rule in conn.execute(sia_table.select()):
            self.test_case.assertFalse(hasattr(rule, 'state'))

        for instance in conn.execute(share_instance_table.select()):
            if instance._mapping['id'] in (
                self.updating_share_instance['id'],
                self.updating_multiple_share_instance['id']
            ):
                self.test_case.assertEqual(
                    constants.STATUS_OUT_OF_SYNC,
                    instance._mapping['access_rules_status'])
@map_to_migration('e9f79621d83f')
class AddCastRulesToReadonlyToInstances(BaseMigrationChecks):
    """Check the 'cast_rules_to_readonly' column added to share instances.

    After the upgrade the flag must be True only for a non-active replica
    of a 'readable'-replicated share that is not undergoing a replication
    change; 'correct_instance' is the single fixture matching all three.
    """

    share_type = {
        'id': uuidutils.generate_uuid(),
    }

    shares = [
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_READABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
            'replication_type': constants.REPLICATION_TYPE_WRITABLE,
        },
        {
            'id': uuidutils.generate_uuid(),
        },
    ]
    share_ids = [x['id'] for x in shares]

    correct_instance = {
        'id': uuidutils.generate_uuid(),
        'share_id': share_ids[1],
        'replica_state': constants.REPLICA_STATE_IN_SYNC,
        'status': constants.STATUS_AVAILABLE,
        'share_type_id': share_type['id'],
    }

    instances = [
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[0],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[1],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        correct_instance,
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_ACTIVE,
            'status': constants.STATUS_REPLICATION_CHANGE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[2],
            'replica_state': constants.REPLICA_STATE_IN_SYNC,
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
        {
            'id': uuidutils.generate_uuid(),
            'share_id': share_ids[3],
            'status': constants.STATUS_AVAILABLE,
            'share_type_id': share_type['id'],
        },
    ]
    # NOTE: single assignment only - the original chain-assignment
    # 'instance_ids = share_ids = ...' clobbered share_ids with the
    # instance ids.
    instance_ids = [x['id'] for x in instances]

    def setup_upgrade_data(self, conn):
        """Insert the share type, the shares and all instances."""
        shares_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)
        share_types_table = utils.load_table('share_types', conn)

        conn.execute(share_types_table.insert().values(self.share_type))

        for share in self.shares:
            conn.execute(shares_table.insert().values(share))

        for instance in self.instances:
            conn.execute(share_instances_table.insert().values(instance))

    def check_upgrade(self, conn, data):
        """Only the expected replica got cast_rules_to_readonly=True."""
        shares_table = utils.load_table('shares', conn)
        share_instances_table = utils.load_table('share_instances', conn)

        # NOTE: must use the SQL IN operator (.in_()); the Python 'in'
        # operator on a SQLAlchemy column evaluates to a constant False,
        # so this loop used to never run at all.
        for instance in conn.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertTrue(
                hasattr(instance, 'cast_rules_to_readonly'))
            share = conn.execute(shares_table.select().where(
                instance._mapping['share_id'] == shares_table.c.id)).first()
            if (instance._mapping['replica_state'] !=
                    constants.REPLICA_STATE_ACTIVE and
                    share._mapping['replication_type'] ==
                    constants.REPLICATION_TYPE_READABLE and
                    instance._mapping['status'] !=
                    constants.STATUS_REPLICATION_CHANGE):
                self.test_case.assertTrue(
                    instance._mapping['cast_rules_to_readonly'])
                self.test_case.assertEqual(instance._mapping['id'],
                                           self.correct_instance['id'])
            else:
                self.test_case.assertEqual(
                    False, instance._mapping['cast_rules_to_readonly'])

    def check_downgrade(self, conn):
        """The column must be gone from every instance row."""
        share_instances_table = utils.load_table('share_instances', conn)

        for instance in conn.execute(share_instances_table.select()):
            # hasattr, not 'in': membership on a Row tests values, which
            # would pass vacuously regardless of the column's presence.
            self.test_case.assertFalse(
                hasattr(instance, 'cast_rules_to_readonly'))
@map_to_migration('03da71c0e321')
class ShareGroupMigrationChecks(BaseMigrationChecks):
    """Data checks for the consistency-group -> share-group rename migration.

    The upgrade moves rows from 'consistency_groups', 'cgsnapshots',
    'cgsnapshot_members' and 'consistency_group_share_type_mappings' into
    their 'share_group*' counterparts; the downgrade moves them back.
    """

    def setup_upgrade_data(self, conn):
        """Create one row in every CG-era table touched by the rename."""
        # Create share type
        self.share_type_id = uuidutils.generate_uuid()
        st_fixture = {
            'deleted': "False",
            'id': self.share_type_id,
        }
        st_table = utils.load_table('share_types', conn)
        conn.execute(st_table.insert().values(st_fixture))

        # Create CG
        self.cg_id = uuidutils.generate_uuid()
        cg_fixture = {
            'deleted': "False",
            'id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cg_table = utils.load_table('consistency_groups', conn)
        conn.execute(cg_table.insert().values(cg_fixture))

        # Create share_type group mapping
        self.mapping_id = uuidutils.generate_uuid()
        mapping_fixture = {
            'deleted': "False",
            'id': self.mapping_id,
            'consistency_group_id': self.cg_id,
            'share_type_id': self.share_type_id,
        }
        mapping_table = utils.load_table(
            'consistency_group_share_type_mappings', conn)
        conn.execute(mapping_table.insert().values(mapping_fixture))

        # Create share
        self.share_id = uuidutils.generate_uuid()
        share_fixture = {
            'deleted': "False",
            'id': self.share_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share_fixture))

        # Create share instance
        self.share_instance_id = uuidutils.generate_uuid()
        share_instance_fixture = {
            'deleted': "False",
            'share_type_id': self.share_type_id,
            'id': self.share_instance_id,
            'share_id': self.share_id,
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', conn)
        conn.execute(
            share_instance_table.insert().values(share_instance_fixture))

        # Create cgsnapshot
        self.cgsnapshot_id = uuidutils.generate_uuid()
        cg_snap_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_id,
            'consistency_group_id': self.cg_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshots_table = utils.load_table('cgsnapshots', conn)
        conn.execute(cgsnapshots_table.insert().values(cg_snap_fixture))

        # Create cgsnapshot member
        self.cgsnapshot_member_id = uuidutils.generate_uuid()
        cg_snap_member_fixture = {
            'deleted': "False",
            'id': self.cgsnapshot_member_id,
            'cgsnapshot_id': self.cgsnapshot_id,
            'share_type_id': self.share_type_id,
            'share_instance_id': self.share_instance_id,
            'share_id': self.share_id,
            'user_id': 'fake_user',
            'project_id': 'fake_project_id',
        }
        cgsnapshot_members_table = utils.load_table(
            'cgsnapshot_members', conn)
        conn.execute(
            cgsnapshot_members_table.insert().values(cg_snap_member_fixture))

    def check_upgrade(self, conn, data):
        """Each CG-era row must appear in its share-group counterpart."""
        sg_table = utils.load_table("share_groups", conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sg = db_result.first()
        self.test_case.assertIsNone(
            sg._mapping['source_share_group_snapshot_id'])

        share_table = utils.load_table("shares", conn)
        share_result = conn.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        self.test_case.assertEqual(
            self.cg_id, share._mapping['share_group_id'])
        self.test_case.assertIsNone(
            share._mapping['source_share_group_snapshot_member_id'])

        mapping_table = utils.load_table(
            "share_group_share_type_mappings", conn)
        mapping_result = conn.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        mapping_record = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, mapping_record._mapping['share_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, mapping_record._mapping['share_type_id'])

        sgs_table = utils.load_table("share_group_snapshots", conn)
        db_result = conn.execute(sgs_table.select().where(
            sgs_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgs = db_result.first()
        self.test_case.assertEqual(self.cg_id, sgs._mapping['share_group_id'])

        sgsm_table = utils.load_table("share_group_snapshot_members", conn)
        db_result = conn.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        sgsm = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, sgsm._mapping['share_group_snapshot_id'])
        # Check the column name against the row *mapping*: ``x in row``
        # tests the row's values under SQLAlchemy 1.4/2.0, so the previous
        # ``assertNotIn('share_type_id', sgsm)`` passed vacuously even when
        # the column existed.  check_downgrade below already uses _mapping.
        self.test_case.assertNotIn('share_type_id', sgsm._mapping)

    def check_downgrade(self, conn):
        """Rows must be moved back into the consistency-group tables."""
        cg_table = utils.load_table("consistency_groups", conn)
        db_result = conn.execute(cg_table.select().where(
            cg_table.c.id == self.cg_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cg = db_result.first()
        self.test_case.assertIsNone(cg._mapping['source_cgsnapshot_id'])

        share_table = utils.load_table("shares", conn)
        share_result = conn.execute(share_table.select().where(
            share_table.c.id == self.share_id))
        self.test_case.assertEqual(1, share_result.rowcount)
        share = share_result.first()
        self.test_case.assertEqual(
            self.cg_id, share._mapping['consistency_group_id'])
        self.test_case.assertIsNone(
            share._mapping['source_cgsnapshot_member_id'])

        mapping_table = utils.load_table(
            "consistency_group_share_type_mappings", conn)
        mapping_result = conn.execute(mapping_table.select().where(
            mapping_table.c.id == self.mapping_id))
        self.test_case.assertEqual(1, mapping_result.rowcount)
        cg_st_mapping = mapping_result.first()
        self.test_case.assertEqual(
            self.cg_id, cg_st_mapping._mapping['consistency_group_id'])
        self.test_case.assertEqual(
            self.share_type_id, cg_st_mapping._mapping['share_type_id'])

        cg_snapshots_table = utils.load_table("cgsnapshots", conn)
        db_result = conn.execute(cg_snapshots_table.select().where(
            cg_snapshots_table.c.id == self.cgsnapshot_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        cgsnap = db_result.first()
        self.test_case.assertEqual(
            self.cg_id, cgsnap._mapping['consistency_group_id'])

        cg_snap_member_table = utils.load_table("cgsnapshot_members", conn)
        db_result = conn.execute(cg_snap_member_table.select().where(
            cg_snap_member_table.c.id == self.cgsnapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        member = db_result.first()
        self.test_case.assertEqual(
            self.cgsnapshot_id, member._mapping['cgsnapshot_id'])
        self.test_case.assertIn('share_type_id', member._mapping)
        self.test_case.assertEqual(
            self.share_type_id, member._mapping['share_type_id'])
@map_to_migration('927920b37453')
class ShareGroupSnapshotMemberNewProviderLocationColumnChecks(
        BaseMigrationChecks):
    """Checks addition of 'provider_location' to SG snapshot members."""

    # Table under test and fixture IDs shared by all three check phases.
    table_name = 'share_group_snapshot_members'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create the FK chain down to one share group snapshot member."""
        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

        # Setup shares
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', conn)
        conn.execute(s_table.insert().values(share_data))

        # Setup share instances
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', conn)
        conn.execute(si_table.insert().values(share_instance_data))

        # Setup share group snapshot
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', conn)
        conn.execute(sgs_table.insert().values(sgs_data))

        # Setup share group snapshot member
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.table_name, conn)
        conn.execute(sgsm_table.insert().values(sgsm_data))

    def check_upgrade(self, conn, data):
        """New column must exist and accept a max-length (255) string."""
        sgsm_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            self.test_case.assertTrue(hasattr(sgsm, 'provider_location'))

        # Check that we can write string data to the new field
        # pylint: disable=no-value-for-parameter
        conn.execute(sgsm_table.update().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'provider_location': ('z' * 255),
        }))

    def check_downgrade(self, conn):
        """Column must be absent again; fixture row must survive."""
        sgsm_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            self.test_case.assertFalse(hasattr(sgsm, 'provider_location'))
@map_to_migration('d5db24264f5c')
class ShareGroupNewConsistentSnapshotSupportColumnChecks(BaseMigrationChecks):
    """Checks the enum column 'consistent_snapshot_support' on share_groups."""

    table_name = 'share_groups'
    new_attr_name = 'consistent_snapshot_support'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create a share group type and one share group using it."""
        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

    def check_upgrade(self, conn, data):
        """Column exists; accepts only NULL/'pool'/'host', rejects the rest."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertTrue(hasattr(sg, self.new_attr_name))

        # Check that we can write proper enum data to the new field
        for value in (None, 'pool', 'host'):
            # pylint: disable=no-value-for-parameter
            conn.execute(sg_table.update().where(
                sg_table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))

        # Check that we cannot write values that are not allowed by enum.
        for value in ('', 'fake', 'pool1', 'host1', '1pool', '1host'):
            # pylint: disable=no-value-for-parameter
            self.test_case.assertRaises(
                # FIXME(zzzeek) - oslo.db may require exception translation
                # updates here for the particular DataError in question
                (oslo_db_exc.DBError, sa_exc.DataError),
                conn.execute,
                sg_table.update().where(
                    sg_table.c.id == self.share_group_id
                ).values({self.new_attr_name: value})
            )

    def check_downgrade(self, conn):
        """Column must be dropped on downgrade."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertFalse(hasattr(sg, self.new_attr_name))
@map_to_migration('7d142971c4ef')
class ReservationExpireIndexChecks(BaseMigrationChecks):
    """Checks creation of the (deleted, expire) index on 'reservations'."""

    def setup_upgrade_data(self, conn):
        # No fixture rows needed: only table metadata is inspected.
        pass

    def _get_reservations_expire_delete_index(self, conn):
        """Return the index covering exactly (deleted, expire), else None."""
        reservation_table = utils.load_table('reservations', conn)
        expected_columns = ['deleted', 'expire']
        candidates = (
            index for index in reservation_table.indexes
            if sorted(index.columns.keys()) == expected_columns
        )
        return next(candidates, None)

    def check_upgrade(self, conn, data):
        """Index must exist after upgrade."""
        self.test_case.assertTrue(
            self._get_reservations_expire_delete_index(conn))

    def check_downgrade(self, conn):
        """Index must be gone after downgrade."""
        self.test_case.assertFalse(
            self._get_reservations_expire_delete_index(conn))
@map_to_migration('5237b6625330')
class ShareGroupNewAvailabilityZoneIDColumnChecks(BaseMigrationChecks):
    """Checks addition of 'availability_zone_id' column to share_groups."""

    table_name = 'share_groups'
    new_attr_name = 'availability_zone_id'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    availability_zone_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create an AZ, a share group type, and one share group."""
        # Setup AZ
        az_data = {
            'id': self.availability_zone_id,
            'name': uuidutils.generate_uuid(),
        }
        az_table = utils.load_table('availability_zones', conn)
        conn.execute(az_table.insert().values(az_data))

        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

    def check_upgrade(self, conn, data):
        """Column exists and accepts both NULL and a valid AZ FK value."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertTrue(hasattr(sg, self.new_attr_name))

        # Check that we can write proper data to the new field
        for value in (None, self.availability_zone_id):
            # pylint: disable=no-value-for-parameter
            conn.execute(sg_table.update().where(
                sg_table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))

    def check_downgrade(self, conn):
        """Column must be dropped on downgrade."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertFalse(hasattr(sg, self.new_attr_name))
@map_to_migration('31252d671ae5')
class SquashSGSnapshotMembersAndSSIModelsChecks(BaseMigrationChecks):
    """Checks the merge of SG snapshot members into share_snapshot_instances.

    Upgrade moves rows from 'share_group_snapshot_members' into
    'share_snapshot_instances' and drops the old table; downgrade recreates
    the old table and moves the rows back.
    """

    old_table_name = 'share_group_snapshot_members'
    new_table_name = 'share_snapshot_instances'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()
    # Columns that must be carried over in both migration directions.
    keys = (
        'user_id', 'project_id', 'size', 'share_proto',
        'share_group_snapshot_id',
    )

    def setup_upgrade_data(self, conn):
        """Create the FK chain down to one share group snapshot member."""
        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

        # Setup shares
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', conn)
        conn.execute(s_table.insert().values(share_data))

        # Setup share instances
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', conn)
        conn.execute(si_table.insert().values(share_instance_data))

        # Setup share group snapshot
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', conn)
        conn.execute(sgs_table.insert().values(sgs_data))

        # Setup share group snapshot member
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.old_table_name, conn)
        conn.execute(sgsm_table.insert().values(sgsm_data))

    def check_upgrade(self, conn, data):
        """Member row moved to SSI table; old table dropped; fields writable."""
        ssi_table = utils.load_table(self.new_table_name, conn)
        db_result = conn.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for ssi in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(ssi, key))

        # Check that we can write string data to the new fields
        # pylint: disable=no-value-for-parameter
        conn.execute(ssi_table.update().where(
            ssi_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'user_id': ('u' * 255),
            'project_id': ('p' * 255),
            'share_proto': ('s' * 255),
            'size': 123456789,
            'share_group_snapshot_id': self.share_group_snapshot_id,
        }))

        # Check that table 'share_group_snapshot_members' does not
        # exist anymore
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_group_snapshot_members', conn)

    def check_downgrade(self, conn):
        """Member row restored in old table and removed from SSI table."""
        sgsm_table = utils.load_table(self.old_table_name, conn)
        db_result = conn.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(sgsm, key))

        # Check that create SGS member is absent in SSI table
        ssi_table = utils.load_table(self.new_table_name, conn)
        db_result = conn.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(0, db_result.rowcount)
@map_to_migration('238720805ce1')
class MessagesTableChecks(BaseMigrationChecks):
    """Checks creation and removal of the 'messages' table."""

    new_table_name = 'messages'

    def setup_upgrade_data(self, conn):
        # Nothing to seed: the table is created by the migration itself.
        pass

    def check_upgrade(self, conn, data):
        """Insert a fully-populated row to prove the new schema accepts it."""
        messages_table = utils.load_table(self.new_table_name, conn)
        row = {
            'id': uuidutils.generate_uuid(),
            'project_id': 'x' * 255,
            'request_id': 'x' * 255,
            'resource_type': 'x' * 255,
            'resource_id': 'y' * 36,
            'action_id': 'y' * 10,
            'detail_id': 'y' * 10,
            'message_level': 'x' * 255,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'expires_at': datetime.datetime(2017, 7, 11, 18, 5, 58),
        }
        conn.execute(messages_table.insert().values(row))

    def check_downgrade(self, conn):
        """Table must no longer be reflectable after downgrade."""
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'messages', conn)
@map_to_migration('b516de97bfee')
class ProjectShareTypesQuotasChecks(BaseMigrationChecks):
    """Checks per-share-type quotas: new quota table and 'share_type_id'
    columns added to the usages and reservations tables.
    """

    new_table_name = 'project_share_type_quotas'
    usages_table = 'quota_usages'
    reservations_table = 'reservations'
    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create the share type the quota records will reference."""
        # Create share type
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', conn)
        conn.execute(st_table.insert().values(self.st_data))

    def check_upgrade(self, conn, data):
        """Quota, usage and reservation rows can carry a share_type_id."""
        # Create share type quota
        self.quota_data = {
            'project_id': 'x' * 255,
            'resource': 'y' * 255,
            'hard_limit': 987654321,
            'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'share_type_id': self.st_record_id,
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(self.quota_data))

        # Create usage record
        self.usages_data = {
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'in_use': 13,
            'reserved': 15,
        }
        usages_table = utils.load_table(self.usages_table, conn)
        conn.execute(usages_table.insert().values(self.usages_data))

        # Create reservation record
        self.reservations_data = {
            'uuid': uuidutils.generate_uuid(),
            # NOTE(review): hard-coded usage_id assumes the usage row above
            # got autoincrement id 1 — TODO confirm against test DB state.
            'usage_id': 1,
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'delta': 13,
            'expire': datetime.datetime(2399, 4, 11, 18, 5, 58),
        }
        reservations_table = utils.load_table(self.reservations_table, conn)
        conn.execute(
            reservations_table.insert().values(self.reservations_data))

    def check_downgrade(self, conn):
        """New table dropped; share_type_id gone from usages/reservations."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.new_table_name, conn)
        for table_name in (self.usages_table, self.reservations_table):
            table = utils.load_table(table_name, conn)
            db_result = conn.execute(table.select())
            self.test_case.assertGreater(db_result.rowcount, 0)
            for row in db_result:
                self.test_case.assertFalse(hasattr(row, 'share_type_id'))
@map_to_migration('829a09b0ddd4')
class FixProjectShareTypesQuotasUniqueConstraintChecks(BaseMigrationChecks):
    """Checks the relaxed unique constraint on project share type quotas."""

    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create the share type the quota rows will reference."""
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        share_types_table = utils.load_table('share_types', conn)
        conn.execute(share_types_table.insert().values(self.st_data))

    def check_upgrade(self, conn, data):
        """Rows differing only in project_id must both insert cleanly."""
        for project_id in ('x' * 255, 'x'):
            # Create share type quota
            self.quota_data = {
                'project_id': project_id,
                'resource': 'y' * 255,
                'hard_limit': 987654321,
                'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
                'updated_at': None,
                'deleted_at': None,
                'deleted': 0,
                'share_type_id': self.st_record_id,
            }
            quotas_table = utils.load_table('project_share_type_quotas', conn)
            conn.execute(quotas_table.insert().values(self.quota_data))

    def check_downgrade(self, conn):
        # Nothing to verify on downgrade for a constraint-only change.
        pass
@map_to_migration('27cb96d991fa')
class NewDescriptionColumnChecks(BaseMigrationChecks):
    """Checks addition of the 'description' column to share_types."""

    st_table_name = 'share_types'
    st_ids = ['share_type_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, conn):
        """Create one share type that predates the description column."""
        # Create share type
        table = utils.load_table(self.st_table_name, conn)
        conn.execute(table.insert().values({
            'id': self.st_ids[0],
            'name': 'name_1',
        }))

    def check_upgrade(self, conn, data):
        """Column exists on old rows and round-trips data on new rows."""
        table = utils.load_table(self.st_table_name, conn)
        for row in conn.execute(table.select()):
            self.test_case.assertTrue(hasattr(row, 'description'))

        fixture = {
            'id': self.st_ids[1],
            'name': 'name_1',
            'description': 'description_1',
        }
        conn.execute(table.insert().values(fixture))
        record = conn.execute(table.select().where(
            fixture['id'] == table.c.id)).first()
        self.test_case.assertEqual(
            fixture['description'], record._mapping['description'])

    def check_downgrade(self, conn):
        """Column must be dropped on downgrade."""
        table = utils.load_table(self.st_table_name, conn)
        for record in conn.execute(table.select()):
            self.test_case.assertFalse(hasattr(record, 'description'))
@map_to_migration('4a482571410f')
class BackenInfoTableChecks(BaseMigrationChecks):
    """Checks creation and removal of the 'backend_info' table."""

    new_table_name = 'backend_info'

    def setup_upgrade_data(self, conn):
        # Nothing to seed: the table is created by the migration itself.
        pass

    def check_upgrade(self, conn, data):
        """Insert one row to prove the new schema accepts it."""
        row = {
            'host': 'test_host',
            'info_hash': 'test_hash',
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
        }
        backend_info_table = utils.load_table(self.new_table_name, conn)
        conn.execute(backend_info_table.insert().values(row))

    def check_downgrade(self, conn):
        """Table must no longer be reflectable after downgrade."""
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('579c267fbb4d')
class ShareInstanceAccessMapTableChecks(BaseMigrationChecks):
    """Checks move of access-rule 'state' from share_access_map to the new
    per-instance 'share_instance_access_map' table (and back on downgrade).
    """

    share_access_table = 'share_access_map'
    share_instance_access_table = 'share_instance_access_map'

    @staticmethod
    def generate_share_instance(share_id, **kwargs):
        """Build a share instance fixture dict; kwargs override defaults."""
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_id,
            'status': constants.STATUS_AVAILABLE,
        }
        share_instance_data.update(**kwargs)
        return share_instance_data

    @staticmethod
    def generate_share_access_map(share_id, **kwargs):
        """Build a share access rule fixture dict; kwargs override defaults."""
        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_id,
            'deleted': 'False',
            'access_type': 'ip',
            'access_to': '192.0.2.10',
        }
        share_access_data.update(**kwargs)
        return share_access_data

    def setup_upgrade_data(self, conn):
        """Create a share, two instances, and two access rules
        (one ACTIVE, one ERROR) whose states the upgrade must migrate.
        """
        share = {
            'id': uuidutils.generate_uuid(),
            'share_proto': 'fake',
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share))

        share_instances = [
            self.generate_share_instance(share['id']),
            self.generate_share_instance(share['id']),
        ]

        share_instance_table = utils.load_table('share_instances', conn)
        for share_instance in share_instances:
            conn.execute(share_instance_table.insert().values(share_instance))

        share_accesses = [
            self.generate_share_access_map(
                share['id'], state=constants.ACCESS_STATE_ACTIVE),
            self.generate_share_access_map(
                share['id'], state=constants.ACCESS_STATE_ERROR),
        ]
        self.active_share_access = share_accesses[0]
        self.error_share_access = share_accesses[1]
        share_access_table = utils.load_table('share_access_map', conn)
        conn.execute(share_access_table.insert().values(share_accesses))

    def check_upgrade(self, conn, data):
        """'state' dropped from access rules, preserved per instance-access."""
        share_access_table = utils.load_table(
            self.share_access_table, conn)
        share_instance_access_table = utils.load_table(
            self.share_instance_access_table, conn)
        share_accesses = conn.execute(share_access_table.select())
        share_instance_accesses = conn.execute(
            share_instance_access_table.select())

        for share_access in share_accesses:
            self.test_case.assertFalse(hasattr(share_access, 'state'))

        for si_access in share_instance_accesses:
            if si_access._mapping['access_id'] in (
                self.active_share_access['id'],
                self.error_share_access['id']
            ):
                self.test_case.assertIn(si_access._mapping['state'],
                                        (self.active_share_access['state'],
                                         self.error_share_access['state']))

    def check_downgrade(self, conn):
        """Instance-access table dropped; per-rule states restored exactly."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table,
            self.share_instance_access_table, conn)

        share_access_table = utils.load_table(
            self.share_access_table, conn)
        share_accesses = conn.execute(share_access_table.select().where(
            share_access_table.c.id.in_((self.active_share_access['id'],
                                         self.error_share_access['id']))))

        for share_access in share_accesses:
            self.test_case.assertTrue(hasattr(share_access, 'state'))
            if share_access._mapping['id'] == self.active_share_access['id']:
                self.test_case.assertEqual(
                    constants.ACCESS_STATE_ACTIVE,
                    share_access._mapping['state'])
            elif share_access._mapping['id'] == self.error_share_access['id']:
                self.test_case.assertEqual(
                    constants.ACCESS_STATE_ERROR,
                    share_access._mapping['state'])
@map_to_migration('097fad24d2fc')
class ShareInstancesShareIdIndexChecks(BaseMigrationChecks):
    """Checks creation of the 'share_instances_share_id_idx' index."""

    def setup_upgrade_data(self, conn):
        # No fixture rows needed: only table metadata is inspected.
        pass

    def _get_share_instances_share_id_index(self, conn):
        """Return the named index if present, else None."""
        share_instances_table = utils.load_table('share_instances', conn)
        candidates = (
            index for index in share_instances_table.indexes
            if index.name == 'share_instances_share_id_idx'
        )
        return next(candidates, None)

    def check_upgrade(self, conn, data):
        """Index must exist after upgrade."""
        self.test_case.assertTrue(
            self._get_share_instances_share_id_index(conn))

    def check_downgrade(self, conn):
        """Index must be gone after downgrade."""
        self.test_case.assertFalse(
            self._get_share_instances_share_id_index(conn))
@map_to_migration('11ee96se625f3')
class AccessMetadataTableChecks(BaseMigrationChecks):
    """Checks creation of the 'share_access_rules_metadata' table."""

    new_table_name = 'share_access_rules_metadata'
    record_access_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Create share, instance, access rule and instance-access mapping
        so the metadata row inserted in check_upgrade has a valid FK target.
        """
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "NFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share_data))

        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_data['id'],
            'status': 'available',
            'access_rules_status': 'active',
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', conn)
        conn.execute(share_instance_table.insert().values(share_instance_data))

        share_access_data = {
            'id': self.record_access_id,
            'share_id': share_data['id'],
            'access_type': 'NFS',
            'access_to': '10.0.0.1',
            'deleted': 'False'
        }
        share_access_table = utils.load_table('share_access_map', conn)
        conn.execute(share_access_table.insert().values(share_access_data))

        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_data['id'],
            'access_id': share_access_data['id'],
            'deleted': 'False'
        }
        share_instance_access_table = utils.load_table(
            'share_instance_access_map', conn)
        conn.execute(share_instance_access_table.insert().values(
            share_instance_access_data))

    def check_upgrade(self, conn, data):
        """Insert a max-length metadata row to prove the new schema works."""
        # NOTE: rebinding the 'data' parameter here; the incoming value is
        # not used by this check.
        data = {
            'id': 1,
            'key': 't' * 255,
            'value': 'v' * 1023,
            'access_id': self.record_access_id,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 'False',
        }

        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(data))

    def check_downgrade(self, conn):
        """Table must no longer be reflectable after downgrade."""
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('6a3fd2984bc31')
class ShareServerIsAutoDeletableAndIdentifierChecks(BaseMigrationChecks):
    """Check 'is_auto_deletable' and 'identifier' on share servers."""

    def setup_upgrade_data(self, conn):
        # A share network to attach the share server to.
        network_row = {
            'id': 'fake_sn_id',
            'user_id': 'user_id',
            'project_id': 'project_id',
        }
        networks_table = utils.load_table('share_networks', conn)
        conn.execute(networks_table.insert().values(network_row))

        # The share server under test.
        server_row = {
            'id': 'fake_ss_id',
            'share_network_id': network_row['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        servers_table = utils.load_table('share_servers', conn)
        conn.execute(servers_table.insert().values(server_row))

    def check_upgrade(self, conn, data):
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            # Both columns exist and carry their expected defaults.
            self.test_case.assertTrue(hasattr(server, 'is_auto_deletable'))
            self.test_case.assertEqual(1, server.is_auto_deletable)
            self.test_case.assertTrue(hasattr(server, 'identifier'))
            self.test_case.assertEqual(server.id, server.identifier)

    def check_downgrade(self, conn):
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            # Both columns must be dropped again.
            self.test_case.assertFalse(hasattr(server, 'is_auto_deletable'))
            self.test_case.assertFalse(hasattr(server, 'identifier'))
@map_to_migration('805685098bd2')
class ShareNetworkSubnetMigrationChecks(BaseMigrationChecks):
    """Check migration of network data into share network subnets.

    The upgrade moves the network attributes (neutron ids, cidr, mtu, ...)
    off the 'share_networks' table into the new 'share_network_subnets'
    table and repoints share servers from 'share_network_id' to
    'share_network_subnet_id'. The downgrade reverses both changes.
    """

    user_id = '6VFQ87wnV24lg1c2q1q0lJkTbQBPFZ1m4968'
    project_id = '19HAW8w58yeUPBy8zGex4EGulWZHd8zZGtHk'
    # Template share network carrying every movable network attribute.
    share_network = {
        'id': uuidutils.generate_uuid(),
        'user_id': user_id,
        'project_id': project_id,
        'neutron_net_id': uuidutils.generate_uuid(),
        'neutron_subnet_id': uuidutils.generate_uuid(),
        'cidr': '203.0.113.0/24',
        'ip_version': 4,
        'network_type': 'vxlan',
        'segmentation_id': 100,
        'gateway': 'fake_gateway',
        'mtu': 1500,
    }

    share_networks = [share_network]

    sns_table_name = 'share_network_subnets'
    sn_table_name = 'share_networks'
    ss_table_name = 'share_servers'

    # Attributes the upgrade moves into the subnets table.
    expected_keys = ['neutron_net_id', 'neutron_subnet_id', 'cidr',
                     'ip_version', 'network_type', 'segmentation_id',
                     'gateway', 'mtu']

    def _setup_data_for_empty_neutron_net_and_subnet_id_test(self, network):
        """Return *network* with a fresh id and all network fields unset."""
        network['id'] = uuidutils.generate_uuid()
        for key in self.expected_keys:
            network[key] = None
        return network

    def setup_upgrade_data(self, conn):
        share_network_data_without_net_info = (
            self._setup_data_for_empty_neutron_net_and_subnet_id_test(
                copy.deepcopy(self.share_network)))
        self.share_networks.append(share_network_data_without_net_info)
        # Load the tables to be used below
        sn_table = utils.load_table(self.sn_table_name, conn)
        ss_table = utils.load_table(self.ss_table_name, conn)

        # Share server data ('id' and 'share_network_id' are filled in per
        # iteration below)
        share_server_data = {
            'host': 'acme@controller-ostk-0',
            'status': 'active',
        }

        # Create the share networks and one share server for each of them
        for network in self.share_networks:
            share_server_data['share_network_id'] = network['id']
            share_server_data['id'] = uuidutils.generate_uuid()
            conn.execute(sn_table.insert().values(network))
            conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        # Load the necessary tables
        sn_table = utils.load_table(self.sn_table_name, conn)
        sns_table = utils.load_table(self.sns_table_name, conn)
        ss_table = utils.load_table(self.ss_table_name, conn)

        for network in self.share_networks:
            sn_record = conn.execute(sn_table.select().where(
                sn_table.c.id == network['id'])).first()

            # The movable attributes are gone from the share network...
            for key in self.expected_keys:
                self.test_case.assertFalse(hasattr(sn_record, key))

            sns_record = conn.execute(sns_table.select().where(
                sns_table.c.share_network_id == network['id'])).first()

            # ...and present, with the original values, on the subnet
            for key in self.expected_keys:
                self.test_case.assertTrue(hasattr(sns_record, key))
                self.test_case.assertEqual(
                    network[key], sns_record._mapping[key])

            # The share server now references the subnet, not the network
            ss_record = (
                conn.execute(
                    ss_table.select().where(
                        ss_table.c.share_network_subnet_id ==
                        sns_record._mapping['id'])
                ).first())

            self.test_case.assertIs(
                True, hasattr(ss_record, 'share_network_subnet_id'))
            self.test_case.assertEqual(
                ss_record._mapping['share_network_subnet_id'],
                sns_record._mapping['id']
            )
            self.test_case.assertIs(
                False, hasattr(ss_record, 'share_network_id'))

    def check_downgrade(self, conn):
        sn_table = utils.load_table(self.sn_table_name, conn)

        # Check if the share network table contains the expected keys
        for sn in conn.execute(sn_table.select()):
            for key in self.expected_keys:
                self.test_case.assertTrue(hasattr(sn, key))

        ss_table = utils.load_table(self.ss_table_name, conn)
        for network in self.share_networks:
            for ss in conn.execute(ss_table.select().where(
                    ss_table.c.share_network_id == network['id'])):
                self.test_case.assertFalse(hasattr(ss,
                                                   'share_network_subnet_id'))
                self.test_case.assertTrue(hasattr(ss, 'share_network_id'))
                # Bug fix: compare the server's share_network_id with the
                # network id (the original compared the server's own 'id'),
                # and use '_mapping' row access for SQLAlchemy 2.x
                # compatibility, consistent with the rest of this module.
                self.test_case.assertEqual(
                    network['id'], ss._mapping['share_network_id'])

        # Check that the created table doesn't exist anymore
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, self.sns_table_name, conn)
@map_to_migration('e6d88547b381')
class ShareInstanceProgressFieldChecks(BaseMigrationChecks):
    """Check the 'progress' column added to share instances."""

    si_table_name = 'share_instances'
    progress_field_name = 'progress'

    def setup_upgrade_data(self, conn):
        # No extra fixtures are needed for this migration.
        pass

    def check_upgrade(self, conn, data):
        instances = utils.load_table(self.si_table_name, conn)

        for record in conn.execute(instances.select()):
            self.test_case.assertTrue(
                hasattr(record, self.progress_field_name))
            # Available instances are backfilled to '100%'; every other
            # status leaves the field unset.
            if record._mapping['status'] == constants.STATUS_AVAILABLE:
                self.test_case.assertEqual(
                    '100%',
                    record._mapping[self.progress_field_name]
                )
            else:
                self.test_case.assertIsNone(
                    record._mapping[self.progress_field_name])

    def check_downgrade(self, conn):
        instances = utils.load_table(self.si_table_name, conn)

        for record in conn.execute(instances.select()):
            self.test_case.assertFalse(
                hasattr(record, self.progress_field_name))
@map_to_migration('5aa813ae673d')
class ShareServerTaskState(BaseMigrationChecks):
    """Check 'task_state' and 'source_share_server_id' on share servers."""

    def setup_upgrade_data(self, conn):
        # A single share server is enough to exercise the new columns.
        server_row = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'active',
        }
        servers_table = utils.load_table('share_servers', conn)
        conn.execute(servers_table.insert().values(server_row))

    def check_upgrade(self, conn, data):
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            # Both columns exist and default to NULL.
            self.test_case.assertTrue(hasattr(server, 'task_state'))
            self.test_case.assertTrue(
                hasattr(server, 'source_share_server_id'))
            self.test_case.assertIsNone(server._mapping['task_state'])
            self.test_case.assertIsNone(
                server._mapping['source_share_server_id'])

    def check_downgrade(self, conn):
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            self.test_case.assertFalse(hasattr(server, 'task_state'))
            self.test_case.assertFalse(
                hasattr(server, 'source_share_server_id'))
@map_to_migration('478c445d8d3e')
class AddUpdateSecurityServiceControlFields(BaseMigrationChecks):
    """Check security-service update fields and the async operation table."""

    def setup_upgrade_data(self, conn):
        # Create share network
        network_row = {
            'id': uuidutils.generate_uuid(),
            'user_id': 'user_id',
            'project_id': 'project_id',
        }
        networks_table = utils.load_table('share_networks', conn)
        conn.execute(networks_table.insert().values(network_row))

        # A subnet inside that network.
        subnet_row = {
            'id': uuidutils.generate_uuid(),
            'share_network_id': network_row['id']
        }
        subnets_table = utils.load_table('share_network_subnets', conn)
        conn.execute(subnets_table.insert().values(subnet_row))

        # Create share server
        server_row = {
            'id': uuidutils.generate_uuid(),
            'share_network_subnet_id': subnet_row['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        servers_table = utils.load_table('share_servers', conn)
        conn.execute(servers_table.insert().values(server_row))

    def check_upgrade(self, conn, data):
        # Share servers gain 'security_service_update_support' (False).
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            self.test_case.assertTrue(
                hasattr(server, 'security_service_update_support'))
            self.test_case.assertEqual(
                False, server.security_service_update_support)

        # Share networks gain a 'status' column defaulting to active.
        networks_table = utils.load_table('share_networks', conn)
        for network in conn.execute(networks_table.select()):
            self.test_case.assertTrue(hasattr(network, 'status'))
            self.test_case.assertEqual(constants.STATUS_NETWORK_ACTIVE,
                                       network.status)

        # The new 'async_operation_data' table must accept rows.
        async_row = {
            'created_at': datetime.datetime(2021, 3, 12, 17, 40, 34),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'entity_uuid': uuidutils.generate_uuid(),
            'key': 't' * 255,
            'value': 'v' * 1023,
        }
        async_table = utils.load_table('async_operation_data', conn)
        conn.execute(async_table.insert().values(async_row))

    def check_downgrade(self, conn):
        servers_table = utils.load_table('share_servers', conn)
        for server in conn.execute(servers_table.select()):
            self.test_case.assertFalse(
                hasattr(server, 'security_service_update_support'))
        networks_table = utils.load_table('share_networks', conn)
        for network in conn.execute(networks_table.select()):
            self.test_case.assertFalse(hasattr(network, 'status'))

        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'async_operation_data', conn)
@map_to_migration('1946cb97bb8d')
class ShareIsSoftDeleted(BaseMigrationChecks):
    """Check 'is_soft_deleted'/'scheduled_to_be_deleted_at' on shares."""

    def setup_upgrade_data(self, conn):
        # Two shares, each with one instance.
        share_rows = [{'id': 'foo_share_id1'}, {'id': 'bar_share_id1'}]
        shares_table = utils.load_table('shares', conn)
        for row in share_rows:
            conn.execute(shares_table.insert().values(row))

        instance_rows = [
            {'id': 'foo_share_instance_id_oof1',
             'share_id': share_rows[0]['id'],
             'cast_rules_to_readonly': False},
            {'id': 'bar_share_instance_id_rab1',
             'share_id': share_rows[1]['id'],
             'cast_rules_to_readonly': False},
        ]
        instances_table = utils.load_table('share_instances', conn)
        for row in instance_rows:
            conn.execute(instances_table.insert().values(row))

    def check_upgrade(self, conn, data):
        shares_table = utils.load_table('shares', conn)
        for share in conn.execute(shares_table.select()):
            # New columns exist; nothing is soft-deleted by default.
            self.test_case.assertTrue(hasattr(share, 'is_soft_deleted'))
            self.test_case.assertTrue(
                hasattr(share, 'scheduled_to_be_deleted_at'))
            self.test_case.assertIn(
                share._mapping['is_soft_deleted'], (0, False))
            self.test_case.assertIsNone(
                share._mapping['scheduled_to_be_deleted_at'])

    def check_downgrade(self, conn):
        shares_table = utils.load_table('shares', conn)
        for share in conn.execute(shares_table.select()):
            self.test_case.assertFalse(hasattr(share, 'is_soft_deleted'))
            self.test_case.assertFalse(
                hasattr(share, 'scheduled_to_be_deleted_at'))
@map_to_migration('a87e0fb17dee')
class ShareServerMultipleSubnets(BaseMigrationChecks):
    """Check the server/subnet mapping table for multiple-subnet support."""

    def setup_upgrade_data(self, conn):
        # Create share network
        network_row = {
            'id': uuidutils.generate_uuid(),
            'user_id': 'user_id_multiple_subnets',
            'project_id': 'project_id_multiple_subnets',
        }
        networks_table = utils.load_table('share_networks', conn)
        conn.execute(networks_table.insert().values(network_row))

        # Create share network subnets
        subnet_row = {
            'id': uuidutils.generate_uuid(),
            'share_network_id': network_row['id']
        }
        subnets_table = utils.load_table('share_network_subnets', conn)
        conn.execute(subnets_table.insert().values(subnet_row))

        # Share server still pointing at the subnet (pre-upgrade schema).
        server_row = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'active',
            'share_network_subnet_id': subnet_row['id'],
        }
        servers_table = utils.load_table('share_servers', conn)
        conn.execute(servers_table.insert().values(server_row))

    def check_upgrade(self, conn, data):
        mappings_table = utils.load_table(
            'share_server_share_network_subnet_mappings', conn)
        servers_table = utils.load_table('share_servers', conn)
        subnets_table = utils.load_table('share_network_subnets', conn)
        allocations_table = utils.load_table('network_allocations', conn)

        # Network allocations now reference the subnet directly.
        allocation = conn.execute(allocations_table.select()).first()
        self.test_case.assertFalse(allocation is None)
        self.test_case.assertTrue(
            hasattr(allocation, 'share_network_subnet_id'))

        for mapping in conn.execute(mappings_table.select()):
            self.test_case.assertTrue(
                hasattr(mapping, 'share_network_subnet_id'))
            self.test_case.assertTrue(
                hasattr(mapping, 'share_server_id'))

            # The mapped server exists, lost its direct subnet column and
            # gained the allocation-update capability flag.
            server = conn.execute(
                servers_table
                .select()
                .where(servers_table.c.id ==
                       mapping._mapping['share_server_id'])
            ).first()
            self.test_case.assertFalse(server is None)
            self.test_case.assertFalse(
                hasattr(server, 'share_network_subnet_id'))
            self.test_case.assertTrue(
                hasattr(server, 'network_allocation_update_support'))

            # The mapped subnet must exist as well.
            subnet = conn.execute(
                subnets_table
                .select()
                .where(subnets_table.c.id ==
                       mapping._mapping['share_network_subnet_id'])
            ).first()
            self.test_case.assertFalse(subnet is None)

    def check_downgrade(self, conn):
        servers_table = utils.load_table('share_servers', conn)
        allocations_table = utils.load_table('network_allocations', conn)
        # The mapping table must be gone.
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table,
            'share_server_share_network_subnet_mappings', conn)

        for server in conn.execute(servers_table.select()):
            self.test_case.assertTrue(
                hasattr(server, 'share_network_subnet_id'))
            self.test_case.assertFalse(
                hasattr(server, 'network_allocation_update_support'))

        allocation = conn.execute(allocations_table.select()).first()
        self.test_case.assertFalse(
            hasattr(allocation, 'share_network_subnet_id'))
@map_to_migration('bb5938d74b73')
class AddSnapshotMetadata(BaseMigrationChecks):
    """Check creation and removal of the share snapshot metadata table."""

    snapshot_id = uuidutils.generate_uuid()
    new_table_name = 'share_snapshot_metadata'

    def setup_upgrade_data(self, conn):
        # A share to hang the snapshot off.
        share_row = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "NFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        shares_table = utils.load_table('shares', conn)
        conn.execute(shares_table.insert().values(share_row))

        # One instance of that share.
        instance_row = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_row['id'],
            'status': 'available',
            'access_rules_status': 'active',
            'cast_rules_to_readonly': False,
        }
        instances_table = utils.load_table('share_instances', conn)
        conn.execute(instances_table.insert().values(instance_row))

        # The snapshot that the metadata record will reference.
        snapshot_row = {
            'id': self.snapshot_id,
            'share_id': share_row['id']
        }
        snapshots_table = utils.load_table('share_snapshots', conn)
        conn.execute(snapshots_table.insert().values(snapshot_row))

        # And one snapshot instance.
        snapshot_instance_row = {
            'id': uuidutils.generate_uuid(),
            'snapshot_id': snapshot_row['id'],
            'share_instance_id': instance_row['id']
        }
        snap_instances_table = utils.load_table(
            'share_snapshot_instances', conn)
        conn.execute(snap_instances_table.insert().values(
            snapshot_instance_row))

    def check_upgrade(self, conn, data):
        # Insert a metadata row and read it back to verify every column.
        metadata_row = {
            'id': 1,
            'key': 't' * 255,
            'value': 'v' * 1023,
            'share_snapshot_id': self.snapshot_id,
            'deleted': 'False',
        }

        metadata_table = utils.load_table(self.new_table_name, conn)
        conn.execute(metadata_table.insert().values(metadata_row))

        item = conn.execute(metadata_table.select().where(
            metadata_table.c.id == metadata_row['id'])).first()
        self.test_case.assertTrue(hasattr(item, 'id'))
        self.test_case.assertEqual(metadata_row['id'], item._mapping['id'])
        self.test_case.assertTrue(hasattr(item, 'key'))
        self.test_case.assertEqual(metadata_row['key'], item._mapping['key'])
        self.test_case.assertTrue(hasattr(item, 'value'))
        self.test_case.assertEqual(
            metadata_row['value'], item._mapping['value'])
        self.test_case.assertTrue(hasattr(item, 'share_snapshot_id'))
        self.test_case.assertEqual(self.snapshot_id,
                                   item._mapping['share_snapshot_id'])
        self.test_case.assertTrue(hasattr(item, 'deleted'))
        self.test_case.assertEqual('False', item._mapping['deleted'])

    def check_downgrade(self, conn):
        # The metadata table must be gone again after the downgrade.
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('ac0620cbe74d')
class AddSubnetMetadata(BaseMigrationChecks):
    """Check creation and removal of the subnet metadata table."""

    share_subnet_id = uuidutils.generate_uuid()
    new_table_name = 'share_network_subnet_metadata'

    def setup_upgrade_data(self, conn):
        # A share network to own the subnet.
        network_row = {
            'id': uuidutils.generate_uuid(),
            'user_id': 'fake',
            'project_id': 'fake'
        }
        networks_table = utils.load_table('share_networks', conn)
        conn.execute(networks_table.insert().values(network_row))

        # The subnet that the metadata record will reference.
        subnet_row = {
            'id': self.share_subnet_id,
            'share_network_id': network_row['id']
        }
        subnets_table = utils.load_table('share_network_subnets', conn)
        conn.execute(subnets_table.insert().values(subnet_row))

    def check_upgrade(self, conn, data):
        # Insert a metadata row and read it back to verify every column.
        metadata_row = {
            'id': 1,
            'key': 't' * 255,
            'value': 'v' * 1023,
            'share_network_subnet_id': self.share_subnet_id,
            'deleted': 'False',
        }

        metadata_table = utils.load_table(self.new_table_name, conn)
        conn.execute(metadata_table.insert().values(metadata_row))

        item = conn.execute(metadata_table.select().where(
            metadata_table.c.id == metadata_row['id'])).first()
        self.test_case.assertTrue(hasattr(item, 'id'))
        self.test_case.assertEqual(metadata_row['id'], item._mapping['id'])
        self.test_case.assertTrue(hasattr(item, 'key'))
        self.test_case.assertEqual(metadata_row['key'], item._mapping['key'])
        self.test_case.assertTrue(hasattr(item, 'value'))
        self.test_case.assertEqual(
            metadata_row['value'], item._mapping['value'])
        self.test_case.assertTrue(hasattr(item, 'share_network_subnet_id'))
        self.test_case.assertEqual(
            self.share_subnet_id, item._mapping['share_network_subnet_id'])
        self.test_case.assertTrue(hasattr(item, 'deleted'))
        self.test_case.assertEqual('False', item._mapping['deleted'])

    def check_downgrade(self, conn):
        # The metadata table must be gone again after the downgrade.
        self.test_case.assertRaises(sa_exc.NoSuchTableError,
                                    utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('aebe2a413e13')
class AddServiceState(BaseMigrationChecks):
    """Check the 'state' column added to the services table."""

    def _get_service_data(self, options):
        # Common service attributes, overridden per fixture via 'options'.
        service = {
            'binary': 'manila-share',
            'topic': 'share',
            'disabled': False,
            'report_count': '100',
        }
        service.update(options)
        return service

    def setup_upgrade_data(self, conn):
        # Two service rows on distinct hosts.
        services_table = utils.load_table('services', conn)
        for host in ('fake1', 'fake2'):
            conn.execute(services_table.insert().values(
                self._get_service_data({'host': host})))

    def check_upgrade(self, conn, data):
        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertTrue(hasattr(service, 'state'))

    def check_downgrade(self, conn):
        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertFalse(hasattr(service, 'state'))
@map_to_migration('cb20f743ca7b')
class AddResourceLocks(BaseMigrationChecks):
    """Check creation and removal of the 'resource_locks' table."""

    def setup_upgrade_data(self, conn):
        # Nothing to pre-create; the migration only adds a new table.
        pass

    def check_upgrade(self, conn, data):
        # A fully-populated lock row must be insertable.
        lock_row = {
            'id': uuidutils.generate_uuid(),
            'project_id': uuidutils.generate_uuid(dashed=False),
            'user_id': uuidutils.generate_uuid(dashed=False),
            'resource_id': uuidutils.generate_uuid(),
            'created_at': datetime.datetime(2023, 7, 18, 12, 6, 30),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 'False',
            'resource_type': 'share',
            'resource_action': 'delete',
            'lock_reason': 'xyzzy' * 200,
            'lock_context': 'user',
        }

        locks_table = utils.load_table('resource_locks', conn)
        conn.execute(locks_table.insert().values(lock_row))

    def check_downgrade(self, conn):
        # The locks table must be gone again after the downgrade.
        self.test_case.assertRaises(sa_exc.NoSuchTableError,
                                    utils.load_table,
                                    'resource_locks', conn)
@map_to_migration('99d328f0a3d2')
class ServiceDisabledReason(BaseMigrationChecks):
    """Check the 'disabled_reason' column added to the services table."""

    def _get_service_data(self, options):
        # Common service attributes, overridden per fixture via 'options'.
        service = {
            'binary': 'manila-share',
            'topic': 'share',
            'disabled': False,
            'report_count': '100',
        }
        service.update(options)
        return service

    def setup_upgrade_data(self, conn):
        # Two service rows on distinct hosts.
        services_table = utils.load_table('services', conn)
        for host in ('fake1', 'fake2'):
            conn.execute(services_table.insert().values(
                self._get_service_data({'host': host})))

    def check_upgrade(self, conn, data):
        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertTrue(hasattr(service, 'disabled_reason'))

    def check_downgrade(self, conn):
        services_table = utils.load_table('services', conn)
        for service in conn.execute(services_table.select()):
            self.test_case.assertFalse(hasattr(service, 'disabled_reason'))