ceph-volume: drop filestore support
author     Guillaume Abrioux <gabrioux@ibm.com>  Wed, 22 Mar 2023 09:57:40 +0000 (09:57 +0000)
committer  Guillaume Abrioux <gabrioux@ibm.com>  Mon, 15 May 2023 11:05:39 +0000 (13:05 +0200)
FileStore support has been dropped as of Ceph Reef; let's drop it
from ceph-volume too.

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
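
The user-visible effect is that the filestore-specific flags disappear from the lvm
sub-commands. A quick, illustrative way to confirm this on a host with ceph-volume
on PATH (mirroring the assertions in the updated unit tests further down) is to
inspect the prepare help text; this is a sketch, not part of the commit:

    import subprocess

    # after this change the help output only advertises bluestore; the
    # filestore group (including --journal / --journal-size) is gone
    help_text = subprocess.run(
        ['ceph-volume', 'lvm', 'prepare', '--help'],
        capture_output=True, text=True,
    ).stdout
    assert 'Use the bluestore objectstore' in help_text
    assert 'Use the filestore objectstore' not in help_text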
34 files changed:
src/ceph-volume/ceph_volume/api/lvm.py
src/ceph-volume/ceph_volume/devices/lvm/activate.py
src/ceph-volume/ceph_volume/devices/lvm/batch.py
src/ceph-volume/ceph_volume/devices/lvm/common.py
src/ceph-volume/ceph_volume/devices/lvm/create.py
src/ceph-volume/ceph_volume/devices/lvm/prepare.py
src/ceph-volume/ceph_volume/devices/lvm/zap.py
src/ceph-volume/ceph_volume/devices/raw/prepare.py
src/ceph-volume/ceph_volume/devices/simple/activate.py
src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts [deleted file]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts [deleted file]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml [deleted file]
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml [deleted symlink]
src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml [deleted file]
src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
src/ceph-volume/ceph_volume/tests/util/test_prepare.py
src/ceph-volume/ceph_volume/util/arg_validators.py
src/ceph-volume/ceph_volume/util/encryption.py
src/ceph-volume/ceph_volume/util/prepare.py

index 5277102da9b26c4b73d654756890e2cb30787ac8..16cbc08b26254fb59b623d675f497b1552f0582c 100644 (file)
@@ -1018,7 +1018,6 @@ def create_lv(name_prefix,
     # be so this function will set it after creation using the mapping
     # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
     type_path_tag = {
-        'journal': 'ceph.journal_device',
         'data': 'ceph.data_device',
         'block': 'ceph.block_device',
         'wal': 'ceph.wal_device',
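
For reference, a standalone sketch of how the entries visible in this hunk are
meant to be used; set_tags and lv_path follow the LVM API seen elsewhere in this
diff, while the helper name tag_new_lv is made up for illustration:

    type_path_tag = {
        'data': 'ceph.data_device',
        'block': 'ceph.block_device',
        'wal': 'ceph.wal_device',
    }

    def tag_new_lv(lv, device_type):
        # create_lv() records the device path under the tag matching its type;
        # with 'journal' removed there is no filestore entry left to set
        path_tag = type_path_tag.get(device_type)
        if path_tag:
            lv.set_tags({path_tag: lv.lv_path})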
index 53ed6aa47918e786ae6109b3299cf26f60cec444..feb91053b4478982c4f973c436aca3ba1b11d7aa 100644 (file)
@@ -15,86 +15,6 @@ from .listing import direct_report
 logger = logging.getLogger(__name__)
 
 
-def activate_filestore(osd_lvs, no_systemd=False):
-    # find the osd
-    for osd_lv in osd_lvs:
-        if osd_lv.tags.get('ceph.type') == 'data':
-            data_lv = osd_lv
-            break
-    else:
-        raise RuntimeError('Unable to find a data LV for filestore activation')
-
-    is_encrypted = data_lv.tags.get('ceph.encrypted', '0') == '1'
-    is_vdo = data_lv.tags.get('ceph.vdo', '0')
-
-    osd_id = data_lv.tags['ceph.osd_id']
-    configuration.load_ceph_conf_path(data_lv.tags['ceph.cluster_name'])
-    configuration.load()
-    # it may have a volume with a journal
-    for osd_lv in osd_lvs:
-        if osd_lv.tags.get('ceph.type') == 'journal':
-            osd_journal_lv = osd_lv
-            break
-    else:
-        osd_journal_lv = None
-
-    # TODO: add sensible error reporting if this is ever the case
-    # blow up with a KeyError if this doesn't exist
-    osd_fsid = data_lv.tags['ceph.osd_fsid']
-    if not osd_journal_lv:
-        # must be a disk partition, by querying blkid by the uuid we are ensuring that the
-        # device path is always correct
-        journal_uuid = data_lv.tags['ceph.journal_uuid']
-        osd_journal = disk.get_device_from_partuuid(journal_uuid)
-    else:
-        journal_uuid = osd_journal_lv.lv_uuid
-        osd_journal = data_lv.tags['ceph.journal_device']
-
-    if not osd_journal:
-        raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)
-
-    # this is done here, so that previous checks that ensure path availability
-    # and correctness can still be enforced, and report if any issues are found
-    if is_encrypted:
-        lockbox_secret = data_lv.tags['ceph.cephx_lockbox_secret']
-        # this keyring writing is idempotent
-        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
-        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
-        encryption_utils.luks_open(dmcrypt_secret, data_lv.lv_path, data_lv.lv_uuid)
-        encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)
-
-        osd_journal = '/dev/mapper/%s' % journal_uuid
-        source = '/dev/mapper/%s' % data_lv.lv_uuid
-    else:
-        source = data_lv.lv_path
-
-    # mount the osd
-    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
-    if not system.device_is_mounted(source, destination=destination):
-        prepare_utils.mount_osd(source, osd_id, is_vdo=is_vdo)
-
-    # ensure that the OSD destination is always chowned properly
-    system.chown(destination)
-
-    # always re-do the symlink regardless if it exists, so that the journal
-    # device path that may have changed can be mapped correctly every time
-    destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
-    process.run(['ln', '-snf', osd_journal, destination])
-
-    # make sure that the journal has proper permissions
-    system.chown(osd_journal)
-
-    if no_systemd is False:
-        # enable the ceph-volume unit for this OSD
-        systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
-
-        # enable the OSD
-        systemctl.enable_osd(osd_id)
-
-        # start the OSD
-        systemctl.start_osd(osd_id)
-    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
-
 
 def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
     """
@@ -279,30 +199,16 @@ class Activate(object):
 
         # This argument is only available when passed in directly or via
         # systemd, not when ``create`` is being used
+        # placeholder for when support for a new objectstore is added
         if getattr(args, 'auto_detect_objectstore', False):
             logger.info('auto detecting objectstore')
-            # may get multiple lvs, so can't do get_the_lvs() calls here
-            for lv in lvs:
-                has_journal = lv.tags.get('ceph.journal_uuid')
-                if has_journal:
-                    logger.info('found a journal associated with the OSD, '
-                                'assuming filestore')
-                    return activate_filestore(lvs, args.no_systemd)
-
-            logger.info('unable to find a journal associated with the OSD, '
-                        'assuming bluestore')
-
             return activate_bluestore(lvs, args.no_systemd)
 
-        # explicit filestore/bluestore flags take precedence
+        # explicit 'objectstore' flags take precedence
         if getattr(args, 'bluestore', False):
             activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
-        elif getattr(args, 'filestore', False):
-            activate_filestore(lvs, args.no_systemd)
         elif any('ceph.block_device' in lv.tags for lv in lvs):
             activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
-        elif any('ceph.data_device' in lv.tags for lv in lvs):
-            activate_filestore(lvs, args.no_systemd)
 
     def main(self):
         sub_command_help = dedent("""
@@ -348,11 +254,6 @@ class Activate(object):
             action='store_true',
             help='force bluestore objectstore activation',
         )
-        parser.add_argument(
-            '--filestore',
-            action='store_true',
-            help='force filestore objectstore activation',
-        )
         parser.add_argument(
             '--all',
             dest='activate_all',
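
With activate_filestore gone, the dispatch in Activate.activate() collapses to the
bluestore paths only. A minimal standalone sketch of the resulting control flow
(not the real ceph-volume code; activate_bluestore is passed in rather than
imported, so the snippet stays self-contained):

    def dispatch_activation(args, lvs, activate_bluestore):
        # auto-detection used to scan LV tags for ceph.journal_uuid and fall
        # back to filestore; now it can only ever mean bluestore
        if getattr(args, 'auto_detect_objectstore', False):
            return activate_bluestore(lvs, args.no_systemd)
        # explicit flag, or presence of a ceph.block_device tag
        if getattr(args, 'bluestore', False) or \
                any('ceph.block_device' in lv.tags for lv in lvs):
            return activate_bluestore(lvs, args.no_systemd,
                                      getattr(args, 'no_tmpfs', False))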
index d867fe2d87e3bb33c30bbce277433095449d80f1..69a3f672b4825a18b0a45378df2e596615598ab3 100644 (file)
@@ -29,11 +29,10 @@ def device_formatter(devices):
     return ''.join(lines)
 
 
-def ensure_disjoint_device_lists(data, db=[], wal=[], journal=[]):
+def ensure_disjoint_device_lists(data, db=[], wal=[]):
     # check that all device lists are disjoint with each other
     if not all([set(data).isdisjoint(set(db)),
                 set(data).isdisjoint(set(wal)),
-                set(data).isdisjoint(set(journal)),
                 set(db).isdisjoint(set(wal))]):
         raise Exception('Device lists are not disjoint')
 
@@ -220,13 +219,6 @@ class Batch(object):
             default=[],
             help='Devices to provision OSDs wal volumes',
         )
-        parser.add_argument(
-            '--journal-devices',
-            nargs='*',
-            type=arg_validators.ValidBatchDevice(),
-            default=[],
-            help='Devices to provision OSDs journal volumes',
-        )
         parser.add_argument(
             '--auto',
             action='store_true',
@@ -246,11 +238,6 @@ class Batch(object):
             action='store_true',
             help='bluestore objectstore (default)',
         )
-        parser.add_argument(
-            '--filestore',
-            action='store_true',
-            help='filestore objectstore',
-        )
         parser.add_argument(
             '--report',
             action='store_true',
@@ -323,25 +310,6 @@ class Batch(object):
             type=int,
             help='Provision slots on WAL device, can remain unoccupied'
         )
-        def journal_size_in_mb_hack(size):
-            # TODO give user time to adjust, then remove this
-            if size and size[-1].isdigit():
-                mlogger.warning('DEPRECATION NOTICE')
-                mlogger.warning('--journal-size as integer is parsed as megabytes')
-                mlogger.warning('A future release will parse integers as bytes')
-                mlogger.warning('Add a "M" to explicitly pass a megabyte size')
-                size += 'M'
-            return disk.Size.parse(size)
-        parser.add_argument(
-            '--journal-size',
-            type=journal_size_in_mb_hack,
-            help='Override the "osd_journal_size" value, in megabytes'
-        )
-        parser.add_argument(
-            '--journal-slots',
-            type=int,
-            help='Provision slots on journal device, can remain unoccupied'
-        )
         parser.add_argument(
             '--prepare',
             action='store_true',
@@ -356,7 +324,7 @@ class Batch(object):
         )
         self.args = parser.parse_args(argv)
         self.parser = parser
-        for dev_list in ['', 'db_', 'wal_', 'journal_']:
+        for dev_list in ['', 'db_', 'wal_']:
             setattr(self, '{}usable'.format(dev_list), [])
 
     def report(self, plan):
@@ -395,7 +363,7 @@ class Batch(object):
         '''
         Helper for legacy auto behaviour.
         Sorts drives into rotating and non-rotating, the latter being used for
-        db or journal.
+        db.
         '''
         mlogger.warning('DEPRECATION NOTICE')
         mlogger.warning('You are using the legacy automatic disk sorting behavior')
@@ -408,10 +376,7 @@ class Batch(object):
             # no need for additional sorting, we'll only deploy standalone on ssds
             return
         self.args.devices = rotating
-        if self.args.filestore:
-            self.args.journal_devices = ssd
-        else:
-            self.args.db_devices = ssd
+        self.args.db_devices = ssd
 
     @decorators.needs_root
     def main(self):
@@ -420,19 +385,18 @@ class Batch(object):
 
         # Default to bluestore here since defaulting it in add_argument may
         # cause both to be True
-        if not self.args.bluestore and not self.args.filestore:
+        if not self.args.bluestore:
             self.args.bluestore = True
 
         if (self.args.auto and not self.args.db_devices and not
-            self.args.wal_devices and not self.args.journal_devices):
+            self.args.wal_devices):
             self._sort_rotational_disks()
 
         self._check_slot_args()
 
         ensure_disjoint_device_lists(self.args.devices,
                                      self.args.db_devices,
-                                     self.args.wal_devices,
-                                     self.args.journal_devices)
+                                     self.args.wal_devices)
 
         plan = self.get_plan(self.args)
 
@@ -453,7 +417,6 @@ class Batch(object):
         defaults = common.get_default_args()
         global_args = [
             'bluestore',
-            'filestore',
             'dmcrypt',
             'crush_device_class',
             'no_systemd',
@@ -473,8 +436,6 @@ class Batch(object):
         if args.bluestore:
             plan = self.get_deployment_layout(args, args.devices, args.db_devices,
                                               args.wal_devices)
-        elif args.filestore:
-            plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
         return plan
 
     def get_deployment_layout(self, args, devices, fast_devices=[],
@@ -500,7 +461,8 @@ class Batch(object):
             return plan
         requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
 
-        fast_type = 'block_db' if args.bluestore else 'journal'
+        if args.bluestore:
+            fast_type = 'block_db'
         fast_allocations = self.fast_allocations(fast_devices,
                                                  requested_osds,
                                                  num_osds,
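
The batch planner now only juggles data, db and wal device lists. Below is a
self-contained copy of the trimmed disjointness check shown in the hunk above,
followed by a usage example whose device paths are placeholders, not taken from
the diff:

    def ensure_disjoint_device_lists(data, db=[], wal=[]):
        # all three lists must be pairwise disjoint; the journal list is gone
        if not all([set(data).isdisjoint(set(db)),
                    set(data).isdisjoint(set(wal)),
                    set(db).isdisjoint(set(wal))]):
            raise Exception('Device lists are not disjoint')

    ensure_disjoint_device_lists(['/dev/sda', '/dev/sdb'],
                                 db=['/dev/nvme0n1'],
                                 wal=['/dev/nvme1n1'])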
index edc8e1cbce117c377b60bf36d97b04090b9072e5..35e53181aff07021d83df92eb31d02ef5e4ed4ad 100644 (file)
@@ -126,33 +126,12 @@ bluestore_args = {
     },
 }
 
-filestore_args = {
-    '--filestore': {
-        'action': 'store_true',
-        'help': 'Use the filestore objectstore',
-    },
-    '--journal': {
-        'help': 'A logical volume (vg_name/lv_name), or path to a device',
-        'type': arg_validators.ValidDevice(as_string=True),
-    },
-    '--journal-size': {
-        'help': 'Size of journal LV in case a raw block device was passed in --journal',
-        'default': '0',
-        'type': disk.Size.parse
-    },
-    '--journal-slots': {
-        'help': ('Intended number of slots on journal device. The new OSD gets one'
-              'of those slots or 1/nth of the available capacity'),
-        'type': int,
-        'default': 1,
-    },
-}
 
 def get_default_args():
     defaults = {}
     def format_name(name):
         return name.strip('-').replace('-', '_').replace('.', '_')
-    for argset in (common_args, filestore_args, bluestore_args):
+    for argset in (common_args, bluestore_args):
         defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()})
     return defaults
 
@@ -168,7 +147,6 @@ def common_parser(prog, description):
         description=description,
     )
 
-    filestore_group = parser.add_argument_group('filestore')
     bluestore_group = parser.add_argument_group('bluestore')
 
     for name, kwargs in common_args.items():
@@ -177,9 +155,6 @@ def common_parser(prog, description):
     for name, kwargs in bluestore_args.items():
         bluestore_group.add_argument(name, **kwargs)
 
-    for name, kwargs in filestore_args.items():
-        filestore_group.add_argument(name, **kwargs)
-
     # Do not parse args, so that consumers can do something before the args get
     # parsed triggering argparse behavior
     return parser
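
With filestore_args removed, get_default_args() only flattens common_args and
bluestore_args. The name mangling it relies on is unchanged; a tiny standalone
illustration (the flag names below are examples drawn from elsewhere in this diff):

    def format_name(name):
        return name.strip('-').replace('-', '_').replace('.', '_')

    assert format_name('--crush-device-class') == 'crush_device_class'
    assert format_name('--block.db') == 'block_db'
    # '--journal-size' would still mangle fine, but the option no longer exists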
index af2cd96c0845ea93a0a5e141ce353bb5d00396a1..631a21b239d2e5c3056973918441b062e7307ece 100644 (file)
@@ -68,10 +68,10 @@ class Create(object):
         if len(self.argv) == 0:
             print(sub_command_help)
             return
-        exclude_group_options(parser, groups=['filestore', 'bluestore'], argv=self.argv)
+        exclude_group_options(parser, groups=['bluestore'], argv=self.argv)
         args = parser.parse_args(self.argv)
         # Default to bluestore here since defaulting it in add_argument may
         # cause both to be True
-        if not args.bluestore and not args.filestore:
+        if not args.bluestore:
             args.bluestore = True
         self.create(args)
index 2f715fdba122c8a87fc2a097a34df4c66c0766a1..1cf19d98d92844cc1a6e42818bf4d3f52349531f 100644 (file)
@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
 def prepare_dmcrypt(key, device, device_type, tags):
     """
     Helper for devices that are encrypted. The operations needed for
-    block, db, wal, or data/journal devices are all the same
+    block, db, and wal devices are all the same
     """
     if not device:
         return ''
@@ -37,50 +37,6 @@ def prepare_dmcrypt(key, device, device_type, tags):
     return '/dev/mapper/%s' % uuid
 
 
-def prepare_filestore(device, journal, secrets, tags, osd_id, fsid):
-    """
-    :param device: The name of the logical volume to work with
-    :param journal: similar to device but can also be a regular/plain disk
-    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
-    :param id_: The OSD id
-    :param fsid: The OSD fsid, also known as the OSD UUID
-    """
-    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
-
-    # encryption-only operations
-    if secrets.get('dmcrypt_key'):
-        # format and open ('decrypt' devices) and re-assign the device and journal
-        # variables so that the rest of the process can use the mapper paths
-        key = secrets['dmcrypt_key']
-        device = prepare_dmcrypt(key, device, 'data', tags)
-        journal = prepare_dmcrypt(key, journal, 'journal', tags)
-
-    # vdo detection
-    is_vdo = api.is_vdo(device)
-    # create the directory
-    prepare_utils.create_osd_path(osd_id)
-    # format the device
-    prepare_utils.format_device(device)
-    # mount the data device
-    prepare_utils.mount_osd(device, osd_id, is_vdo=is_vdo)
-    # symlink the journal
-    prepare_utils.link_journal(journal, osd_id)
-    # get the latest monmap
-    prepare_utils.get_monmap(osd_id)
-    # prepare the osd filesystem
-    prepare_utils.osd_mkfs_filestore(osd_id, fsid, cephx_secret)
-    # write the OSD keyring if it doesn't exist already
-    prepare_utils.write_keyring(osd_id, cephx_secret)
-    if secrets.get('dmcrypt_key'):
-        # if the device is going to get activated right away, this can be done
-        # here, otherwise it will be recreated
-        encryption_utils.write_lockbox_keyring(
-            osd_id,
-            fsid,
-            tags['ceph.cephx_lockbox_secret']
-        )
-
-
 def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
     """
     :param block: The name of the logical volume for the bluestore data
@@ -201,7 +157,7 @@ class Prepare(object):
         a device or partition will result in error.
 
         :param arg: The value of ``--data`` when parsing args
-        :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
+        :param device_type: Usually ``block``
         :param osd_uuid: The OSD uuid
         """
         device = self.args.data
@@ -298,60 +254,7 @@ class Prepare(object):
             'ceph.crush_device_class': crush_device_class,
             'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
         }
-        if self.args.filestore:
-            if not self.args.journal:
-                logger.info(('no journal was specifed, creating journal lv '
-                             'on {}').format(self.args.data))
-                self.args.journal = self.args.data
-                self.args.journal_size = disk.Size(g=5)
-                # need to adjust data size/slots for colocated journal
-                if self.args.data_size:
-                    self.args.data_size -= self.args.journal_size
-                if self.args.data_slots == 1:
-                    self.args.data_slots = 0
-                else:
-                    raise RuntimeError('Can\'t handle multiple filestore OSDs '
-                                       'with colocated journals yet. Please '
-                                       'create journal LVs manually')
-            tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
-            tags['ceph.encrypted'] = encrypted
-
-            journal_device, journal_uuid, tags = self.setup_device(
-                'journal',
-                self.args.journal,
-                tags,
-                self.args.journal_size,
-                self.args.journal_slots)
-
-            try:
-                vg_name, lv_name = self.args.data.split('/')
-                data_lv = api.get_single_lv(filters={'lv_name': lv_name,
-                                                    'vg_name': vg_name})
-            except ValueError:
-                data_lv = None
-
-            if not data_lv:
-                data_lv = self.prepare_data_device('data', osd_fsid)
-
-            tags['ceph.data_device'] = data_lv.lv_path
-            tags['ceph.data_uuid'] = data_lv.lv_uuid
-            tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)
-            tags['ceph.type'] = 'data'
-            data_lv.set_tags(tags)
-            if not journal_device.startswith('/'):
-                # we got a journal lv, set rest of the tags
-                api.get_single_lv(filters={'lv_name': lv_name,
-                                           'vg_name': vg_name}).set_tags(tags)
-
-            prepare_filestore(
-                data_lv.lv_path,
-                journal_device,
-                secrets,
-                tags,
-                self.osd_id,
-                osd_fsid,
-            )
-        elif self.args.bluestore:
+        if self.args.bluestore:
             try:
                 vg_name, lv_name = self.args.data.split('/')
                 block_lv = api.get_single_lv(filters={'lv_name': lv_name,
@@ -427,15 +330,10 @@ class Prepare(object):
         if len(self.argv) == 0:
             print(sub_command_help)
             return
-        exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore'])
+        exclude_group_options(parser, argv=self.argv, groups=['bluestore'])
         self.args = parser.parse_args(self.argv)
-        # the unfortunate mix of one superset for both filestore and bluestore
-        # makes this validation cumbersome
-        if self.args.filestore:
-            if not self.args.journal:
-                raise SystemExit('--journal is required when using --filestore')
         # Default to bluestore here since defaulting it in add_argument may
         # cause both to be True
-        if not self.args.bluestore and not self.args.filestore:
+        if not self.args.bluestore:
             self.args.bluestore = True
         self.safe_prepare()
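
Only the bluestore branch of Prepare.prepare() survives. Its "vg/lv or raw device"
resolution is unchanged; here is a standalone sketch of that step, with the
ceph-volume helpers passed in as callables so the snippet is self-contained
(get_single_lv and prepare_data_device stand in for the real API):

    def resolve_block_device(data, get_single_lv, prepare_data_device, osd_fsid):
        # '--data vg/lv' resolves to an existing LV; anything else (a raw
        # device or partition) gets an LV created for it on the fly
        try:
            vg_name, lv_name = data.split('/')
            block_lv = get_single_lv(filters={'lv_name': lv_name,
                                              'vg_name': vg_name})
        except ValueError:
            block_lv = None
        if not block_lv:
            block_lv = prepare_data_device('block', osd_fsid)
        return block_lv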
index 708716e5e37a28b0240943dd02f1b7f8679ed2f4..2f6e00f8774986936fc2a135ef7f184d7539440c 100644 (file)
@@ -101,10 +101,9 @@ def ensure_associated_lvs(lvs, lv_tags={}):
     # leaving many journals with osd.1 - usually, only a single LV will be
     # returned
 
-    journal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'journal'}))
     db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
     wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
-    backing_devices = [(journal_lvs, 'journal'), (db_lvs, 'db'),
+    backing_devices = [(db_lvs, 'db'),
                        (wal_lvs, 'wal')]
 
     verified_devices = []
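
zap's LV discovery now only looks for db and wal companions. A self-contained
sketch of that lookup, with get_lvs passed in as a stand-in for
ceph_volume.api.lvm.get_lvs and a local merge_dict mimicking the util helper:

    def merge_dict(a, b):
        merged = dict(a)
        merged.update(b)
        return merged

    def backing_devices(lv_tags, get_lvs):
        # only 'db' and 'wal' companion LVs remain; 'journal' is no longer
        # a ceph.type that zap needs to chase
        db_lvs = get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
        wal_lvs = get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
        return [(db_lvs, 'db'), (wal_lvs, 'wal')]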
index 3c96eedacf34a6d6f30d7ce09eceeb595016092f..6165da3a64346d3e1b8bebb6ee1b531a8129e8bc 100644 (file)
@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
 def prepare_dmcrypt(key, device, device_type, fsid):
     """
     Helper for devices that are encrypted. The operations needed for
-    block, db, wal, or data/journal devices are all the same
+    block, db, and wal devices are all the same
     """
     if not device:
         return ''
index 7439141c03a8b4865238c4825ad58271ed6225fa..f3dcdcef8388076c2e85af9195eb0125b1c6f5e9 100644 (file)
@@ -9,7 +9,6 @@ from textwrap import dedent
 from ceph_volume import process, decorators, terminal, conf
 from ceph_volume.util import system, disk
 from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util import prepare as prepare_utils
 from ceph_volume.systemd import systemctl
 
 
@@ -36,29 +35,15 @@ class Activate(object):
         try:
             objectstore = json_config['type']
         except KeyError:
-            if {'data', 'journal'}.issubset(set(devices)):
-                logger.warning(
-                    '"type" key not found, assuming "filestore" since journal key is present'
-                )
-                objectstore = 'filestore'
-            else:
-                logger.warning(
-                    '"type" key not found, assuming "bluestore" since journal key is not present'
-                )
-                objectstore = 'bluestore'
+            logger.warning(
+                '"type" key not found, assuming "bluestore" since journal key is not present'
+            )
+            objectstore = 'bluestore'
 
         # Go through all the device combinations that are absolutely required,
         # raise an error describing what was expected and what was found
         # otherwise.
-        if objectstore == 'filestore':
-            if {'data', 'journal'}.issubset(set(devices)):
-                return True
-            else:
-                found = [i for i in devices if i in ['data', 'journal']]
-                mlogger.error("Required devices (data, and journal) not present for filestore")
-                mlogger.error('filestore devices found: %s', found)
-                raise RuntimeError('Unable to activate filestore OSD due to missing devices')
-        else:
+        if objectstore == 'bluestore':
             # This is a bit tricky, with newer bluestore we don't need data, older implementations
             # do (e.g. with ceph-disk). ceph-volume just uses a tmpfs that doesn't require data.
             if {'block', 'data'}.issubset(set(devices)):
@@ -176,19 +161,14 @@ class Activate(object):
                 "be skipped, consider cleaning legacy "
                 "json file {}".format(osd_metadata['fsid'], args.json_config))
 
-        journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid'))
         block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
         block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
         block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))
 
         if not system.device_is_mounted(data_device, destination=osd_dir):
-            if osd_metadata.get('type') == 'filestore':
-                prepare_utils.mount_osd(data_device, osd_id)
-            else:
-                process.run(['mount', '-v', data_device, osd_dir])
+            process.run(['mount', '-v', data_device, osd_dir])
 
         device_map = {
-            'journal': journal_device,
             'block': block_device,
             'block.db': block_db_device,
             'block.wal': block_wal_device
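
The simplified objectstore detection in `simple activate` now assumes bluestore
whenever the legacy JSON lacks a 'type' key; the old journal-key heuristic for
filestore is gone. A standalone sketch of that behaviour (not the real method,
which lives on the Activate class):

    import logging

    logger = logging.getLogger(__name__)

    def detect_objectstore(json_config):
        # legacy ceph-disk JSON without a 'type' key can only be bluestore now
        try:
            return json_config['type']
        except KeyError:
            logger.warning('"type" key not found, assuming "bluestore"')
            return 'bluestore'

    assert detect_objectstore({'type': 'bluestore'}) == 'bluestore'
    assert detect_objectstore({}) == 'bluestore'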
index 2237f259eb200ab73bd6723823931cd7820024c8..5d48a0ef4044f5ffac82ed50e643fbd1a94a8f1a 100644 (file)
@@ -10,7 +10,6 @@ class Args(object):
     def __init__(self, **kw):
         # default flags
         self.bluestore = False
-        self.filestore = False
         self.no_systemd = False
         self.auto_detect_objectstore = None
         for k, v in kw.items():
@@ -23,17 +22,6 @@ class TestActivate(object):
     # test the negative side effect with an actual functional run, so we must
     # setup a perfect scenario for this test to check it can really work
     # with/without osd_id
-    def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
-        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
-                               lv_tags="ceph.osd_fsid=1234")
-        volumes = []
-        volumes.append(FooVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
-        monkeypatch.setattr(activate, 'activate_filestore', capture)
-        args = Args(osd_id=None, osd_fsid='1234', filestore=True)
-        activate.Activate([]).activate(args)
-        assert capture.calls[0]['args'][0] == [FooVolume]
-
     def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
         FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
                                lv_tags="ceph.osd_fsid=1234")
@@ -45,19 +33,6 @@ class TestActivate(object):
         activate.Activate([]).activate(args)
         assert capture.calls[0]['args'][0] == [FooVolume]
 
-    def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
-        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
-                               lv_tags="ceph.osd_fsid=1111")
-        volumes = []
-        volumes.append(FooVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
-        monkeypatch.setattr(api, 'get_single_lv', lambda **kwargs: [])
-        monkeypatch.setattr(activate, 'activate_filestore', capture)
-
-        args = Args(osd_id=None, osd_fsid='2222')
-        with pytest.raises(RuntimeError):
-            activate.Activate([]).activate(args)
-
     def test_osd_id_no_osd_fsid(self, is_root):
         args = Args(osd_id=42, osd_fsid=None)
         with pytest.raises(RuntimeError) as result:
@@ -70,149 +45,6 @@ class TestActivate(object):
             activate.Activate([]).activate(args)
         assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
 
-    def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
-        monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
-        fake_enable = Capture()
-        fake_start_osd = Capture()
-        monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
-        monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
-        monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
-        JournalVolume = api.Volume(
-            lv_name='journal',
-            lv_path='/dev/vg/journal',
-            lv_uuid='000',
-            lv_tags=','.join([
-                "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
-                "ceph.journal_uuid=000", "ceph.type=journal",
-                "ceph.osd_id=0", "ceph.osd_fsid=1234"])
-        )
-        DataVolume = api.Volume(
-            lv_name='data',
-            lv_path='/dev/vg/data',
-            lv_uuid='001',
-            lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
-                    "journal,ceph.journal_uuid=000,ceph.type=data," + \
-                    "ceph.osd_id=0,ceph.osd_fsid=1234")
-        volumes = []
-        volumes.append(DataVolume)
-        volumes.append(JournalVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
-        args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
-        activate.Activate([]).activate(args)
-        assert fake_enable.calls == []
-        assert fake_start_osd.calls == []
-
-    def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
-        monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
-        fake_enable = Capture()
-        fake_start_osd = Capture()
-        monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
-        monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
-        monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
-        JournalVolume = api.Volume(
-            lv_name='journal',
-            lv_path='/dev/vg/journal',
-            lv_uuid='000',
-            lv_tags=','.join([
-                "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
-                "ceph.journal_uuid=000", "ceph.type=journal",
-                "ceph.osd_id=0", "ceph.osd_fsid=1234"])
-        )
-        DataVolume = api.Volume(
-            lv_name='data',
-            lv_path='/dev/vg/data',
-            lv_uuid='001',
-            lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
-                    "journal,ceph.journal_uuid=000,ceph.type=data," + \
-                    "ceph.osd_id=0,ceph.osd_fsid=1234")
-        volumes = []
-        volumes.append(DataVolume)
-        volumes.append(JournalVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
-        args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
-                    filestore=True, auto_detect_objectstore=True)
-        activate.Activate([]).activate(args)
-        assert fake_enable.calls == []
-        assert fake_start_osd.calls == []
-
-    def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
-        fake_enable = Capture()
-        fake_start_osd = Capture()
-        monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
-        monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
-        monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
-        monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
-        JournalVolume = api.Volume(
-            lv_name='journal',
-            lv_path='/dev/vg/journal',
-            lv_uuid='000',
-            lv_tags=','.join([
-                "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
-                "ceph.journal_uuid=000", "ceph.type=journal",
-                "ceph.osd_id=0","ceph.osd_fsid=1234"])
-            )
-        DataVolume = api.Volume(
-            lv_name='data',
-            lv_path='/dev/vg/data',
-            lv_uuid='001',
-            lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
-                    "journal,ceph.journal_uuid=000,ceph.type=data," + \
-                    "ceph.osd_id=0,ceph.osd_fsid=1234")
-        volumes = []
-        volumes.append(DataVolume)
-        volumes.append(JournalVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
-        args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
-                    filestore=True, auto_detect_objectstore=False)
-        activate.Activate([]).activate(args)
-        assert fake_enable.calls != []
-        assert fake_start_osd.calls != []
-
-    def test_filestore_systemd(self, is_root, monkeypatch, capture):
-        fake_enable = Capture()
-        fake_start_osd = Capture()
-        monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
-        monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
-        monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
-        monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
-        monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
-        JournalVolume = api.Volume(
-            lv_name='journal',
-            lv_path='/dev/vg/journal',
-            lv_uuid='000',
-            lv_tags=','.join([
-                "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
-                "ceph.journal_uuid=000", "ceph.type=journal",
-                "ceph.osd_id=0","ceph.osd_fsid=1234"])
-            )
-        DataVolume = api.Volume(
-            lv_name='data',
-            lv_path='/dev/vg/data',
-            lv_uuid='001',
-            lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
-                    "journal,ceph.journal_uuid=000,ceph.type=data," + \
-                    "ceph.osd_id=0,ceph.osd_fsid=1234")
-        volumes = []
-        volumes.append(DataVolume)
-        volumes.append(JournalVolume)
-        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
-        args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
-                    filestore=True)
-        activate.Activate([]).activate(args)
-        assert fake_enable.calls != []
-        assert fake_start_osd.calls != []
-
     def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
         monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
         fake_enable = Capture()
@@ -318,16 +150,6 @@ class TestActivateFlags(object):
         activation.activate = capture
         activation.main()
         parsed_args = capture.calls[0]['args'][0]
-        assert parsed_args.filestore is False
-        assert parsed_args.bluestore is False
-
-    def test_uses_filestore(self, capture):
-        args = ['--filestore', '0', 'asdf-ljh-asdf']
-        activation = activate.Activate(args)
-        activation.activate = capture
-        activation.main()
-        parsed_args = capture.calls[0]['args'][0]
-        assert parsed_args.filestore is True
         assert parsed_args.bluestore is False
 
     def test_uses_bluestore(self, capture):
@@ -336,7 +158,6 @@ class TestActivateFlags(object):
         activation.activate = capture
         activation.main()
         parsed_args = capture.calls[0]['args'][0]
-        assert parsed_args.filestore is False
         assert parsed_args.bluestore is True
 
 
index d27134d5341886138a22dc582eebba566781dc96..75073c51aca258b73dd403220eca78ffdaa55fc2 100644 (file)
@@ -150,14 +150,13 @@ class TestBatch(object):
         devices = [device1, device2, device3]
         args = factory(report=True,
                        devices=devices,
-                       filestore=False,
                       )
         b = batch.Batch([])
         b.args = args
         b._sort_rotational_disks()
         assert len(b.args.devices) == 3
 
-    @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+    @pytest.mark.parametrize('objectstore', ['bluestore'])
     def test_batch_sort_mixed(self, factory, objectstore):
         device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
         device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
@@ -165,16 +164,12 @@ class TestBatch(object):
         devices = [device1, device2, device3]
         args = factory(report=True,
                        devices=devices,
-                       filestore=False if objectstore == 'bluestore' else True,
                       )
         b = batch.Batch([])
         b.args = args
         b._sort_rotational_disks()
         assert len(b.args.devices) == 2
-        if objectstore == 'bluestore':
-            assert len(b.args.db_devices) == 1
-        else:
-            assert len(b.args.journal_devices) == 1
+        assert len(b.args.db_devices) == 1
 
     def test_get_physical_osds_return_len(self, factory,
                                           mock_devices_available,
index 1665d76c3884a139ffbae06b3f5ebbab8889c4d6..f91fd7072d0fd4d2bfd5c64b8f64e0b553d5d0eb 100644 (file)
@@ -1,6 +1,5 @@
 import pytest
 from ceph_volume.devices import lvm
-from mock import patch
 
 
 class TestCreate(object):
@@ -14,39 +13,6 @@ class TestCreate(object):
         with pytest.raises(SystemExit):
             lvm.create.Create(argv=['--help']).main()
         stdout, stderr = capsys.readouterr()
-        assert 'Use the filestore objectstore' in stdout
         assert 'Use the bluestore objectstore' in stdout
         assert 'A physical device or logical' in stdout
 
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
-        assert expected in stderr
-
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.create.Create(argv=[
-                '--bluestore', '--data', '/dev/sdfoo',
-                '--journal', '/dev/sf14',
-            ]).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
-        assert expected in stderr
-
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.create.Create(argv=[
-                '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
-                '--journal', '/dev/sf14',
-            ]).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
-        assert expected in stderr
index 9f0a5e0bbc2239816cc9f5d43182dd3843468d8a..0a356988eebc5151d9c685d243810e48e3aba597 100644 (file)
@@ -1,7 +1,7 @@
 import pytest
 from ceph_volume.devices import lvm
 from ceph_volume.api import lvm as api
-from mock.mock import patch, Mock, MagicMock
+from mock.mock import patch, Mock
 
 
 class TestLVM(object):
@@ -62,59 +62,9 @@ class TestPrepare(object):
         with pytest.raises(SystemExit):
             lvm.prepare.Prepare(argv=['--help']).main()
         stdout, stderr = capsys.readouterr()
-        assert 'Use the filestore objectstore' in stdout
         assert 'Use the bluestore objectstore' in stdout
         assert 'A physical device or logical' in stdout
 
-
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
-        assert expected in stderr
-
-
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.prepare.Prepare(argv=[
-                '--bluestore', '--data', '/dev/sdfoo',
-                '--journal', '/dev/sf14',
-            ]).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
-        assert expected in stderr
-
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
-        device_info()
-        with pytest.raises(SystemExit):
-            lvm.prepare.Prepare(argv=[
-                '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
-                '--journal', '/dev/sf14',
-            ]).main()
-        stdout, stderr = capsys.readouterr()
-        expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
-        assert expected in stderr
-
-    @patch('ceph_volume.util.arg_validators.Device')
-    @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
-    def test_journal_is_required_with_filestore(self, m_has_bs_label, m_device, is_root, monkeypatch, device_info):
-        m_device.return_value = MagicMock(exists=True,
-                                          has_fs=False,
-                                          used_by_ceph=False,
-                                          has_partitions=False,
-                                          has_gpt_headers=False)
-        monkeypatch.setattr("os.path.exists", lambda path: True)
-        with pytest.raises(SystemExit) as error:
-            lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
-        expected = '--journal is required when using --filestore'
-        assert expected in str(error.value)
-
     @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
     def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
         m_is_ceph_device.return_value = True
index 5c7bd3117920ac4e19c6e37b2a799d13449f620e..152ac9b09e23bc28ced5ddf09f62df98f23ad736 100644 (file)
@@ -128,42 +128,6 @@ class TestEnableSystemdUnits(object):
 
 class TestValidateDevices(object):
 
-    def test_filestore_missing_journal(self):
-        activation = activate.Activate([])
-        with pytest.raises(RuntimeError) as error:
-            activation.validate_devices({'type': 'filestore', 'data': {}})
-        assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
-
-    def test_filestore_missing_data(self):
-        activation = activate.Activate([])
-        with pytest.raises(RuntimeError) as error:
-            activation.validate_devices({'type': 'filestore', 'journal': {}})
-        assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
-
-    def test_filestore_journal_device_found(self, capsys):
-        activation = activate.Activate([])
-        with pytest.raises(RuntimeError):
-            activation.validate_devices({'type': 'filestore', 'journal': {}})
-        stdout, stderr = capsys.readouterr()
-        assert "devices found: ['journal']" in stderr
-
-    def test_filestore_data_device_found(self, capsys):
-        activation = activate.Activate([])
-        with pytest.raises(RuntimeError):
-            activation.validate_devices({'type': 'filestore', 'data': {}})
-        stdout, stderr = capsys.readouterr()
-        assert "devices found: ['data']" in stderr
-
-    def test_filestore_with_all_devices(self):
-        activation = activate.Activate([])
-        result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}})
-        assert result is True
-
-    def test_filestore_without_type(self):
-        activation = activate.Activate([])
-        result = activation.validate_devices({'journal': {}, 'data': {}})
-        assert result is True
-
     def test_bluestore_with_all_devices(self):
         activation = activate.Activate([])
         result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
index 873c96a0d1857cb9e8b6f2d1809fbb854fa1e64c..bc50be8101b4d0a27a0966014a3c573e2ca5782e 100644 (file)
@@ -1,5 +1,5 @@
 [tox]
-envlist = centos8-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos8-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit, mixed_type_dmcrypt_explicit}
+envlist = centos8-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
 skipsdist = True
 
 [testenv]
@@ -19,12 +19,6 @@ setenv=
   CEPH_VOLUME_DEBUG = 1
   DEBIAN_FRONTEND=noninteractive
 changedir=
-  centos8-filestore-single_type: {toxinidir}/centos8/filestore/single-type
-  centos8-filestore-single_type_dmcrypt: {toxinidir}/centos8/filestore/single-type-dmcrypt
-  centos8-filestore-mixed_type: {toxinidir}/centos8/filestore/mixed-type
-  centos8-filestore-mixed_type_dmcrypt: {toxinidir}/centos8/filestore/mixed-type-dmcrypt
-  centos8-filestore-mixed_type_explicit: {toxinidir}/centos8/filestore/mixed-type-explicit
-  centos8-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/filestore/mixed-type-dmcrypt-explicit
   centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
   centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
   centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/Vagrantfile
deleted file mode 120000 (symlink)
index 16076e4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/group_vars/all
deleted file mode 120000 (symlink)
index d6c7145..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../group_vars/filestore_lvm
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/hosts
deleted file mode 100644 (file)
index e1c1de6..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/setup.yml
deleted file mode 120000 (symlink)
index 1c1a3ce..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/test.yml
deleted file mode 120000 (symlink)
index 1a8c37c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../playbooks/test_filestore.yml
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/create/vagrant_variables.yml
deleted file mode 120000 (symlink)
index d21531f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../vagrant_variables.yml
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/Vagrantfile
deleted file mode 120000 (symlink)
index 16076e4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/group_vars/all
deleted file mode 120000 (symlink)
index a175127..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../group_vars/filestore_lvm_dmcrypt
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/hosts
deleted file mode 100644 (file)
index e1c1de6..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/setup.yml
deleted file mode 120000 (symlink)
index 1c1a3ce..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
deleted file mode 100644 (file)
index 21eff00..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-
-- hosts: osds
-  become: yes
-  tasks:
-
-    - name: stop ceph-osd@2 daemon
-      service:
-        name: ceph-osd@2
-        state: stopped
-
-    - name: stop ceph-osd@0 daemon
-      service:
-        name: ceph-osd@0
-        state: stopped
-
-
-- hosts: mons
-  become: yes
-  tasks:
-    - name: mark osds down
-      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
-      with_items:
-        - 0
-        - 2
-
-    - name: destroy osd.2
-      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-      register: result
-      retries: 30
-      delay: 1
-      until: result is succeeded
-
-    - name: destroy osd.0
-      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-      register: result
-      retries: 30
-      delay: 1
-      until: result is succeeded
-
-- hosts: osds
-  become: yes
-  tasks:
-
-    # osd.2 device
-    - name: zap /dev/vdd1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: zap /dev/vdd2
-      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # partitions have been completely removed, so re-create them again
-    - name: re-create partition /dev/vdd for lvm data usage
-      parted:
-        device: /dev/vdd
-        number: 1
-        part_start: 0%
-        part_end: 50%
-        unit: '%'
-        label: gpt
-        state: present
-
-    - name: re-create partition /dev/vdd lvm journals
-      parted:
-        device: /dev/vdd
-        number: 2
-        part_start: 50%
-        part_end: 100%
-        unit: '%'
-        state: present
-        label: gpt
-
-    - name: redeploy osd.2 using /dev/vdd1
-      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # osd.0 lv
-    - name: zap test_group/data-lv1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: zap /dev/vdc1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: re-create partition /dev/vdc1
-      parted:
-        device: /dev/vdc
-        number: 1
-        part_start: 0%
-        part_end: 50%
-        unit: '%'
-        state: present
-        label: gpt
-
-    - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: activate all to start the previously prepared osd.0
-      command: "ceph-volume lvm activate --filestore --all"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: node inventory
-      command: "ceph-volume inventory"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: list all OSDs
-      command: "ceph-volume lvm list"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/vagrant_variables.yml
deleted file mode 120000 (symlink)
index d21531f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../../../vagrant_variables.yml
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
deleted file mode 100644 (file)
index a9b6aa2..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-
-- hosts: osds
-  become: yes
-  tasks:
-
-    - name: stop ceph-osd@2 daemon
-      service:
-        name: ceph-osd@2
-        state: stopped
-
-    - name: stop ceph-osd@0 daemon
-      service:
-        name: ceph-osd@0
-        state: stopped
-
-
-- hosts: mons
-  become: yes
-  tasks:
-    - name: mark osds down
-      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
-      with_items:
-        - 0
-        - 2
-
-    - name: destroy osd.2
-      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-      register: result
-      retries: 30
-      delay: 1
-      until: result is succeeded
-
-    - name: destroy osd.0
-      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-      register: result
-      retries: 30
-      delay: 1
-      until: result is succeeded
-
-- hosts: osds
-  become: yes
-  tasks:
-
-    # osd.2 device
-    - name: zap /dev/vdd1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # osd.2 journal
-    - name: zap /dev/vdd2
-      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # partitions have been completely removed, so re-create them
-    - name: re-create partition /dev/vdd for lvm data usage
-      parted:
-        device: /dev/vdd
-        number: 1
-        part_start: 0%
-        part_end: 50%
-        unit: '%'
-        label: gpt
-        state: present
-
-    - name: re-create partition /dev/vdd for lvm journals
-      parted:
-        device: /dev/vdd
-        number: 2
-        part_start: 50%
-        part_end: 100%
-        unit: '%'
-        state: present
-        label: gpt
-
-    - name: redeploy osd.2 using /dev/vdd1
-      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # osd.0 data lv
-    # note: we don't use --destroy here to test that zapping works without that flag.
-    # --destroy is used in the bluestore tests
-    - name: zap test_group/data-lv1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    # osd.0 journal device
-    - name: zap /dev/vdc1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: re-create partition /dev/vdc1
-      parted:
-        device: /dev/vdc
-        number: 1
-        part_start: 0%
-        part_end: 50%
-        unit: '%'
-        state: present
-        label: gpt
-
-    - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: find all OSD paths
-      find:
-        paths: /var/lib/ceph/osd
-        recurse: no
-        file_type: directory
-      register: osd_paths
-
-    # set all OSD paths to root:root to ensure that the OSD will be able to
-    # activate regardless
-    - name: mangle permissions to root
-      file:
-        path: "{{ item.path }}"
-        owner: root
-        group: root
-        recurse: yes
-      with_items:
-        - "{{ osd_paths.files }}"
-
-    - name: stop ceph-osd@2 daemon
-      service:
-        name: ceph-osd@2
-        state: stopped
-
-    - name: stop ceph-osd@1 daemon
-      service:
-        name: ceph-osd@1
-        state: stopped
-
-    - name: activate all to start the previously prepared osd.0
-      command: "ceph-volume lvm activate --filestore --all"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: node inventory
-      command: "ceph-volume inventory"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: list all OSDs
-      command: "ceph-volume lvm list"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: create temporary directory
-      tempfile:
-        state: directory
-        suffix: sparse
-      register: tmpdir
-
-    - name: create a 1GB sparse file
-      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
-
-    - name: find an empty loop device
-      command: losetup -f
-      register: losetup_list
-
-    - name: setup loop device with sparse file
-      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
-
-    - name: create volume group
-      command: vgcreate test_zap {{ losetup_list.stdout }}
-      failed_when: false
-
-    - name: create logical volume 1
-      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
-      failed_when: false
-
-    - name: create logical volume 2
-      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
-      failed_when: false
-
-    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
-    - name: zap test_zap/data-lv1
-      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-
-    - name: zap test_zap/data-lv2
-      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
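The tail of the deleted playbook builds a loop-device-backed volume group purely to check that zapping one logical volume with --destroy leaves the VG, and therefore its sibling LV, intact. A standalone Python sketch of that check, assuming the same test_zap/data-lv1 and data-lv2 names from the fixture:

    # Sketch: verify that "lvm zap --destroy" on one LV does not take the
    # shared VG down with it, so a second zap on the same VG still succeeds.
    # The VG/LV names come from the deleted playbook and are assumptions
    # outside that fixture.
    import os
    import subprocess

    def zap(lv: str, cluster: str = "ceph") -> None:
        subprocess.run(
            ["ceph-volume", "--cluster", cluster, "lvm", "zap", "--destroy", lv],
            env=dict(os.environ, CEPH_VOLUME_DEBUG="1"),
            check=True,  # raises CalledProcessError on a non-zero exit
        )

    # zapping the first LV must not remove the VG...
    zap("test_zap/data-lv1")
    # ...otherwise this second call would fail
    zap("test_zap/data-lv2")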
index ec2982a3a000096e44fd3d221ca774875c02bd8c..fe60c7db2289d9259c86e543269415e2b6ba6ceb 100644 (file)
@@ -1,5 +1,5 @@
 [tox]
-envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+envlist = centos8-bluestore-{create,prepare_activate,dmcrypt}
 skipsdist = True
 
 [testenv]
@@ -20,14 +20,11 @@ setenv=
   DEBIAN_FRONTEND=noninteractive
 changedir=
   # plain/unencrypted
-  centos8-filestore-create: {toxinidir}/centos8/filestore/create
   centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
   # dmcrypt
-  centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
   centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
   # TODO: these are placeholders for now, eventually we want to
   # test the prepare/activate workflow of ceph-volume as well
-  centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
   centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
index 6ee8e73d8d27ea737d42754bdcc869f84d2bf975..c910754c337d40d871b454aee74bc714054f9724 100644 (file)
@@ -1,5 +1,5 @@
 [tox]
-envlist = centos7-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
+envlist = centos7-bluestore-{activate,dmcrypt_plain,dmcrypt_luks}
 skipsdist = True
 
 [testenv]
@@ -19,12 +19,9 @@ setenv=
   CEPH_VOLUME_DEBUG = 1
   DEBIAN_FRONTEND=noninteractive
 changedir=
-  centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
   centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
   centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
   centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
-  centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
-  centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
   pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
index d4ebd48c24b9a13f778830e1b5016a40f2368c6e..ee9774ecc833c98ddb4297638375d46c93349c04 100644 (file)
@@ -116,31 +116,6 @@ class TestFormatDevice(object):
         assert expected == fake_run.calls[0]['args'][0]
 
 
-mkfs_filestore_flags = [
-    'ceph-osd',
-    '--cluster',
-    '--osd-objectstore', 'filestore',
-    '--mkfs',
-    '-i',
-    '--monmap',
-    '--keyfile', '-', # goes through stdin
-    '--osd-data',
-    '--osd-journal',
-    '--osd-uuid',
-    '--setuser', 'ceph',
-    '--setgroup', 'ceph'
-]
-
-
-class TestOsdMkfsFilestore(object):
-
-    @pytest.mark.parametrize('flag', mkfs_filestore_flags)
-    def test_keyring_is_used(self, fake_call, monkeypatch, flag):
-        monkeypatch.setattr(system, 'chown', lambda path: True)
-        prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
-        assert flag in fake_call.calls[0]['args'][0]
-
-
 class TestOsdMkfsBluestore(object):
 
     def test_keyring_is_added(self, fake_call, monkeypatch):
@@ -289,35 +264,6 @@ class TestNormalizeFlags(object):
         assert ','.join(result) == 'auto,discard,exec,rw'
 
 
-class TestMkfsFilestore(object):
-
-    def test_non_zero_exit_status(self, stub_call, monkeypatch):
-        conf.cluster = 'ceph'
-        monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
-        stub_call(([], [], 1))
-        with pytest.raises(RuntimeError) as error:
-            prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
-        assert "Command failed with exit code 1" in str(error.value)
-
-    def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
-        conf.cluster = 'ceph'
-        monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
-        stub_call(([], [], 1))
-        with pytest.raises(RuntimeError) as error:
-            prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
-        expected = ' '.join([
-            'ceph-osd',
-            '--cluster',
-            'ceph',
-            '--osd-objectstore', 'filestore', '--mkfs',
-            '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
-            '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
-            '--osd-journal', '/var/lib/ceph/osd/ceph-1/journal',
-            '--osd-uuid', 'asdf-1234',
-            '--setuser', 'ceph', '--setgroup', 'ceph'])
-        assert expected in str(error.value)
-
-
 class TestMkfsBluestore(object):
 
     def test_non_zero_exit_status(self, stub_call, monkeypatch):
@@ -344,57 +290,3 @@ class TestMkfsBluestore(object):
             '--osd-uuid', 'asdf-1234',
             '--setuser', 'ceph', '--setgroup', 'ceph'])
         assert expected in str(error.value)
-
-
-class TestGetJournalSize(object):
-
-    def test_undefined_size_fallbacks_formatted(self, conf_ceph_stub):
-        conf_ceph_stub(dedent("""
-        [global]
-        fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-        """))
-        result = prepare.get_journal_size()
-        assert result == '5G'
-
-    def test_undefined_size_fallbacks_unformatted(self, conf_ceph_stub):
-        conf_ceph_stub(dedent("""
-        [global]
-        fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-        """))
-        result = prepare.get_journal_size(lv_format=False)
-        assert result.gb.as_int() == 5
-
-    def test_defined_size_unformatted(self, conf_ceph_stub):
-        conf_ceph_stub(dedent("""
-        [global]
-        fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
-        [osd]
-        osd journal size = 10240
-        """))
-        result = prepare.get_journal_size(lv_format=False)
-        assert result.gb.as_int() == 10
-
-    def test_defined_size_formatted(self, conf_ceph_stub):
-        conf_ceph_stub(dedent("""
-        [global]
-        fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
-        [osd]
-        osd journal size = 10240
-        """))
-        result = prepare.get_journal_size()
-        assert result == '10G'
-
-    def test_refuse_tiny_journals(self, conf_ceph_stub):
-        conf_ceph_stub(dedent("""
-        [global]
-        fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
-        [osd]
-        osd journal size = 1024
-        """))
-        with pytest.raises(RuntimeError) as error:
-            prepare.get_journal_size()
-        assert 'journal sizes must be larger' in str(error.value)
-        assert 'detected: 1024.00 MB' in str(error.value)
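The removed TestGetJournalSize cases pinned down a pattern ceph-volume keeps using for bluestore's block.db sizing: read a megabyte value from ceph.conf, fall back to a default, enforce a minimum, and optionally format the result as an LVM-friendly string. A minimal, self-contained sketch of that pattern follows; the function and argument names are illustrative, not ceph-volume API.

    # Sketch of the conf-driven sizing pattern the deleted tests exercised:
    # a megabyte value from ceph.conf, a minimum-size guard, and an "NG"
    # string suitable for lvcreate. Names here are illustrative only.
    def conf_size_to_lv_string(conf_value_mb: str, minimum_gb: int = 2, default_mb: int = 5120) -> str:
        mb = int(conf_value_mb) if conf_value_mb else default_mb
        gb = mb / 1024.0
        if gb < minimum_gb:
            raise RuntimeError(
                "sizes must be larger than %dGB, detected: %.2f MB" % (minimum_gb, mb)
            )
        return "%dG" % int(gb)

    assert conf_size_to_lv_string("") == "5G"        # undefined size falls back to 5G
    assert conf_size_to_lv_string("10240") == "10G"  # 10240 MB -> 10G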
index 655f7cd55ed0f9d882e72a255a267da0524930ee..1abb5165ec004349531c5c38847dab8ecdd9695f 100644 (file)
@@ -121,7 +121,7 @@ class ValidBatchDataDevice(ValidBatchDevice, ValidDataDevice):
         # leave the validation to Batch.get_deployment_layout()
         # This way the idempotency isn't broken (especially when using --osds-per-device)
         for lv in self._device.lvs:
-            if lv.tags.get('ceph.type') in ['db', 'wal', 'journal']:
+            if lv.tags.get('ceph.type') in ['db', 'wal']:
                 return self._device
         if self._device.used_by_ceph:
             return self._device
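With filestore gone, only db and wal tags can mark a batch data device as already carrying auxiliary ceph LVs, which is what keeps repeated runs with --osds-per-device idempotent. A hedged sketch of that tag check in isolation; the lv objects here are stand-ins, not ceph-volume's api.lvm.Volume:

    # Sketch: decide whether a batch data device is already claimed by ceph
    # by inspecting the ceph.type tag on its LVs. "journal" is no longer a
    # recognised type now that filestore is dropped.
    AUX_TYPES = {"db", "wal"}

    def already_claimed(lvs) -> bool:
        """lvs: iterable of objects with a .tags dict, mimicking api.lvm.Volume."""
        return any(lv.tags.get("ceph.type") in AUX_TYPES for lv in lvs)

    class FakeLV:  # stand-in used only to exercise the predicate
        def __init__(self, tags):
            self.tags = tags

    assert already_claimed([FakeLV({"ceph.type": "wal"})])
    assert not already_claimed([FakeLV({"ceph.type": "journal"})])  # dropped type is ignored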
index 3310ab78c37f49a5a9144a3696904dc4a2098910..fdb73e1b1c8ff1ac8d97c3aa32bf7727a757acc2 100644 (file)
@@ -158,16 +158,11 @@ def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None):
 def write_lockbox_keyring(osd_id, osd_fsid, secret):
     """
     Helper to write the lockbox keyring. This is needed because the bluestore OSD will
-    not persist the keyring, and it can't be stored in the data device for filestore because
-    at the time this is needed, the device is encrypted.
+    not persist the keyring.
 
     For bluestore: A tmpfs filesystem is mounted, so the path can get written
     to, but the files are ephemeral, which requires this file to be created
     every time it is activated.
-    For filestore: The path for the OSD would exist at this point even if no
-    OSD data device is mounted, so the keyring is written to fetch the key, and
-    then the data device is mounted on that directory, making the keyring
-    "disappear".
     """
     if os.path.exists('/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)):
         return
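Because the bluestore OSD directory lives on tmpfs, a lockbox keyring written there evaporates between activations, so the helper above re-creates it unless the file already exists. A sketch of that idempotent write is below; the path layout follows the check shown in the diff, while the keyring body is a placeholder rather than ceph-volume's exact output.

    # Sketch: write the lockbox keyring only if it is not already present.
    # Path layout mirrors /var/lib/ceph/osd/<cluster>-<id>/lockbox.keyring;
    # the keyring body here is a placeholder, not ceph-volume's exact format.
    import os

    def write_lockbox_keyring_sketch(cluster: str, osd_id: str, osd_fsid: str, secret: str) -> str:
        path = "/var/lib/ceph/osd/%s-%s/lockbox.keyring" % (cluster, osd_id)
        if os.path.exists(path):  # tmpfs may already hold it for this activation
            return path
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as fh:
            fh.write("[client.osd-lockbox.%s]\n\tkey = %s\n" % (osd_fsid, secret))
        return path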
index ff7427eedd207bd7782655bc7be147ea71184012..576c086170847d70cca03eb5946aaf9bb9023071 100644 (file)
@@ -53,28 +53,6 @@ def write_keyring(osd_id, secret, keyring_name='keyring', name=None):
     system.chown(osd_keyring)
 
 
-def get_journal_size(lv_format=True):
-    """
-    Helper to retrieve the size (defined in megabytes in ceph.conf) to create
-    the journal logical volume, it "translates" the string into a float value,
-    then converts that into gigabytes, and finally (optionally) it formats it
-    back as a string so that it can be used for creating the LV.
-
-    :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
-    would result in '5G', otherwise it will return a ``Size`` object.
-    """
-    conf_journal_size = conf.ceph.get_safe('osd', 'osd_journal_size', '5120')
-    logger.debug('osd_journal_size set to %s' % conf_journal_size)
-    journal_size = disk.Size(mb=str_to_int(conf_journal_size))
-
-    if journal_size < disk.Size(gb=2):
-        mlogger.error('Refusing to continue with configured size for journal')
-        raise RuntimeError('journal sizes must be larger than 2GB, detected: %s' % journal_size)
-    if lv_format:
-        return '%sG' % journal_size.gb.as_int()
-    return journal_size
-
-
 def get_block_db_size(lv_format=True):
     """
     Helper to retrieve the size (defined in megabytes in ceph.conf) to create
@@ -366,9 +344,6 @@ def _validate_bluestore_device(device, excepted_device_type, osd_uuid):
         terminal.error('device %s is used by another osd %s as %s, should be %s'% (device, current_osd_uuid, current_device_type, osd_uuid))
         raise SystemExit(1)
 
-def link_journal(journal_device, osd_id):
-    _link_device(journal_device, 'journal', osd_id)
-
 
 def link_block(block_device, osd_id):
     _link_device(block_device, 'block', osd_id)
@@ -483,50 +458,3 @@ def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
             else:
                 raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
 
-
-def osd_mkfs_filestore(osd_id, fsid, keyring):
-    """
-    Create the files for the OSD to function. A normal call will look like:
-
-          ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
-                   --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
-                   --osd-data /var/lib/ceph/osd/ceph-0 \
-                   --osd-journal /var/lib/ceph/osd/ceph-0/journal \
-                   --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
-                   --keyring /var/lib/ceph/osd/ceph-0/keyring \
-                   --setuser ceph --setgroup ceph
-
-    """
-    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
-    monmap = os.path.join(path, 'activate.monmap')
-    journal = os.path.join(path, 'journal')
-
-    system.chown(journal)
-    system.chown(path)
-
-    command = [
-        'ceph-osd',
-        '--cluster', conf.cluster,
-        '--osd-objectstore', 'filestore',
-        '--mkfs',
-        '-i', osd_id,
-        '--monmap', monmap,
-    ]
-
-    if get_osdspec_affinity():
-        command.extend(['--osdspec-affinity', get_osdspec_affinity()])
-
-    command.extend([
-        '--keyfile', '-',
-        '--osd-data', path,
-        '--osd-journal', journal,
-        '--osd-uuid', fsid,
-        '--setuser', 'ceph',
-        '--setgroup', 'ceph'
-    ])
-
-    _, _, returncode = process.call(
-        command, stdin=keyring, terminal_verbose=True, show_command=True
-    )
-    if returncode != 0:
-        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
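osd_mkfs_filestore was the last caller that needed a journal symlink and an --osd-journal flag; its bluestore counterpart, osd_mkfs_bluestore, remains in util/prepare.py. Below is a rough sketch of the command that counterpart assembles, assuming the flags mirror the removed filestore variant minus the journal; the surviving function (including its optional wal/db path handling) is the authoritative reference.

    # Sketch: the bluestore mkfs invocation, assumed to mirror the removed
    # filestore one without --osd-journal. osd_mkfs_bluestore in
    # ceph_volume/util/prepare.py is the authoritative implementation.
    def bluestore_mkfs_command(cluster: str, osd_id: str, fsid: str) -> list:
        path = "/var/lib/ceph/osd/%s-%s/" % (cluster, osd_id)
        return [
            "ceph-osd",
            "--cluster", cluster,
            "--osd-objectstore", "bluestore",
            "--mkfs",
            "-i", osd_id,
            "--monmap", path + "activate.monmap",
            "--keyfile", "-",          # keyring is fed through stdin
            "--osd-data", path,
            "--osd-uuid", fsid,
            "--setuser", "ceph",
            "--setgroup", "ceph",
        ]

    print(" ".join(bluestore_mkfs_command("ceph", "1", "asdf-1234")))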