action='store_true', default=None,
help='bluestore objectstore',
)
+ parser.add_argument(
+ '--filestore',
+ action='store_true', default=True,
+ help='IGNORED FORWARD COMPATIBILITY HACK',
+ )
return parser
@staticmethod
self.set_data_partition()
self.populate_data_path_device(*to_prepare_list)
+ def populate_data_path(self, path, *to_prepare_list):
+ super(PrepareFilestoreData, self).populate_data_path(path,
+ *to_prepare_list)
+ write_one_line(path, 'type', 'filestore')
+
class PrepareBluestoreData(PrepareData):
#
# bluestore
#
- c.conf['global']['enable experimental unrecoverable data corrupting features'] = '*'
c.conf['global']['bluestore fsck on mount'] = 'true'
c.save_conf()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk --verbose zap " + disk)
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid + " " + disk)
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
c.check_osd_status(osd_uuid)
c.destroy_osd(osd_uuid)
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid + " " + disk)
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
partition = c.get_osd_partition(osd_uuid)
assert partition['type'] == 'data'
osd_uuid = str(uuid.uuid1())
journal_uuid = str(uuid.uuid1())
c.sh("ceph-disk --verbose zap " + disk)
- c.sh(ceph_disk + " --verbose prepare " +
+ c.sh(ceph_disk + " --verbose prepare --filestore " +
" --osd-uuid " + osd_uuid +
" --journal-uuid " + journal_uuid +
" --dmcrypt " +
c.sh("ceph-disk --verbose zap " + disk)
c.conf['global']['osd objectstore'] = 'memstore'
c.save_conf()
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk --verbose zap " + disk)
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
symlink = os.path.join(tempdir, 'osd')
os.symlink(disk, symlink)
c.sh("ceph-disk --verbose zap " + symlink)
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + symlink)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
c.sh("chown ceph:ceph " + tempdir + " || true")
journal_file = os.path.join(tempdir, 'journal')
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_file)
c.wait_for_osd_up(osd_uuid)
device = json.loads(
def activate_separated_journal(self, data_disk, journal_disk):
c = CephDisk()
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(
#
# Create another OSD with the journal partition of the previous OSD
#
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_path)
c.helper("pool_read_write 1") # 1 == pool size
c.wait_for_osd_up(osd_uuid)
#
osd_uuid = str(uuid.uuid1())
c.sh("ceph-disk --verbose zap " + multipath)
- c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
" " + multipath)
c.wait_for_osd_up(osd_uuid)
device = json.loads(
mkdir -p $osd_data
ceph-disk $ceph_disk_args \
- prepare $osd_data || return 1
+ prepare --filestore $osd_data || return 1
activate_osd $dir $id "$@"
}
ceph-disk $ceph_disk_args \
prepare --bluestore $osd_data || return 1
- local ceph_osd_args
- ceph_osd_args+=" --enable-experimental-unrecoverable-data-corrupting-features=bluestore"
- activate_osd $dir $id $ceph_osd_args "$@"
+ activate_osd $dir $id "$@"
}
function test_run_osd() {
ceph_disk_args+=" --prepend-to-path="
local ceph_args="$CEPH_ARGS"
- ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=bluestore"
ceph_args+=" --osd-failsafe-full-ratio=.99"
ceph_args+=" --osd-journal-size=100"
ceph_args+=" --osd-scrub-load-threshold=2000"
journal_args=" --journal-path $osd_data/journal"
fi
ceph-objectstore-tool \
- --enable-experimental-unrecoverable-data-corrupting-features=bluestore \
--data-path $osd_data \
$journal_args \
"$@" || return 1
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.fill(textwrap.dedent("""\
If the --bluestore argument is given, a bluestore objectstore
- will be used instead of the legacy filestore objectstore.
+ will be created. If --filestore is provided, a legacy filestore
+ objectstore will be created. If neither is specified, a bluestore
+ objectstore is created by default.
When an entire device is prepared for bluestore, two
partitions are created. The first partition is for metadata,
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'--bluestore',
- action='store_true', default=None,
+ dest='bluestore',
+ action='store_true', default=True,
help='bluestore objectstore',
)
+ parser.add_argument(
+ '--filestore',
+ dest='bluestore',
+ action='store_false',
+ help='filestore objectstore',
+ )
return parser
@staticmethod
self.set_data_partition()
self.populate_data_path_device(*to_prepare_list)
+ def populate_data_path(self, path, *to_prepare_list):
+ super(PrepareFilestoreData, self).populate_data_path(path,
+ *to_prepare_list)
+ write_one_line(path, 'type', 'filestore')
+
class PrepareBluestoreData(PrepareData):
'--setgroup', get_ceph_group(),
],
)
- else:
+ elif osd_type == 'filestore':
ceph_osd_mkfs(
[
'ceph-osd',
'--setgroup', get_ceph_group(),
],
)
+ else:
+ raise Error('unrecognized objectstore type %s' % osd_type)
def auth_key(
$mkdir -p $osd_data
${CEPH_DISK} $CEPH_DISK_ARGS \
- prepare --osd-uuid $osd_uuid $osd_data || return 1
+ prepare --filestore --osd-uuid $osd_uuid $osd_data || return 1
${CEPH_DISK} $CEPH_DISK_ARGS \
--verbose \
mkdir -p $osd_data/fsid
CEPH_ARGS="--fsid $uuid" \
- ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data > $dir/out 2>&1
+ ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data > $dir/out 2>&1
grep --quiet 'Is a directory' $dir/out || return 1
! [ -f $osd_data/magic ] || return 1
rmdir $osd_data/fsid
echo successfully prepare the OSD
CEPH_ARGS="--fsid $uuid" \
- ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $dir/out
+ ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data 2>&1 | tee $dir/out
grep --quiet 'Preparing osd data dir' $dir/out || return 1
grep --quiet $uuid $osd_data/ceph_fsid || return 1
[ -f $osd_data/magic ] || return 1
echo will not override an existing OSD
CEPH_ARGS="--fsid $($uuidgen)" \
- ${CEPH_DISK} $CEPH_DISK_ARGS prepare $osd_data 2>&1 | tee $dir/out
+ ${CEPH_DISK} $CEPH_DISK_ARGS prepare --filestore $osd_data 2>&1 | tee $dir/out
grep --quiet 'Data dir .* already exists' $dir/out || return 1
grep --quiet $uuid $osd_data/ceph_fsid || return 1
}
fi
${CEPH_DISK} $CEPH_DISK_ARGS \
- prepare --osd-uuid $osd_uuid $to_prepare || return 1
+ prepare --filestore --osd-uuid $osd_uuid $to_prepare || return 1
$timeoutcmd ${CEPH_DISK} $CEPH_DISK_ARGS \
activate \
${CEPH_DISK} $CEPH_DISK_ARGS \
prepare --bluestore --block-file --osd-uuid $osd_uuid $to_prepare || return 1
- CEPH_ARGS=" --osd-objectstore=bluestore --bluestore-fsck-on-mount=true --enable_experimental_unrecoverable_data_corrupting_features=* --bluestore-block-db-size=67108864 --bluestore-block-wal-size=134217728 --bluestore-block-size=10737418240 $CEPH_ARGS" \
+ CEPH_ARGS=" --osd-objectstore=bluestore --bluestore-fsck-on-mount=true --bluestore-block-db-size=67108864 --bluestore-block-wal-size=134217728 --bluestore-block-size=10737418240 $CEPH_ARGS" \
$timeout $TIMEOUT ${CEPH_DISK} $CEPH_DISK_ARGS \
activate \
--mark-init=none \
$mkdir -p $osd_data
${CEPH_DISK} $CEPH_DISK_ARGS \
- prepare --osd-uuid $osd_uuid \
+ prepare --filestore --osd-uuid $osd_uuid \
--crush-device-class CRUSH_CLASS \
$osd_data || return 1
test -f $osd_data/crush_device_class || return 1
class TestPrepare(Base):
- def test_init_dir(self):
+ def test_init_filestore_dir(self):
parser = argparse.ArgumentParser('ceph-disk')
subparsers = parser.add_subparsers()
main.Prepare.set_subparser(subparsers)
args = parser.parse_args([
'prepare',
data,
+ '--filestore',
])
def set_type(self):
@mock.patch('stat.S_ISBLK')
@mock.patch('ceph_disk.main.is_partition')
- def test_init_dev(self, m_is_partition, m_s_isblk):
+ def test_init_filestore_dev(self, m_is_partition, m_s_isblk):
m_s_isblk.return_value = True
parser = argparse.ArgumentParser('ceph-disk')
args = parser.parse_args([
'prepare',
data,
+ '--filestore',
])
prepare = main.Prepare.factory(args)
assert isinstance(prepare.data, main.PrepareData)
assert isinstance(prepare.journal, main.PrepareJournal)
assert prepare.journal.is_device()
+ def test_init_default_dir(self):
+ parser = argparse.ArgumentParser('ceph-disk')
+ subparsers = parser.add_subparsers()
+ main.Prepare.set_subparser(subparsers)
+
+ data = tempfile.mkdtemp()
+ main.setup_statedir(data)
+ args = parser.parse_args([
+ 'prepare',
+ data,
+ ])
+
+ def set_type(self):
+ self.type = self.FILE
+ with mock.patch.multiple(main.PrepareData,
+ set_type=set_type):
+ prepare = main.Prepare.factory(args)
+ assert isinstance(prepare.data, main.PrepareBluestoreData)
+ assert prepare.data.is_file()
+ prepare.prepare()
+ assert os.path.exists(os.path.join(data, 'fsid'))
+ shutil.rmtree(data)
+
def test_set_subparser(self):
parser = argparse.ArgumentParser('ceph-disk')
subparsers = parser.add_subparsers()
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
- CEPH_ARGS+="--enable-experimental-unrecoverable-data-corrupting-features bluestore "
# avoid running out of fds in rados bench
CEPH_ARGS+="--filestore_wbthrottle_xfs_ios_hard_limit=900 "
CEPH_ARGS+="--filestore_wbthrottle_btrfs_ios_hard_limit=900 "
--op dup || return 1
CEPH_ARGS=$O
- run_osd $dir 0 || return 1
+ run_osd_bluestore $dir 0 || return 1
while ! ceph osd stat | grep '3 up' ; do sleep 1 ; done
ceph osd metadata 0 | grep bluestore || return 1
done
id=${osds0[0]}
- ceph-objectstore-tool --data-path $dir/$id --enable-experimental-unrecoverable-data-corrupting-features=bluestore \
+ ceph-objectstore-tool --data-path $dir/$id \
MOBJ0 remove || return 1
id=${osds0[1]}
- ceph-objectstore-tool --data-path $dir/$id --enable-experimental-unrecoverable-data-corrupting-features=bluestore \
+ ceph-objectstore-tool --data-path $dir/$id \
MOBJ0 remove || return 1
id=${osds1[1]}
- ceph-objectstore-tool --data-path $dir/$id --enable-experimental-unrecoverable-data-corrupting-features=bluestore \
+ ceph-objectstore-tool --data-path $dir/$id \
MOBJ1 remove || return 1
id=${osds1[2]}
- ceph-objectstore-tool --data-path $dir/$id --enable-experimental-unrecoverable-data-corrupting-features=bluestore \
+ ceph-objectstore-tool --data-path $dir/$id \
MOBJ1 remove || return 1
for id in $(seq 0 2) ; do