objects: 20 # <number of objects>
pgnum: 12
crimson_objectstore_tool: true # use crimson-objectstore-tool instead of ceph-objectstore-tool
+ gc_before_restart: true # run crimson-objectstore-tool --op gc before restarting OSDs
"""
if config is None:
ERRORS += IMP_ERRORS
if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
+ if CRIMSON and config.get('gc_before_restart', False):
+ # Run GC on each OSD's seastore to reclaim segments consumed
+ # by repeated tool mount/unmount cycles before restarting.
+ log.info("Running GC on each OSD store...")
+ for remote in osds.remotes.keys():
+ for role in osds.remotes[remote]:
+ if not role.startswith("osd."):
+ continue
+ osdid = int(role.split('.')[1])
+ cmd = (prefix + "--op gc").format(id=osdid)
+ try:
+ remote.sh(cmd, wait=True)
+ except CommandFailedError as e:
+ log.warning(
+ "GC failed on osd.{id} with {ret}".format(
+ id=osdid, ret=e.exitstatus))
+
log.info("Restarting OSDs....")
# They still appear to be up because the nodown flag is set
for osd in manager.get_osd_status()['up']: