From 2af05c5fb2ab0b0aace0fed56c3a1d314f312efb Mon Sep 17 00:00:00 2001
From: David Zafman
Date: Mon, 4 Aug 2014 13:07:19 -0700
Subject: [PATCH] ceph_manager: Implement export/import when thrasher kills an
 osd

Use list-pgs to avoid races by seeing actual pgs present

Signed-off-by: David Zafman
(cherry picked from commit 0cdf6e813db6bddd4dd2b97b8722ed32a4cf56a8)
---
 tasks/ceph_manager.py | 44 +++++++++++++++++++++++++++++++++++++++++++
 tasks/thrashosds.py   |  2 ++
 2 files changed, 46 insertions(+)

diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py
index 4dbca805bf28f..f1b72e0463648 100644
--- a/tasks/ceph_manager.py
+++ b/tasks/ceph_manager.py
@@ -7,6 +7,7 @@ import time
 import gevent
 import json
 import threading
+import os
 
 from teuthology import misc as teuthology
 from tasks.scrub import Scrubber
@@ -74,6 +75,7 @@ class Thrasher:
             self.revive_timeout += 120
         self.clean_wait = self.config.get('clean_wait', 0)
         self.minin = self.config.get("min_in", 3)
+        self.ceph_objectstore_tool = self.config.get('ceph_objectstore_tool', True)
 
         num_osds = self.in_osds + self.out_osds
         self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds
@@ -114,6 +116,42 @@ class Thrasher:
             self.ceph_manager.mark_down_osd(osd)
         if mark_out and osd in self.in_osds:
             self.out_osd(osd)
+        if self.ceph_objectstore_tool:
+            self.log("Testing ceph_objectstore_tool on down osd")
+            (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+            FSPATH = self.ceph_manager.get_filepath()
+            JPATH = os.path.join(FSPATH, "journal")
+            prefix = "sudo ceph_objectstore_tool --data-path {fpath} --journal-path {jpath} ".format(fpath=FSPATH, jpath=JPATH)
+            cmd = (prefix + "--op list-pgs").format(id=osd)
+            proc = remote.run(args=cmd, wait=True, check_status=True, stdout=StringIO())
+            if proc.exitstatus != 0:
+                self.log("Failed to get pg list for osd.{osd}".format(osd=osd))
+                return
+            pgs = proc.stdout.getvalue().split('\n')[:-1]
+            if len(pgs) == 0:
+                self.log("No PGs found for osd.{osd}".format(osd=osd))
+                return
+            pg = random.choice(pgs)
+            fpath = os.path.join(os.path.join(teuthology.get_testdir(self.ceph_manager.ctx), "data"), "exp.{pg}.{id}".format(pg=pg, id=osd))
+            # export
+            success = False
+            cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=osd, pg=pg, file=fpath)
+            proc = remote.run(args=cmd)
+            if proc.exitstatus == 0:
+                # remove
+                cmd = (prefix + "--op remove --pgid {pg}").format(id=osd, pg=pg)
+                proc = remote.run(args=cmd)
+                if proc.exitstatus == 0:
+                    # import
+                    cmd = (prefix + "--op import --file {file}").format(id=osd, file=fpath)
+                    remote.run(args=cmd)
+                    if proc.exitstatus == 0:
+                        success = True
+            cmd = "rm -f {file}".format(file=fpath)
+            remote.run(args=cmd)
+            if not success:
+                raise Exception("ceph_objectstore_tool: failure with status {ret}".format(ret=proc.exitstatus))
+
 
     def blackhole_kill_osd(self, osd=None):
         """
@@ -1467,3 +1505,9 @@ class CephManager:
         out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
         j = json.loads(' '.join(out.splitlines()[1:]))
         return j
+
+    def get_filepath(self):
+        """
+        Return path to osd data with {id} needing to be replaced
+        """
+        return "/var/lib/ceph/osd/ceph-{id}"
diff --git a/tasks/thrashosds.py b/tasks/thrashosds.py
index 3913fa5ea0a4e..c19631fbe099c 100644
--- a/tasks/thrashosds.py
+++ b/tasks/thrashosds.py
@@ -94,6 +94,8 @@ def task(ctx, config):
     chance_test_map_discontinuity: (0) chance to test map discontinuity
     map_discontinuity_sleep_time: (40) time to wait for map trims
 
+    ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down
+
     example:
 
     tasks:
-- 
2.39.5