From fe9fb49e27ca0e8f0cee2f8106536d5c69dcdbcb Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Sat, 2 Feb 2013 21:01:08 -0800
Subject: [PATCH] ceph_manager: use get() for self.config powercycle checks

I think this is what is going on...

Traceback (most recent call last):
  File "/var/lib/teuthworker/teuthology-master/teuthology/contextutil.py", line 27, in nested
    yield vars
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/ceph.py", line 1158, in task
    yield
  File "/var/lib/teuthworker/teuthology-master/teuthology/run_tasks.py", line 25, in run_tasks
    manager = _run_one_task(taskname, ctx=ctx, config=config)
  File "/var/lib/teuthworker/teuthology-master/teuthology/run_tasks.py", line 14, in _run_one_task
    return fn(**kwargs)
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/dump_stuck.py", line 93, in task
    manager.kill_osd(id_)
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/ceph_manager.py", line 665, in kill_osd
    if 'powercycle' in self.config and self.config['powercycle']:
TypeError: argument of type 'NoneType' is not iterable
---
 teuthology/task/ceph_manager.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py
index 341b4ae6ac983..5647cd8ea8d0c 100644
--- a/teuthology/task/ceph_manager.py
+++ b/teuthology/task/ceph_manager.py
@@ -660,7 +660,7 @@ class CephManager:
         self.raw_cluster_cmd('osd', 'out', str(osd))
 
     def kill_osd(self, osd):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
             self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
             remote.console.power_off()
@@ -674,7 +674,7 @@ class CephManager:
         self.ctx.daemons.get_daemon('osd', osd).stop()
 
     def revive_osd(self, osd):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
             self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
             remote.console.power_on()
@@ -696,7 +696,7 @@ class CephManager:
 
     ## monitors
     def kill_mon(self, mon):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
             self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
             remote.console.power_off()
@@ -704,7 +704,7 @@ class CephManager:
         self.ctx.daemons.get_daemon('mon', mon).stop()
 
     def revive_mon(self, mon):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
             self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
             remote.console.power_on()
@@ -740,7 +740,7 @@ class CephManager:
 
     ## metadata servers
     def kill_mds(self, mds):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
             self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
             remote.console.power_off()
@@ -752,7 +752,7 @@ class CephManager:
         self.kill_mds(status['name'])
 
     def revive_mds(self, mds, standby_for_rank=None):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
             self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
             remote.console.power_on()
-- 
2.39.5
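Editor's note (not part of the patch): a minimal sketch of the failure mode the
traceback points at, assuming self.config is None when a task constructs
CephManager without a config section; names here are illustrative only.

    # Membership tests against None raise the TypeError from the traceback.
    config = None
    try:
        if 'powercycle' in config and config['powercycle']:
            pass
    except TypeError as exc:
        print(exc)  # argument of type 'NoneType' is not iterable

    # The patched form relies on config being a dict; with an empty dict,
    # dict.get() returns None and the check simply evaluates false.
    config = {}
    if config.get('powercycle'):
        pass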