ceph_manager: use get() for self.config powercycle checks
Author:    Sage Weil <sage@inktank.com>
Date:      Sun, 3 Feb 2013 05:01:08 +0000 (21:01 -0800)
Committer: Sage Weil <sage@inktank.com>
Date:      Sun, 3 Feb 2013 05:01:08 +0000 (21:01 -0800)
I think this is what is going on...

Traceback (most recent call last):
  File "/var/lib/teuthworker/teuthology-master/teuthology/contextutil.py", line 27, in nested
    yield vars
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/ceph.py", line 1158, in task
    yield
  File "/var/lib/teuthworker/teuthology-master/teuthology/run_tasks.py", line 25, in run_tasks
    manager = _run_one_task(taskname, ctx=ctx, config=config)
  File "/var/lib/teuthworker/teuthology-master/teuthology/run_tasks.py", line 14, in _run_one_task
    return fn(**kwargs)
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/dump_stuck.py", line 93, in task
    manager.kill_osd(id_)
  File "/var/lib/teuthworker/teuthology-master/teuthology/task/ceph_manager.py", line 665, in kill_osd
    if 'powercycle' in self.config and self.config['powercycle']:
TypeError: argument of type 'NoneType' is not iterable
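
The failure is plain Python semantics: dump_stuck.py constructs the CephManager without a task config, so self.config is None, and the `in` operator cannot test membership against None. A minimal standalone sketch of the old check versus the new .get() check (the local `config` variable here stands in for self.config):

    config = None

    # Old check: `in` needs a container, so None raises TypeError.
    try:
        if 'powercycle' in config and config['powercycle']:
            pass
    except TypeError as e:
        print(e)  # argument of type 'NoneType' is not iterable

    # New check: dict.get() quietly returns None (falsy) for a missing key.
    config = {}
    if config.get('powercycle'):
        print('powercycling')  # not reached

    # Caveat: .get() only helps once config is an actual dict; calling
    # None.get(...) would still raise AttributeError, so config has to be
    # normalized to a dict somewhere before these checks run.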

diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py
index 341b4ae6ac983335dfc0ec0cdd34fc54b5691ab5..5647cd8ea8d0c0e968df2820e656c6cbf4963759 100644
--- a/teuthology/task/ceph_manager.py
+++ b/teuthology/task/ceph_manager.py
@@ -660,7 +660,7 @@ class CephManager:
         self.raw_cluster_cmd('osd', 'out', str(osd))
 
     def kill_osd(self, osd):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
             self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
             remote.console.power_off()
@@ -674,7 +674,7 @@ class CephManager:
         self.ctx.daemons.get_daemon('osd', osd).stop()
 
     def revive_osd(self, osd):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
             self.log('revive_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
             remote.console.power_on()
@@ -696,7 +696,7 @@ class CephManager:
     ## monitors
 
     def kill_mon(self, mon):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
             self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
             remote.console.power_off()
@@ -704,7 +704,7 @@ class CephManager:
             self.ctx.daemons.get_daemon('mon', mon).stop()
 
     def revive_mon(self, mon):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
             self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
             remote.console.power_on()
@@ -740,7 +740,7 @@ class CephManager:
     ## metadata servers
 
     def kill_mds(self, mds):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
             self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
             remote.console.power_off()
@@ -752,7 +752,7 @@ class CephManager:
         self.kill_mds(status['name'])
 
     def revive_mds(self, mds, standby_for_rank=None):
-        if 'powercycle' in self.config and self.config['powercycle']:
+        if self.config.get('powercycle'):
             (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
             self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
             remote.console.power_on()
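
For the .get() form to be safe, self.config must actually be a dict by the time these methods run; a hypothetical normalization in CephManager.__init__ (not part of this diff, shown only to illustrate the assumption) would look like:

    class CephManager:
        def __init__(self, controller, ctx=None, config=None, logger=None):
            self.controller = controller
            self.ctx = ctx
            # Normalize so later self.config.get(...) calls never see None.
            self.config = config if config is not None else {}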