tasks: update ctx.ceph.conf readers to use per-cluster conf
author     Josh Durgin <jdurgin@redhat.com>
           Wed, 6 Apr 2016 05:40:17 +0000 (22:40 -0700)
committer  Josh Durgin <jdurgin@redhat.com>
           Mon, 9 May 2016 21:55:27 +0000 (14:55 -0700)
Just use the default cluster name for now, since these tasks aren't used
for multi-cluster tests yet.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
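
For context, a minimal sketch of the access-pattern change, with teuthology's ctx simplified to stand-in classes (ClusterState and Ctx below are illustrative assumptions, not the real teuthology types):

    # Illustrative sketch only; teuthology's real ctx object is more complex.
    class ClusterState(object):
        """Per-cluster state as the ceph task would populate it."""
        def __init__(self):
            self.conf = {}  # e.g. {'client': {'rbd cache': 'true'}}

    class Ctx(object):
        # Before this commit, ctx.ceph was one object with a single .conf.
        # After it, ctx.ceph maps cluster name -> per-cluster state.
        def __init__(self):
            self.ceph = {'ceph': ClusterState()}

    ctx = Ctx()
    # The tasks in this commit hard-code the default cluster name for now:
    ctx.ceph['ceph'].conf.setdefault('client', {})['rbd cache'] = 'true'
    assert ctx.ceph['ceph'].conf['client']['rbd cache'] == 'true'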
tasks/cephfs/filesystem.py
tasks/mon_thrash.py
tasks/qemu.py
tasks/rgw.py
tasks/s3tests.py
tasks/util/rgw.py

diff --git a/tasks/cephfs/filesystem.py b/tasks/cephfs/filesystem.py
index f9ad1488aa9b30a62ed5c286031750831409250c..1b3b71e017d56f0149cca2f81e7c525d7aac3a7b 100644
--- a/tasks/cephfs/filesystem.py
+++ b/tasks/cephfs/filesystem.py
@@ -168,14 +168,14 @@ class MDSCluster(object):
         return self.json_asok(['config', 'get', key], service_type, service_id)[key]
 
     def set_ceph_conf(self, subsys, key, value):
-        if subsys not in self._ctx.ceph.conf:
-            self._ctx.ceph.conf[subsys] = {}
-        self._ctx.ceph.conf[subsys][key] = value
+        if subsys not in self._ctx.ceph['ceph'].conf:
+            self._ctx.ceph['ceph'].conf[subsys] = {}
+        self._ctx.ceph['ceph'].conf[subsys][key] = value
         write_conf(self._ctx)  # XXX because we don't have the ceph task's config object, if they
                                # used a different config path this won't work.
 
     def clear_ceph_conf(self, subsys, key):
-        del self._ctx.ceph.conf[subsys][key]
+        del self._ctx.ceph['ceph'].conf[subsys][key]
         write_conf(self._ctx)
 
     def json_asok(self, command, service_type, service_id):
diff --git a/tasks/mon_thrash.py b/tasks/mon_thrash.py
index b45aaa999780b4de2daaea07575cda3360882001..0754bcdd4e97f50b4e833e3f2938abbe827df7cc 100644
--- a/tasks/mon_thrash.py
+++ b/tasks/mon_thrash.py
@@ -156,7 +156,7 @@ class MonitorThrasher:
         Thrash the monitor specified.
         :param mon: monitor to thrash
         """
-        addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
+        addr = self.ctx.ceph['ceph'].conf['mon.%s' % mon]['mon addr']
         self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr))
         out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force')
         j = json.loads(out)
diff --git a/tasks/qemu.py b/tasks/qemu.py
index a40a431952752585653557e490e936f1db14f314..70d95e45362e70524a2bafbca54d68b0fd6deea6 100644
--- a/tasks/qemu.py
+++ b/tasks/qemu.py
@@ -311,9 +311,9 @@ def run_qemu(ctx, config):
             ]
 
         cachemode = 'none'
-        ceph_config = ctx.ceph.conf.get('global', {})
-        ceph_config.update(ctx.ceph.conf.get('client', {}))
-        ceph_config.update(ctx.ceph.conf.get(client, {}))
+        ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+        ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+        ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
         if ceph_config.get('rbd cache'):
             if ceph_config.get('rbd cache max dirty', 1) > 0:
                 cachemode = 'writeback'
diff --git a/tasks/rgw.py b/tasks/rgw.py
index b874642661c7d92e369f7549a0e00549575c0af3..a1a25f9f84e7ecfa31729cb2d57c99d4b9143ada 100644
--- a/tasks/rgw.py
+++ b/tasks/rgw.py
@@ -452,9 +452,9 @@ def extract_zone_info(ctx, client, client_config):
     :param client_config: dictionary of client configuration information
     :returns: zone extracted from client and client_config information
     """
-    ceph_config = ctx.ceph.conf.get('global', {})
-    ceph_config.update(ctx.ceph.conf.get('client', {}))
-    ceph_config.update(ctx.ceph.conf.get(client, {}))
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
     for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
         assert key in ceph_config, \
             'ceph conf must contain {key} for {client}'.format(key=key,
diff --git a/tasks/s3tests.py b/tasks/s3tests.py
index 3fe391622f1c58e2909c8066403664d68d1cdc2a..20f328b17994fd95ed0cacc4bd542e0bccd2deaa 100644
--- a/tasks/s3tests.py
+++ b/tasks/s3tests.py
@@ -29,7 +29,7 @@ def extract_sync_client_data(ctx, client_name):
     """
     return_region_name = None
     return_dict = None
-    client = ctx.ceph.conf.get(client_name, None)
+    client = ctx.ceph['ceph'].conf.get(client_name, None)
     if client:
         current_client_zone = client.get('rgw zone', None)
         if current_client_zone:
diff --git a/tasks/util/rgw.py b/tasks/util/rgw.py
index f7d6ba58a9be4b37add090bf9caa89356c26eb3f..fe03311c9aab64973e77bd35f81abe79e3951c56 100644
--- a/tasks/util/rgw.py
+++ b/tasks/util/rgw.py
@@ -119,21 +119,21 @@ def get_zone_system_keys(ctx, client, zone):
     return system_key['access_key'], system_key['secret_key']
 
 def zone_for_client(ctx, client):
-    ceph_config = ctx.ceph.conf.get('global', {})
-    ceph_config.update(ctx.ceph.conf.get('client', {}))
-    ceph_config.update(ctx.ceph.conf.get(client, {}))
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
     return ceph_config.get('rgw zone')
 
 def region_for_client(ctx, client):
-    ceph_config = ctx.ceph.conf.get('global', {})
-    ceph_config.update(ctx.ceph.conf.get('client', {}))
-    ceph_config.update(ctx.ceph.conf.get(client, {}))
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
     return ceph_config.get('rgw region')
 
 def radosgw_data_log_window(ctx, client):
-    ceph_config = ctx.ceph.conf.get('global', {})
-    ceph_config.update(ctx.ceph.conf.get('client', {}))
-    ceph_config.update(ctx.ceph.conf.get(client, {}))
+    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
+    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
+    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
     return ceph_config.get('rgw data log window', 30)
 
 def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
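
Note the same three-way merge of the 'global', 'client', and per-client conf sections recurs in qemu.py, rgw.py, and util/rgw.py above. A hypothetical helper consolidating it might look like the sketch below; the name get_merged_conf is an assumption for illustration, not part of this commit:

    # Hypothetical consolidation of the repeated merge pattern above;
    # not part of this commit. The cluster name stays hard-coded to
    # 'ceph', matching the tasks in this change.
    def get_merged_conf(ctx, client, cluster='ceph'):
        """Merge conf sections, most specific last: global < client < per-client."""
        conf = {}
        conf.update(ctx.ceph[cluster].conf.get('global', {}))
        conf.update(ctx.ceph[cluster].conf.get('client', {}))
        conf.update(ctx.ceph[cluster].conf.get(client, {}))
        return conf

Starting from an empty dict also avoids mutating the stored 'global' section in place, which the inlined pattern does whenever that section exists.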