From dad981d339a21e530e187e17b027dc3d4111af48 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Fri, 11 Sep 2015 12:11:31 -0400
Subject: [PATCH] tasks: sudo ceph for cli

/var/run/ceph is 770. This is mainly necessary for any interaction
with the daemon sockets, but it is what users do and it may avoid log
noise.

Signed-off-by: Sage Weil
---
 tasks/ceph.py                | 14 +++++++++-----
 tasks/ceph_manager.py        |  2 ++
 tasks/devstack.py            | 12 ++++++------
 tasks/osd_failsafe_enospc.py | 35 ++++++++++++++++++++---------------
 tasks/rest_api.py            |  1 +
 tasks/rgw.py                 |  2 +-
 tasks/util/rados.py          | 17 +++++++++--------
 7 files changed, 48 insertions(+), 35 deletions(-)

diff --git a/tasks/ceph.py b/tasks/ceph.py
index e0b7d8d231c23..bb5a6e67102ea 100644
--- a/tasks/ceph.py
+++ b/tasks/ceph.py
@@ -281,6 +281,7 @@ def cephfs_setup(ctx, config):
         all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
         num_active = len([r for r in all_roles if is_active_mds(r)])
         mon_remote.run(args=[
+            'sudo',
             'adjust-ulimits',
             'ceph-coverage',
             coverage_dir,
@@ -882,11 +883,12 @@ def get_all_pg_info(rem_site, testdir):
     Get the results of a ceph pg dump
     """
     info = rem_site.run(args=[
-        'adjust-ulimits',
-        'ceph-coverage',
-        '{tdir}/archive/coverage'.format(tdir=testdir),
-        'ceph', 'pg', 'dump',
-        '--format', 'json'], stdout=StringIO())
+        'sudo',
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'ceph', 'pg', 'dump',
+        '--format', 'json'], stdout=StringIO())
     all_info = json.loads(info.stdout.getvalue())
     return all_info['pg_stats']
 
@@ -924,6 +926,7 @@ def osd_scrub_pgs(ctx, config):
         if role.startswith('osd.'):
             log.info("Scrubbing osd {osd}".format(osd=role))
             rem_site.run(args=[
+                'sudo',
                 'adjust-ulimits',
                 'ceph-coverage',
                 '{tdir}/archive/coverage'.format(tdir=testdir),
@@ -1074,6 +1077,7 @@ def wait_for_mon_quorum(ctx, config):
     while True:
         r = remote.run(
             args=[
+                'sudo',
                 'ceph',
                 'quorum_status',
             ],
diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py
index 0227f4ab97834..c5ec54c4b8d3f 100644
--- a/tasks/ceph_manager.py
+++ b/tasks/ceph_manager.py
@@ -801,6 +801,7 @@ class CephManager:
         """
         testdir = teuthology.get_testdir(self.ctx)
         ceph_args = [
+            'sudo',
             'adjust-ulimits',
             'ceph-coverage',
             '{tdir}/archive/coverage'.format(tdir=testdir),
@@ -819,6 +820,7 @@ class CephManager:
         """
         testdir = teuthology.get_testdir(self.ctx)
         ceph_args = [
+            'sudo',
             'adjust-ulimits',
             'ceph-coverage',
             '{tdir}/archive/coverage'.format(tdir=testdir),
diff --git a/tasks/devstack.py b/tasks/devstack.py
index c5cd41b06bd8f..9fa4c68c7a087 100644
--- a/tasks/devstack.py
+++ b/tasks/devstack.py
@@ -109,7 +109,7 @@ def create_pools(ceph_node, pool_size):
     log.info("Creating pools on Ceph cluster...")
 
     for pool_name in ['volumes', 'images', 'backups']:
-        args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size]
+        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
         ceph_node.run(args=args)
 
 
@@ -125,11 +125,11 @@ def generate_ceph_keys(ceph_node):
     log.info("Generating Ceph keys...")
 
     ceph_auth_cmds = [
-        ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
          'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
-        ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
          'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
-        ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
          'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
     ]
     for cmd in ceph_auth_cmds:
@@ -142,7 +142,7 @@ def distribute_ceph_keys(devstack_node, ceph_node):
     def copy_key(from_remote, key_name, to_remote, dest_path, owner):
         key_stringio = StringIO()
         from_remote.run(
-            args=['ceph', 'auth', 'get-or-create', key_name],
+            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
             stdout=key_stringio)
         key_stringio.seek(0)
         misc.sudo_write_file(to_remote, dest_path,
@@ -173,7 +173,7 @@ def set_libvirt_secret(devstack_node, ceph_node):
     log.info("Setting libvirt secret...")
 
     cinder_key_stringio = StringIO()
-    ceph_node.run(args=['ceph', 'auth', 'get-key', 'client.cinder'],
+    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
                   stdout=cinder_key_stringio)
     cinder_key = cinder_key_stringio.getvalue().strip()
 
diff --git a/tasks/osd_failsafe_enospc.py b/tasks/osd_failsafe_enospc.py
index 2af94cd58e4c6..856ff4a5adb78 100644
--- a/tasks/osd_failsafe_enospc.py
+++ b/tasks/osd_failsafe_enospc.py
@@ -56,9 +56,10 @@ def task(ctx, config):
 
     proc = mon.run(
         args=[
-            'daemon-helper',
-            'kill',
-            'ceph', '-w'
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
         ],
         stdin=run.PIPE,
         stdout=StringIO(),
@@ -83,9 +84,10 @@ def task(ctx, config):
 
     proc = mon.run(
         args=[
-            'daemon-helper',
-            'kill',
-            'ceph', '-w'
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
         ],
         stdin=run.PIPE,
         stdout=StringIO(),
@@ -124,9 +126,10 @@ def task(ctx, config):
 
     proc = mon.run(
         args=[
-            'daemon-helper',
-            'kill',
-            'ceph', '-w'
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
         ],
         stdin=run.PIPE,
         stdout=StringIO(),
@@ -152,9 +155,10 @@ def task(ctx, config):
 
     proc = mon.run(
         args=[
-            'daemon-helper',
-            'kill',
-            'ceph', '-w'
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
         ],
         stdin=run.PIPE,
         stdout=StringIO(),
@@ -182,9 +186,10 @@ def task(ctx, config):
 
     proc = mon.run(
         args=[
-            'daemon-helper',
-            'kill',
-            'ceph', '-w'
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
         ],
         stdin=run.PIPE,
         stdout=StringIO(),
diff --git a/tasks/rest_api.py b/tasks/rest_api.py
index 0956d00be51d2..e86f77eb52596 100644
--- a/tasks/rest_api.py
+++ b/tasks/rest_api.py
@@ -170,6 +170,7 @@ def task(ctx, config):
             )
         rems.run(
             args=[
+                'sudo',
                 'ceph',
                 'auth',
                 'import',
diff --git a/tasks/rgw.py b/tasks/rgw.py
index ff0aa4e592987..d392aaf512eb9 100644
--- a/tasks/rgw.py
+++ b/tasks/rgw.py
@@ -661,7 +661,7 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints):
         (remote,) = ctx.cluster.only(role).remotes.keys()
 
         for pool_info in zone_info['placement_pools']:
-            remote.run(args=['ceph', 'osd', 'pool', 'create',
+            remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                              pool_info['val']['index_pool'], '64', '64'])
             if ctx.rgw.ec_data_pool:
                 create_ec_pool(remote, pool_info['val']['data_pool'],
diff --git a/tasks/util/rados.py b/tasks/util/rados.py
index 63a0848281f1d..a5b27d5b1c782 100644
--- a/tasks/util/rados.py
+++ b/tasks/util/rados.py
@@ -25,25 +25,26 @@ def rados(ctx, remote, cmd, wait=True, check_status=False):
     return proc
 
 def create_ec_pool(remote, name, profile_name, pgnum, profile={}):
-    remote.run(args=['ceph'] + cmd_erasure_code_profile(profile_name, profile))
+    remote.run(args=['sudo', 'ceph'] +
+               cmd_erasure_code_profile(profile_name, profile))
     remote.run(args=[
-        'ceph', 'osd', 'pool', 'create', name,
+        'sudo', 'ceph', 'osd', 'pool', 'create', name,
         str(pgnum), str(pgnum), 'erasure', profile_name,
     ])
 
 def create_replicated_pool(remote, name, pgnum):
     remote.run(args=[
-        'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
+        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
     ])
 
 def create_cache_pool(remote, base_name, cache_name, pgnum, size):
     remote.run(args=[
-        'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
-        ])
+        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
+    ])
     remote.run(args=[
-        'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
-        str(size),
-        ])
+        'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
+        str(size),
+    ])
 
 def cmd_erasure_code_profile(profile_name, profile):
     """
-- 
2.39.5
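
The change applied throughout this patch is mechanical: prepend 'sudo'
to the argument vector, ahead of the adjust-ulimits and ceph-coverage
wrappers and the ceph binary, because /var/run/ceph is mode 0770 and
the daemon admin sockets under it are not accessible to an unprivileged
user. A minimal sketch of the resulting prefix assembly follows, for
illustration only; the build_ceph_args() helper and the testdir path
below are assumptions, not part of the teuthology API:

    def build_ceph_args(testdir, subcommand):
        """Assemble a ceph CLI invocation as these tasks now do.

        Hypothetical helper for illustration; the real tasks build
        this list inline and hand it to remote.run(args=...).
        """
        return [
            'sudo',            # /var/run/ceph is 0770; root is needed
            'adjust-ulimits',  # adjust ulimits for the wrapped command
            'ceph-coverage',   # collect coverage under the test dir
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'ceph',
        ] + list(subcommand)

    # Example: argument vector for `ceph pg dump --format json`
    # (the testdir value is an assumed example path).
    print(build_ceph_args('/home/ubuntu/cephtest',
                          ['pg', 'dump', '--format', 'json']))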