From 6f9027aa434ab4840cad14999cad7d6965c370ea Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Wed, 10 Jul 2024 11:02:47 +0200
Subject: [PATCH] qa/tasks/cephadm: don't wait for OSDs in create_rbd_pool()

This fails because teuthology.wait_until_osds_up() wants to use the
adjust-ulimits wrapper, which isn't available in the "cephadm shell"
environment.  The whole thing is also redundant because the cephadm
task is supposed to wait for OSDs to come up earlier, in ceph_osds().

Signed-off-by: Ilya Dryomov
(cherry picked from commit 0bda782952a81820752acdbf0b3ab9dbed56fb64)
---
 qa/tasks/cephadm.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/qa/tasks/cephadm.py b/qa/tasks/cephadm.py
index 8617c5025ca77..d0697d06b721b 100644
--- a/qa/tasks/cephadm.py
+++ b/qa/tasks/cephadm.py
@@ -1718,13 +1718,6 @@ def crush_setup(ctx, config):
 def create_rbd_pool(ctx, config):
     if config.get('create_rbd_pool', False):
         cluster_name = config['cluster']
-        log.info('Waiting for OSDs to come up')
-        teuthology.wait_until_osds_up(
-            ctx,
-            cluster=ctx.cluster,
-            remote=ctx.ceph[cluster_name].bootstrap_remote,
-            ceph_cluster=cluster_name,
-        )
         log.info('Creating RBD pool')
         _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
                args=['sudo', 'ceph', '--cluster', cluster_name,
-- 
2.39.5
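
Context for reviewers: the removed call is redundant because ceph_osds() already blocks until the OSDs report up before create_rbd_pool() runs. The sketch below is illustrative only, not part of the patch or of teuthology; it shows the kind of check that works inside a "cephadm shell" environment without the adjust-ulimits wrapper. The exact command line, helper name, and JSON field names are assumptions for illustration.

# Illustrative sketch only -- not the teuthology implementation.
# Polls 'ceph osd stat' through 'cephadm shell' (no adjust-ulimits wrapper
# needed) until the expected number of OSDs is up or a timeout expires.
import json
import subprocess
import time


def wait_for_osds_up(expected, cluster='ceph', timeout=300, interval=5):
    """Wait until at least `expected` OSDs report up, or raise on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output(
            ['sudo', 'cephadm', 'shell', '--', 'ceph', '--cluster', cluster,
             'osd', 'stat', '--format=json'])
        # 'num_up_osds' is assumed to be the relevant field in the JSON output.
        if json.loads(out).get('num_up_osds', 0) >= expected:
            return
        time.sleep(interval)
    raise RuntimeError('timed out waiting for %d OSDs to come up' % expected)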