Each image has its own cache and each cache uses its own thread. With
a large replicated cluster, this could result in thousands of extra
threads and gigabytes of extra memory.
Fixes: http://tracker.ceph.com/issues/15930
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
(cherry picked from commit ea35f148257282fe3f3ae02fe7a26cf245cda952)
TestImageReplayer() : m_watch_handle(0)
{
EXPECT_EQ("", connect_cluster_pp(m_local_cluster));
+ EXPECT_EQ(0, m_local_cluster.conf_set("rbd_cache", "false"));
m_local_pool_name = get_temp_pool_name();
EXPECT_EQ(0, m_local_cluster.pool_create(m_local_pool_name.c_str()));
m_local_ioctx));
EXPECT_EQ("", connect_cluster_pp(m_remote_cluster));
+ EXPECT_EQ(0, m_remote_cluster.conf_set("rbd_cache", "false"));
m_remote_pool_name = get_temp_pool_name();
EXPECT_EQ(0, m_remote_cluster.pool_create(m_remote_pool_name.c_str()));
void TestFixture::SetUpTestCase() {
ASSERT_EQ("", connect_cluster_pp(_rados));
+ ASSERT_EQ(0, _rados.conf_set("rbd_cache", "false"));
_local_pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, _rados.pool_create(_local_pool_name.c_str()));
}
}
+ // disable unnecessary librbd cache
+ cct->_conf->set_val_or_die("rbd_cache", "false");
cct->_conf->apply_changes(nullptr);
cct->_conf->complain_about_parse_errors(cct);
std::vector<const char*> cmd_args;
argv_to_vec(argc, argv, cmd_args);
+ // disable unnecessary librbd cache
+ g_ceph_context->_conf->set_val_or_die("rbd_cache", "false");
+
mirror = new rbd::mirror::Mirror(g_ceph_context, cmd_args);
int r = mirror->init();
if (r < 0) {