From: Dhairya Parmar
Date: Thu, 13 Jun 2024 13:00:15 +0000 (+0530)
Subject: src/test: test async i/o after cluster is full
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=445075c509a59fd2afde7538ae6d60141499f36a;p=ceph.git

src/test: test async i/o after cluster is full

This requires a new suite since it cannot be run alongside the other
async I/O test cases. Therefore, apart from adding the test case, add a
binary, a shell script to run it, and a YAML file to pick it up in
teuthology.

Fixes: https://tracker.ceph.com/issues/63104
Signed-off-by: Dhairya Parmar
---
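Reviewer note, not part of the change itself: the workunit below only runs
the new gtest binary, so the case can also be exercised by hand. A rough
local sketch, assuming a throwaway vstart cluster and a standard cmake
build tree (paths, daemon counts and commands here are illustrative, not
taken from this patch); note that a plain vstart cluster does not apply
the memstore/full-ratio overrides from client_full.yaml below, so those
settings would have to be reproduced by hand for the full condition to be
reachable:

    cd build
    MON=1 OSD=3 MDS=1 ../src/vstart.sh -n -d    # small disposable cluster
    ./bin/ceph_test_client_async_full \
        --gtest_filter='TestClient.LlreadvLlwritevDataPoolFull'
    ../src/stop.sh                              # tear the cluster down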
diff --git a/qa/suites/fs/libcephfs/tasks/client_full.yaml b/qa/suites/fs/libcephfs/tasks/client_full.yaml
new file mode 100644
index 000000000000..a7e8b1e33f5a
--- /dev/null
+++ b/qa/suites/fs/libcephfs/tasks/client_full.yaml
@@ -0,0 +1,38 @@
+overrides:
+  ceph:
+    cephfs:
+      ec_profile:
+        - disabled
+    log-ignorelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - pausewr flag
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping
+      - is full \(reached quota
+      - POOL_FULL
+      - POOL_BACKFILLFULL
+      - PG_RECOVERY_FULL
+      - PG_DEGRADED
+    conf:
+      mon:
+        mon osd nearfull ratio: 0.6
+        mon osd backfillfull ratio: 0.6
+        mon osd full ratio: 0.7
+      osd:
+        osd mon report interval: 5
+        osd objectstore: memstore
+        osd failsafe full ratio: 1.0
+        memstore device bytes: 200000000
+      client:
+        debug client: 20
+        debug objecter: 20
+        debug objectcacher: 20
+      mds:
+        debug ms: 1
+        debug mds: 20
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - client/test_full.sh
diff --git a/qa/workunits/client/test_full.sh b/qa/workunits/client/test_full.sh
new file mode 100755
index 000000000000..e562383e0e73
--- /dev/null
+++ b/qa/workunits/client/test_full.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -ex
+
+ceph_test_client_async_full
diff --git a/src/test/client/CMakeLists.txt b/src/test/client/CMakeLists.txt
index ed233fb887db..1ef003cfc236 100644
--- a/src/test/client/CMakeLists.txt
+++ b/src/test/client/CMakeLists.txt
@@ -39,4 +39,21 @@ if(${WITH_CEPHFS})
   )
   install(TARGETS ceph_test_client_fscrypt
     DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+  add_executable(ceph_test_client_async_full
+    main.cc
+    fscrypt_conf.cc
+    nonblocking_full.cc
+  )
+  target_link_libraries(ceph_test_client_async_full
+    client
+    global
+    ceph-common
+    cephfs
+    ${UNITTEST_LIBS}
+    ${EXTRALIBS}
+    ${CMAKE_DL_LIBS}
+  )
+  install(TARGETS ceph_test_client_async_full
+    DESTINATION ${CMAKE_INSTALL_BINDIR})
 endif(${WITH_CEPHFS})
diff --git a/src/test/client/TestClient.h b/src/test/client/TestClient.h
index ec9ae93dcf3f..326478f3e5a9 100644
--- a/src/test/client/TestClient.h
+++ b/src/test/client/TestClient.h
@@ -224,6 +224,7 @@ public:
 #endif
     return 0;
   }
+
   void ll_write_n_bytes(struct Fh *fh, size_t to_write, size_t block_size,
                         int iov_cnt, off_t *offset) {
     /// @brief Write N bytes of data asynchronously.
diff --git a/src/test/client/nonblocking_full.cc b/src/test/client/nonblocking_full.cc
new file mode 100644
index 000000000000..ca7bd5b36627
--- /dev/null
+++ b/src/test/client/nonblocking_full.cc
@@ -0,0 +1,114 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2024 Red Hat
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
+
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include "test/client/TestClient.h"
+
+TEST_F(TestClient, LlreadvLlwritevDataPoolFull) {
+  /* Test performing async I/O after filling the fs and making sure it handles
+     the write gracefully */
+
+  Inode *root = nullptr, *file_a = nullptr;
+  Fh *fh_a = nullptr;
+  struct ceph_statx stx_a;
+  root = client->get_root();
+  ASSERT_NE(root, (Inode *)NULL);
+
+  int mypid = getpid();
+  char fname_a[256];
+  sprintf(fname_a, "test_llreadvllwritevdatapoolfullfile_a%u", mypid);
+  ASSERT_EQ(0, client->ll_createx(root, fname_a, 0666,
+                                  O_RDWR | O_CREAT | O_TRUNC,
+                                  &file_a, &fh_a, &stx_a, 0, 0, myperm));
+
+  int64_t rc = 0, bytes_written = 0;
+
+  // this test case cannot handle multiple data pools
+  const std::vector<int64_t> &data_pools = client->mdsmap->get_data_pools();
+  ASSERT_EQ(data_pools.size(), 1);
+
+  struct statvfs stbuf;
+  rc = client->ll_statfs(root, &stbuf, myperm);
+  ASSERT_EQ(rc, 0);
+  // available size = num of free blocks * size of a block
+  size_t data_pool_available_space = stbuf.f_bfree * stbuf.f_bsize;
+  ASSERT_GT(data_pool_available_space, 0);
+
+  off_t offset = 0;
+  // writing blocks of 1GiB
+  const size_t BLOCK_SIZE = 1024 * 1024 * 1024;
+  client->ll_write_n_bytes(fh_a, size_t(data_pool_available_space / 2),
+                           BLOCK_SIZE, 4, &offset);
+
+  // get a new file
+  mypid = getpid();
+  char fname_b[256];
+  Inode *file_b = nullptr;
+  Fh *fh_b = nullptr;
+  struct ceph_statx stx_b;
+  sprintf(fname_b, "test_llreadvllwritevdatapoolfullfile_b%u", mypid);
+  ASSERT_EQ(0, client->ll_createx(root, fname_b, 0666,
+                                  O_RDWR | O_CREAT | O_TRUNC,
+                                  &file_b, &fh_b, &stx_b, 0, 0, myperm));
+
+  client->ll_write_n_bytes(fh_b, size_t((data_pool_available_space * 1.1) / 2),
+                           BLOCK_SIZE, 4, &offset);
+
+  // if we're here then it means the write succeeded but the cluster is full
+  // so let us get a new osdmap epoch
+  const epoch_t osd_epoch = objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch));
+
+  objecter->maybe_request_map();
+
+  // wait till we have a new osdmap epoch
+  ASSERT_TRUE(client->wait_for_osdmap_epoch_update(osd_epoch));
+
+  // with the new osdmap epoch, the pools should return the full flag
+  bool data_pool_full = client->wait_until_true([&]()
+    { return client->is_data_pool_full(data_pools[0]); });
+  ASSERT_TRUE(data_pool_full);
+
+  // write here should fail since the cluster is full
+  const size_t TINY_BLOCK_SIZE = 256 * 1024 * 1024;
+  auto out_buf_0 = std::make_unique<char[]>(TINY_BLOCK_SIZE);
+  memset(out_buf_0.get(), 0xDD, TINY_BLOCK_SIZE);
+  auto out_buf_1 = std::make_unique<char[]>(TINY_BLOCK_SIZE);
+  memset(out_buf_1.get(), 0xFF, TINY_BLOCK_SIZE);
+
+  struct iovec iov_out[2] = {
+    {out_buf_0.get(), TINY_BLOCK_SIZE},
+    {out_buf_1.get(), TINY_BLOCK_SIZE}
+  };
+
+  std::unique_ptr<C_SaferCond> writefinish = nullptr;
+  writefinish.reset(new C_SaferCond("test-nonblocking-writefinish-datapool-full"));
+  rc = client->ll_preadv_pwritev(fh_b, iov_out, 2,
+                                 size_t(data_pool_available_space / 2),
+                                 true, writefinish.get(), nullptr);
+  ASSERT_EQ(rc, 0);
+  bytes_written = writefinish->wait();
+  ASSERT_EQ(bytes_written, -ENOSPC);
+
+  client->ll_release(fh_a);
+  ASSERT_EQ(0, client->ll_unlink(root, fname_a, myperm));
+  client->ll_release(fh_b);
+  ASSERT_EQ(0, client->ll_unlink(root, fname_b, myperm));
+}
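The job above drives the cluster into the full state via the small memstore
device and the lowered mon osd full ratios; outside teuthology roughly the
same state can be approximated with plain CLI. A rough manual sketch,
assuming a disposable test cluster and a CephFS mount at /mnt/cephfs (both
hypothetical, not part of this patch; byte counts are illustrative):

    ceph osd set-full-ratio 0.2        # make the cluster report full early
    dd if=/dev/zero of=/mnt/cephfs/fill bs=4M count=1000    # write until the ratio is crossed
    ceph health detail                 # expect OSD_FULL / POOL_FULL warnings
    ceph osd pool ls detail            # the cephfs data pool should carry the full flag
    dd if=/dev/zero of=/mnt/cephfs/more bs=4M count=1 oflag=direct   # should fail with ENOSPC
    ceph osd set-full-ratio 0.95       # restore the default ratio afterwards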