From 32ffcfdcdedf18cae654752e22fa7bf327314372 Mon Sep 17 00:00:00 2001
From: Venky Shankar
Date: Mon, 6 Feb 2023 23:13:54 -0500
Subject: [PATCH] mgr/volumes: avoid returning -ESHUTDOWN back to cli

I think mgr/volumes is acting a bit fancy by checking whether the
ceph-mgr daemon and its plugins are under termination and returning
-ESHUTDOWN to cli commands. This seems unnecessary: none of the other
mgr plugins do anything like this, and commands issued while ceph-mgr
is terminating are presumably handled in one of the upper layers
anyway.

Fixes: http://tracker.ceph.com/issues/58651
Signed-off-by: Venky Shankar
---
 src/pybind/mgr/mgr_util.py          | 10 ----------
 src/pybind/mgr/volumes/fs/volume.py | 12 ------------
 2 files changed, 22 deletions(-)

diff --git a/src/pybind/mgr/mgr_util.py b/src/pybind/mgr/mgr_util.py
index 8c1e5be44161b..fd352ede139a1 100644
--- a/src/pybind/mgr/mgr_util.py
+++ b/src/pybind/mgr/mgr_util.py
@@ -296,16 +296,10 @@ class CephfsConnectionPool(object):
 class CephfsClient(Generic[Module_T]):
     def __init__(self, mgr: Module_T):
         self.mgr = mgr
-        self.stopping = Event()
         self.connection_pool = CephfsConnectionPool(self.mgr)
 
-    def is_stopping(self) -> bool:
-        return self.stopping.is_set()
-
     def shutdown(self) -> None:
         logger.info("shutting down")
-        # first, note that we're shutting down
-        self.stopping.set()
         # second, delete all libcephfs handles from connection pool
         self.connection_pool.del_all_connections()
 
@@ -348,10 +342,6 @@ def open_filesystem(fsc: CephfsClient, fs_name: str) -> Generator["cephfs.LibCep
     :param fs_name: fs name
     :return: yields a fs handle (ceph filesystem handle)
     """
-    if fsc.is_stopping():
-        raise CephfsConnectionException(-errno.ESHUTDOWN,
-                                        "shutdown in progress")
-
     fs_handle = fsc.connection_pool.get_fs_handle(fs_name)
     try:
         yield fs_handle
diff --git a/src/pybind/mgr/volumes/fs/volume.py b/src/pybind/mgr/volumes/fs/volume.py
index 6d465febc5e3b..99764bfcfd2c3 100644
--- a/src/pybind/mgr/volumes/fs/volume.py
+++ b/src/pybind/mgr/volumes/fs/volume.py
@@ -70,8 +70,6 @@ class VolumeClient(CephfsClient["Module"]):
     def shutdown(self):
         # Overrides CephfsClient.shutdown()
         log.info("shutting down")
-        # first, note that we're shutting down
-        self.stopping.set()
         # stop clones
         self.cloner.shutdown()
         # stop purge threads
@@ -96,14 +94,9 @@ class VolumeClient(CephfsClient["Module"]):
     ### volume operations -- create, rm, ls
 
     def create_fs_volume(self, volname, placement):
-        if self.is_stopping():
-            return -errno.ESHUTDOWN, "", "shutdown in progress"
         return create_volume(self.mgr, volname, placement)
 
     def delete_fs_volume(self, volname, confirm):
-        if self.is_stopping():
-            return -errno.ESHUTDOWN, "", "shutdown in progress"
-
         if confirm != "--yes-i-really-mean-it":
             return -errno.EPERM, "", "WARNING: this will *PERMANENTLY DESTROY* all data " \
                 "stored in the filesystem '{0}'. If you are *ABSOLUTELY CERTAIN* " \
@@ -130,15 +123,10 @@ class VolumeClient(CephfsClient["Module"]):
         return delete_volume(self.mgr, volname, metadata_pool, data_pools)
 
     def list_fs_volumes(self):
-        if self.stopping.is_set():
-            return -errno.ESHUTDOWN, "", "shutdown in progress"
         volumes = list_volumes(self.mgr)
         return 0, json.dumps(volumes, indent=4, sort_keys=True), ""
 
     def rename_fs_volume(self, volname, newvolname, sure):
-        if self.is_stopping():
-            return -errno.ESHUTDOWN, "", "shutdown in progress"
-
         if not sure:
             return (
                 -errno.EPERM, "",
-- 
2.39.5
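
For context, a minimal, self-contained sketch of the shutdown-flag pattern
this patch deletes follows; it is not Ceph code, and the class and method
names are made up for illustration. A threading.Event records that shutdown
has begun, and each command handler short-circuits with -ESHUTDOWN while the
flag is set; the patch removes this guard and leaves termination handling to
the layers above the plugin.

    import errno
    import json
    from threading import Event


    class FlagGuardedClient:
        """Illustrative stand-in for the removed CephfsClient/VolumeClient guard."""

        def __init__(self):
            # the flag the patch removes from CephfsClient.__init__()
            self.stopping = Event()

        def is_stopping(self):
            return self.stopping.is_set()

        def shutdown(self):
            # note that we're shutting down (mirrors the deleted comment)
            self.stopping.set()

        def list_volumes(self):
            # the per-command guard the patch deletes: bail out during shutdown
            if self.is_stopping():
                return -errno.ESHUTDOWN, "", "shutdown in progress"
            return 0, json.dumps([]), ""


    client = FlagGuardedClient()
    client.shutdown()
    print(client.list_volumes())  # e.g. (-108, '', 'shutdown in progress') on Linux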