From dfd01d765304ed8783cef613930e65980d9aee23 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Jun 2020 10:27:03 -0500 Subject: [PATCH] blacklist -> blocklist Signed-off-by: Sage Weil Signed-off-by: Neha Ojha --- PendingReleaseNotes | 25 +++++ doc/cephfs/eviction.rst | 44 ++++---- doc/cephfs/full.rst | 2 +- doc/cephfs/mdcache.rst | 2 +- doc/cephfs/mds-config-ref.rst | 12 +-- doc/man/8/ceph.rst | 16 +-- doc/man/8/mount.ceph.rst | 6 +- doc/rados/operations/control.rst | 14 +-- doc/rados/operations/user-management.rst | 2 +- doc/rbd/rbd-exclusive-locks.rst | 10 +- qa/suites/rgw/tempest/tasks/rgw_tempest.yaml | 2 +- qa/tasks/ceph_manager.py | 2 +- qa/tasks/cephfs/cephfs_test_case.py | 16 +-- qa/tasks/cephfs/mount.py | 6 +- qa/tasks/cephfs/test_client_recovery.py | 12 +-- qa/tasks/cephfs/test_misc.py | 2 +- qa/tasks/cephfs/test_sessionmap.py | 8 +- qa/tasks/cephfs/test_volume_client.py | 4 +- qa/tasks/fs.py | 6 +- qa/tasks/mgr/dashboard/test_auth.py | 6 +- qa/tasks/tempest.py | 8 +- qa/workunits/cephtool/test.sh | 37 ++++--- qa/workunits/rbd/krbd_exclusive_option.sh | 6 +- qa/workunits/rbd/rbd_mirror_journal.sh | 6 +- qa/workunits/rbd/rbd_mirror_snapshot.sh | 6 +- qa/workunits/rbd/test_lock_fence.sh | 4 +- src/client/Client.cc | 78 +++++++------- src/client/Client.h | 2 +- src/common/legacy_config_opts.h | 6 +- src/common/options.cc | 28 ++--- src/crimson/osd/pg.h | 2 +- src/include/cephfs/libcephfs.h | 4 +- src/include/rados.h | 3 +- src/include/rados/librados.h | 12 ++- src/include/rados/librados.hpp | 4 +- src/journal/JournalMetadata.cc | 6 +- src/librados/RadosClient.cc | 11 +- src/librados/RadosClient.h | 4 +- src/librados/librados_c.cc | 11 +- src/librados/librados_cxx.cc | 8 +- src/librbd/ExclusiveLock.cc | 8 +- src/librbd/ManagedLock.cc | 20 ++-- src/librbd/ManagedLock.h | 14 +-- src/librbd/Watcher.cc | 20 ++-- src/librbd/Watcher.h | 6 +- .../cache/ObjectCacherObjectDispatch.cc | 2 +- .../exclusive_lock/PreReleaseRequest.cc | 6 +- src/librbd/internal.cc | 10 +- src/librbd/managed_lock/AcquireRequest.cc | 18 ++-- src/librbd/managed_lock/AcquireRequest.h | 12 +-- src/librbd/managed_lock/BreakRequest.cc | 30 +++--- src/librbd/managed_lock/BreakRequest.h | 20 ++-- src/librbd/watcher/RewatchRequest.cc | 2 +- src/mds/FSMap.cc | 8 +- src/mds/FSMap.h | 4 +- src/mds/Locker.cc | 2 +- src/mds/MDLog.cc | 14 +-- src/mds/MDSContext.cc | 2 +- src/mds/MDSMap.h | 2 +- src/mds/MDSRank.cc | 70 ++++++------ src/mds/MDSRank.h | 2 +- src/mds/Server.cc | 48 ++++----- src/mds/Server.h | 2 +- src/mgr/BaseMgrModule.cc | 4 +- src/mgr/Mgr.cc | 6 +- src/mgr/MgrStandby.cc | 2 +- src/mon/FSCommands.cc | 4 +- src/mon/MDSMonitor.cc | 22 ++-- src/mon/MDSMonitor.h | 2 +- src/mon/MgrMap.h | 2 +- src/mon/MgrMonitor.cc | 14 +-- src/mon/MonCap.cc | 12 ++- src/mon/MonCommands.h | 21 +++- src/mon/OSDMonitor.cc | 101 +++++++++--------- src/mon/OSDMonitor.h | 4 +- src/msg/msg_types.h | 2 +- src/osd/OSD.cc | 14 +-- src/osd/OSDMap.cc | 88 +++++++-------- src/osd/OSDMap.h | 20 ++-- src/osd/PeeringState.cc | 4 +- src/osd/PeeringState.h | 4 +- src/osd/PrimaryLogPG.cc | 26 ++--- src/osd/PrimaryLogPG.h | 4 +- src/osd/error_code.cc | 12 +-- src/osd/error_code.h | 2 +- src/osdc/Journaler.cc | 4 +- src/osdc/Objecter.cc | 51 ++++----- src/osdc/Objecter.h | 22 ++-- src/pybind/mgr/cephadm/services/iscsi.py | 2 +- src/pybind/rados/rados.pyx | 12 +-- src/rgw/rgw_op.h | 4 +- .../librados_test_stub/LibradosTestStub.cc | 6 +- .../MockTestMemRadosClient.h | 8 +- src/test/librados_test_stub/TestIoCtxImpl.cc | 40 +++---- 
src/test/librados_test_stub/TestMemCluster.cc | 12 +-- src/test/librados_test_stub/TestMemCluster.h | 8 +- .../librados_test_stub/TestMemIoCtxImpl.cc | 92 ++++++++-------- .../librados_test_stub/TestMemRadosClient.cc | 20 ++-- .../librados_test_stub/TestMemRadosClient.h | 4 +- src/test/librados_test_stub/TestRadosClient.h | 4 +- .../librados_test_stub/TestWatchNotify.cc | 2 +- src/test/librados_test_stub/TestWatchNotify.h | 2 +- .../managed_lock/test_mock_AcquireRequest.cc | 12 +-- .../managed_lock/test_mock_BreakRequest.cc | 20 ++-- src/test/librbd/mock/MockImageWatcher.h | 2 +- src/test/librbd/rbdrw.py | 2 +- src/test/librbd/test_librbd.cc | 28 ++--- src/test/librbd/test_mock_ExclusiveLock.cc | 8 +- src/test/librbd/test_mock_ManagedLock.cc | 32 +++--- src/test/librbd/test_mock_Watcher.cc | 6 +- src/test/pybind/test_ceph_argparse.py | 24 ++--- src/test/pybind/test_rados.py | 6 +- src/test/pybind/test_rbd.py | 38 +++---- src/test/rbd_mirror/test_mock_ImageMap.cc | 6 +- .../rbd_mirror/test_mock_InstanceReplayer.cc | 10 +- .../rbd_mirror/test_mock_InstanceWatcher.cc | 8 +- .../rbd_mirror/test_mock_LeaderWatcher.cc | 4 +- .../rbd_mirror/test_mock_NamespaceReplayer.cc | 2 +- src/test/rbd_mirror/test_mock_PoolReplayer.cc | 42 ++++---- src/test/rbd_mirror/test_mock_PoolWatcher.cc | 16 +-- src/test/rgw/test_rgw_iam_policy.cc | 36 +++---- src/test/test_stress_watch.cc | 14 +-- src/tools/rbd_mirror/ImageDeleter.cc | 4 +- src/tools/rbd_mirror/ImageReplayer.h | 4 +- src/tools/rbd_mirror/InstanceReplayer.cc | 10 +- src/tools/rbd_mirror/InstanceReplayer.h | 4 +- src/tools/rbd_mirror/InstanceWatcher.cc | 2 +- src/tools/rbd_mirror/Instances.cc | 2 +- src/tools/rbd_mirror/LeaderWatcher.cc | 12 +-- src/tools/rbd_mirror/LeaderWatcher.h | 12 +-- src/tools/rbd_mirror/Mirror.cc | 4 +- src/tools/rbd_mirror/NamespaceReplayer.cc | 10 +- src/tools/rbd_mirror/NamespaceReplayer.h | 2 +- src/tools/rbd_mirror/PoolReplayer.cc | 16 +-- src/tools/rbd_mirror/PoolReplayer.h | 4 +- src/tools/rbd_mirror/PoolWatcher.cc | 22 ++-- src/tools/rbd_mirror/PoolWatcher.h | 4 +- .../rbd_mirror/image_deleter/TrashWatcher.cc | 18 ++-- 138 files changed, 972 insertions(+), 902 deletions(-) diff --git a/PendingReleaseNotes b/PendingReleaseNotes index 53c7c310f766d..c0189f712cf6e 100644 --- a/PendingReleaseNotes +++ b/PendingReleaseNotes @@ -110,3 +110,28 @@ * fs: Names of new FSs, volumes, subvolumes and subvolume groups can only contain alphanumeric and ``-``, ``_`` and ``.`` characters. Some commands or CephX credentials may not work with old FSs with non-conformant names. + +* `blacklist` has been replaced with `blocklist` throughout. The following commands have changed: + + - ``ceph osd blacklist ...`` are now ``ceph osd blocklist ...`` - ``ceph <tell|daemon> osd.<NNN> dump_blacklist`` is now ``ceph <tell|daemon> osd.<NNN> dump_blocklist``
+* The following config options have changed: + + - ``mon osd blacklist default expire`` is now ``mon osd blocklist default expire`` + - ``mon mds blacklist interval`` is now ``mon mds blocklist interval`` + - ``mon mgr blacklist interval`` is now ``mon mgr blocklist interval`` + - ``rbd blacklist on break lock`` is now ``rbd blocklist on break lock`` + - ``rbd blacklist expire seconds`` is now ``rbd blocklist expire seconds`` + - ``mds session blacklist on timeout`` is now ``mds session blocklist on timeout`` + - ``mds session blacklist on evict`` is now ``mds session blocklist on evict`` + +* The following librados API calls have changed: + + - ``rados_blacklist_add`` is now ``rados_blocklist_add``; the former will issue a deprecation warning and be removed in a future release. + - ``rados.blacklist_add`` is now ``rados.blocklist_add`` in the C++ API. + +* The JSON output for the following commands now shows ``blocklist`` instead of ``blacklist``: + + - ``ceph osd dump`` + - ``ceph <tell|daemon> osd.<NNN> dump_blocklist`` diff --git a/doc/cephfs/eviction.rst b/doc/cephfs/eviction.rst index c64972c0ff10f..eb6f70a8ed9d0 100644 --- a/doc/cephfs/eviction.rst +++ b/doc/cephfs/eviction.rst @@ -89,30 +89,30 @@ do that using its unique ID, or various other attributes to identify it: ceph tell mds.0 client evict client_metadata.=4305 -Advanced: Un-blacklisting a client +Advanced: Un-blocklisting a client ================================== -Ordinarily, a blacklisted client may not reconnect to the servers: it +Ordinarily, a blocklisted client may not reconnect to the servers: it must be unmounted and then mounted anew. However, in some situations it may be useful to permit a client that was evicted to attempt to reconnect. -Because CephFS uses the RADOS OSD blacklist to control client eviction, +Because CephFS uses the RADOS OSD blocklist to control client eviction, CephFS clients can be permitted to reconnect by removing them from -the blacklist: +the blocklist: :: - $ ceph osd blacklist ls + $ ceph osd blocklist ls listed 1 entries 127.0.0.1:0/3710147553 2018-03-19 11:32:24.716146 - $ ceph osd blacklist rm 127.0.0.1:0/3710147553 - un-blacklisting 127.0.0.1:0/3710147553 + $ ceph osd blocklist rm 127.0.0.1:0/3710147553 + un-blocklisting 127.0.0.1:0/3710147553 Doing this may put data integrity at risk if other clients have accessed -files that the blacklisted client was doing buffered IO to. It is also not +files that the blocklisted client was doing buffered IO to. It is also not guaranteed to result in a fully functional client -- the best way to get a fully healthy client back after an eviction is to unmount the client and do a fresh mount. @@ -121,7 +121,7 @@ If you are trying to reconnect clients in this way, you may also find it useful to set ``client_reconnect_stale`` to true in the FUSE client, to prompt the client to try to reconnect. -Advanced: Configuring blacklisting +Advanced: Configuring blocklisting ================================== If you are experiencing frequent client evictions, due to slow @@ -131,27 +131,27 @@ issue, then you may want to ask the MDS to be less strict. It is possible to respond to slow clients by simply dropping their MDS sessions, but permit them to re-open sessions and permit them to continue talking to OSDs. To enable this mode, set -``mds_session_blacklist_on_timeout`` to false on your MDS nodes. +``mds_session_blocklist_on_timeout`` to false on your MDS nodes.
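Applied at runtime, a minimal sketch of this tuning follows; it assumes a cluster recent enough to carry the centralized config database (``ceph config set``, available since Nautilus) and uses the post-rename option name introduced by this patch:

::

    # Drop slow clients' MDS sessions without blocklisting them; they may
    # re-open sessions and continue talking to OSDs.
    ceph config set mds mds_session_blocklist_on_timeout false

    # Confirm the value the MDS daemons will now observe.
    ceph config get mds mds_session_blocklist_on_timeout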
For the equivalent behaviour on manual evictions, set -``mds_session_blacklist_on_evict`` to false. +``mds_session_blocklist_on_evict`` to false. -Note that if blacklisting is disabled, then evicting a client will +Note that if blocklisting is disabled, then evicting a client will only have an effect on the MDS you send the command to. On a system with multiple active MDS daemons, you would need to send an -eviction command to each active daemon. When blacklisting is enabled +eviction command to each active daemon. When blocklisting is enabled (the default), sending an eviction command to just a single -MDS is sufficient, because the blacklist propagates it to the others. +MDS is sufficient, because the blocklist propagates it to the others. -.. _background_blacklisting_and_osd_epoch_barrier: +.. _background_blocklisting_and_osd_epoch_barrier: -Background: Blacklisting and OSD epoch barrier +Background: Blocklisting and OSD epoch barrier ============================================== -After a client is blacklisted, it is necessary to make sure that +After a client is blocklisted, it is necessary to make sure that other clients and MDS daemons have the latest OSDMap (including -the blacklist entry) before they try to access any data objects -that the blacklisted client might have been accessing. +the blocklist entry) before they try to access any data objects +that the blocklisted client might have been accessing. This is ensured using an internal "osdmap epoch barrier" mechanism. @@ -159,12 +159,12 @@ The purpose of the barrier is to ensure that when we hand out any capabilities which might allow touching the same RADOS objects, the clients we hand out the capabilities to must have a sufficiently recent OSD map to not race with cancelled operations (from ENOSPC) or -blacklisted clients (from evictions). +blocklisted clients (from evictions). More specifically, the cases where an epoch barrier is set are: - * Client eviction (where the client is blacklisted and other clients - must wait for a post-blacklist epoch to touch the same objects). + * Client eviction (where the client is blocklisted and other clients + must wait for a post-blocklist epoch to touch the same objects). * OSD map full flag handling in the client (where the client may cancel some OSD ops from a pre-full epoch, so other clients must wait until the full epoch or later before touching the same objects). diff --git a/doc/cephfs/full.rst b/doc/cephfs/full.rst index 35c5ff266e922..fe0616cb6922d 100644 --- a/doc/cephfs/full.rst +++ b/doc/cephfs/full.rst @@ -40,7 +40,7 @@ time the OSD full flag is sent. Clients update the ``osd_epoch_barrier`` when releasing capabilities on files affected by cancelled operations, in order to ensure that these cancelled operations do not interfere with subsequent access to the data objects by the MDS or other clients. For -more on the epoch barrier mechanism, see :ref:`background_blacklisting_and_osd_epoch_barrier`. +more on the epoch barrier mechanism, see :ref:`background_blocklisting_and_osd_epoch_barrier`. Legacy (pre-hammer) behavior ---------------------------- diff --git a/doc/cephfs/mdcache.rst b/doc/cephfs/mdcache.rst index 7e397b29d396a..f2e20238cc61b 100644 --- a/doc/cephfs/mdcache.rst +++ b/doc/cephfs/mdcache.rst @@ -22,7 +22,7 @@ Clients can request capabilities and will generally get them, but when there is competing access or memory pressure on the MDS, they may be **revoked**. When a capability is revoked, the client is responsible for returning it as soon as it is able.
Clients that fail to do so in a -timely fashion may end up **blacklisted** and unable to communicate with +timely fashion may end up **blocklisted** and unable to communicate with the cluster. Since the cache is distributed, the MDS must take great care to ensure diff --git a/doc/cephfs/mds-config-ref.rst b/doc/cephfs/mds-config-ref.rst index 83caf795b8977..0df15275bfcf4 100644 --- a/doc/cephfs/mds-config-ref.rst +++ b/doc/cephfs/mds-config-ref.rst @@ -69,14 +69,14 @@ :Default: ``15`` -``mds blacklist interval`` +``mds blocklist interval`` -:Description: The blacklist duration for failed MDSs in the OSD map. Note, +:Description: The blocklist duration for failed MDSs in the OSD map. Note, this controls how long failed MDS daemons will stay in the - OSDMap blacklist. It has no effect on how long something is - blacklisted when the administrator blacklists it manually. For - example, ``ceph osd blacklist add`` will still use the default - blacklist time. + OSDMap blocklist. It has no effect on how long something is + blocklisted when the administrator blocklists it manually. For + example, ``ceph osd blocklist add`` will still use the default + blocklist time. :Type: Float :Default: ``24.0*60.0`` diff --git a/doc/man/8/ceph.rst b/doc/man/8/ceph.rst index 78aeb5ef29eab..33077d95a6f5c 100644 --- a/doc/man/8/ceph.rst +++ b/doc/man/8/ceph.rst @@ -37,7 +37,7 @@ Synopsis | **ceph** **mon** [ *add* \| *dump* \| *getmap* \| *remove* \| *stat* ] ... -| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *ls* \| *lspools* \| *map* \| *metadata* \| *ok-to-stop* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *force-create-pg* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *safe-to-destroy* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ... +| **ceph** **osd** [ *blocklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *ls* \| *lspools* \| *map* \| *metadata* \| *ok-to-stop* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *force-create-pg* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *safe-to-destroy* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ... | **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ... @@ -613,27 +613,27 @@ osd Manage OSD configuration and administration. It uses some additional subcommands. -Subcommand ``blacklist`` manage blacklisted clients. It uses some additional +Subcommand ``blocklist`` manage blocklisted clients. It uses some additional subcommands. 
-Subcommand ``add`` add to blacklist (optionally until seconds +Subcommand ``add`` add to blocklist (optionally until seconds from now) Usage:: - ceph osd blacklist add {} + ceph osd blocklist add {} -Subcommand ``ls`` show blacklisted clients +Subcommand ``ls`` show blocklisted clients Usage:: - ceph osd blacklist ls + ceph osd blocklist ls -Subcommand ``rm`` remove from blacklist +Subcommand ``rm`` remove from blocklist Usage:: - ceph osd blacklist rm + ceph osd blocklist rm Subcommand ``blocked-by`` prints a histogram of which OSDs are blocking their peers diff --git a/doc/man/8/mount.ceph.rst b/doc/man/8/mount.ceph.rst index cbd7ab5834d9e..f5d070867b9ad 100644 --- a/doc/man/8/mount.ceph.rst +++ b/doc/man/8/mount.ceph.rst @@ -82,15 +82,15 @@ Basic path to file containing the secret key to use with CephX :command:`recover_session=` - Set auto reconnect mode in the case where the client is blacklisted. The + Set auto reconnect mode in the case where the client is blocklisted. The available modes are ``no`` and ``clean``. The default is ``no``. - ``no``: never attempt to reconnect when client detects that it has been - blacklisted. Blacklisted clients will not attempt to reconnect and + blocklisted. Blocklisted clients will not attempt to reconnect and their operations will fail too. - ``clean``: client reconnects to the Ceph cluster automatically when it - detects that it has been blacklisted. During reconnect, client drops + detects that it has been blocklisted. During reconnect, client drops dirty data/metadata, invalidates page caches and writable file handles. After reconnect, file locks become stale because the MDS loses track of them. If an inode contains any stale file locks, read/write on the inode diff --git a/doc/rados/operations/control.rst b/doc/rados/operations/control.rst index fdd7e3a903078..7ec372282c230 100644 --- a/doc/rados/operations/control.rst +++ b/doc/rados/operations/control.rst @@ -237,18 +237,18 @@ Deployments utilizing Nautilus (or later revisions of Luminous and Mimic) that have no pre-Luminous cients may instead wish to instead enable the `balancer`` module for ``ceph-mgr``. -Add/remove an IP address to/from the blacklist. When adding an address, -you can specify how long it should be blacklisted in seconds; otherwise, -it will default to 1 hour. A blacklisted address is prevented from -connecting to any OSD. Blacklisting is most often used to prevent a +Add/remove an IP address to/from the blocklist. When adding an address, +you can specify how long it should be blocklisted in seconds; otherwise, +it will default to 1 hour. A blocklisted address is prevented from +connecting to any OSD. Blocklisting is most often used to prevent a lagging metadata server from making bad changes to data on the OSDs. These commands are mostly only useful for failure testing, as -blacklists are normally maintained automatically and shouldn't need +blocklists are normally maintained automatically and shouldn't need manual intervention. :: - ceph osd blacklist add ADDRESS[:source_port] [TIME] - ceph osd blacklist rm ADDRESS[:source_port] + ceph osd blocklist add ADDRESS[:source_port] [TIME] + ceph osd blocklist rm ADDRESS[:source_port] Creates/deletes a snapshot of a pool. 
:: diff --git a/doc/rados/operations/user-management.rst b/doc/rados/operations/user-management.rst index 7b7713a83bd04..6612f82ae859d 100644 --- a/doc/rados/operations/user-management.rst +++ b/doc/rados/operations/user-management.rst @@ -275,7 +275,7 @@ The following entries describe valid capability profiles: :Description: Gives a user permissions to manipulate RBD images. When used as a Monitor cap, it provides the minimal privileges required by an RBD client application; this includes the ability - to blacklist other client users. When used as an OSD cap, it + to blocklist other client users. When used as an OSD cap, it provides read-write access to the specified pool to an RBD client application. The Manager cap supports optional ``pool`` and ``namespace`` keyword arguments. diff --git a/doc/rbd/rbd-exclusive-locks.rst b/doc/rbd/rbd-exclusive-locks.rst index cedc0d11191a7..f02d6d48f4492 100644 --- a/doc/rbd/rbd-exclusive-locks.rst +++ b/doc/rbd/rbd-exclusive-locks.rst @@ -62,8 +62,8 @@ accessing RBD data in an uncoordinated and destructive manner. Thus, in the event that a lock cannot be acquired in the standard graceful manner, the overtaking process not only breaks the lock, but -also blacklists the previous lock holder. This is negotiated between -the new client process and the Ceph Mon: upon receiving the blacklist +also blocklists the previous lock holder. This is negotiated between +the new client process and the Ceph Mon: upon receiving the blocklist request, * the Mon instructs the relevant OSDs to no longer serve requests from @@ -73,10 +73,10 @@ request, * once the new client has acquired the lock, it can commence writing to the image. -Blacklisting is thus a form of storage-level resource `fencing`_. +Blocklisting is thus a form of storage-level resource `fencing`_. -In order for blacklisting to work, the client must have the ``osd -blacklist`` capability. This capability is included in the ``profile +In order for blocklisting to work, the client must have the ``osd +blocklist`` capability. This capability is included in the ``profile rbd`` capability profile, which should generally be set on all Ceph :ref:`client identities <user-management>` using RBD. diff --git a/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml b/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml index 99c776df78891..c08272dcd9490 100644 --- a/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml +++ b/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml @@ -40,7 +40,7 @@ tasks: object-storage-feature-enabled: container_sync: false discoverability: true - blacklist: + blocklist: - .*test_account_quotas_negative.AccountQuotasNegativeTest.test_user_modify_quota - .*test_container_acl_negative.ObjectACLsNegativeTest.* - .*test_container_services_negative.ContainerNegativeTest.test_create_container_metadata_.* diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index 0ea99210b6531..4c9c54ed9ff82 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -1445,7 +1445,7 @@ class CephManager: wait for all specified osds, but some of them could be moved out of osdmap, so we cannot get their updated stat seq from monitor anymore. in that case, you need - to pass a blacklist. + to pass a blocklist. :param wait_for_mon: wait for mon to be synced with mgr. 0 to disable it.
(5 min by default) """ diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py index 42d78f8caef31..e0648f9475636 100644 --- a/qa/tasks/cephfs/cephfs_test_case.py +++ b/qa/tasks/cephfs/cephfs_test_case.py @@ -104,17 +104,17 @@ class CephFSTestCase(CephTestCase): self.fs = None # is now invalid! self.recovery_fs = None - # In case anything is in the OSD blacklist list, clear it out. This is to avoid - # the OSD map changing in the background (due to blacklist expiry) while tests run. + # In case anything is in the OSD blocklist list, clear it out. This is to avoid + # the OSD map changing in the background (due to blocklist expiry) while tests run. try: - self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "clear") + self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "clear") except CommandFailedError: # Fallback for older Ceph cluster - blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd", - "dump", "--format=json-pretty"))['blacklist'] - log.info("Removing {0} blacklist entries".format(len(blacklist))) - for addr, blacklisted_at in blacklist.items(): - self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr) + blocklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd", + "dump", "--format=json-pretty"))['blocklist'] + log.info("Removing {0} blocklist entries".format(len(blocklist))) + for addr, blocklisted_at in blocklist.items(): + self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blocklist", "rm", addr) client_mount_ids = [m.client_id for m in self.mounts] # In case the test changes the IDs of clients, stash them so that we can diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py index da346f944aa71..83524f57cea18 100644 --- a/qa/tasks/cephfs/mount.py +++ b/qa/tasks/cephfs/mount.py @@ -433,10 +433,10 @@ class CephFSMount(object): finally: self.umount_wait() - def is_blacklisted(self): + def is_blocklisted(self): addr = self.get_global_addr() - blacklist = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "blacklist", "ls", "--format=json")) - for b in blacklist: + blocklist = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "blocklist", "ls", "--format=json")) + for b in blocklist: if addr == b["addr"]: return True return False diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index d1f9e245153c0..bec07ec2739ef 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -619,10 +619,10 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.kill_cleanup() - def test_reconnect_after_blacklisted(self): + def test_reconnect_after_blocklisted(self): """ - Test reconnect after blacklisted. - - writing to a fd that was opened before blacklist should return -EBADF + Test reconnect after blocklisted. 
+ - writing to a fd that was opened before blocklist should return -EBADF - reading/writing to a file with lost file locks should return -EIO - readonly fd should continue to work """ @@ -640,7 +640,7 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.wait_until_mounted() - path = os.path.join(self.mount_a.mountpoint, 'testfile_reconnect_after_blacklisted') + path = os.path.join(self.mount_a.mountpoint, 'testfile_reconnect_after_blocklisted') pyscript = dedent(""" import os import sys @@ -660,7 +660,7 @@ class TestClientRecovery(CephFSTestCase): os.read(fd4, 1); fcntl.flock(fd4, fcntl.LOCK_SH | fcntl.LOCK_NB) - print("blacklist") + print("blocklist") sys.stdout.flush() sys.stdin.readline() @@ -669,7 +669,7 @@ class TestClientRecovery(CephFSTestCase): time.sleep(10); # trigger 'open session' message. kclient relies on 'session reject' message - # to detect if itself is blacklisted + # to detect if itself is blocklisted try: os.stat("{path}.1") except: diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py index 44d9ee420bedc..14a7e662b1064 100644 --- a/qa/tasks/cephfs/test_misc.py +++ b/qa/tasks/cephfs/test_misc.py @@ -149,7 +149,7 @@ class TestMisc(CephFSTestCase): cap_waited, session_timeout )) - self.assertTrue(self.mount_a.is_blacklisted()) + self.assertTrue(self.mount_a.is_blocklisted()) cap_holder.stdin.close() try: cap_holder.wait() diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py index f1c535eb03d19..13dc9e8cf520a 100644 --- a/qa/tasks/cephfs/test_sessionmap.py +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -188,12 +188,12 @@ class TestSessionMap(CephFSTestCase): with self.assertRaises(CommandFailedError): self.mount_b.mount_wait(mount_path="/foo/bar") - def test_session_evict_blacklisted(self): + def test_session_evict_blocklisted(self): """ - Check that mds evicts blacklisted client + Check that mds evicts blocklisted client """ if not isinstance(self.mount_a, FuseMount): - self.skipTest("Requires FUSE client to use is_blacklisted()") + self.skipTest("Requires FUSE client to use is_blocklisted()") self.fs.set_max_mds(2) status = self.fs.wait_for_daemons() @@ -214,7 +214,7 @@ class TestSessionMap(CephFSTestCase): mount_a_client_id = self.mount_a.get_global_id() self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id], mds_id=self.fs.get_rank(rank=0, status=status)['name']) - self.wait_until_true(lambda: self.mount_a.is_blacklisted(), timeout=30) + self.wait_until_true(lambda: self.mount_a.is_blocklisted(), timeout=30) # 10 seconds should be enough for evicting client time.sleep(10) diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py index 785a6914772d8..b7fd51dac47d1 100644 --- a/qa/tasks/cephfs/test_volume_client.py +++ b/qa/tasks/cephfs/test_volume_client.py @@ -461,7 +461,7 @@ vc.disconnect() # Evicted guest client, guest_mounts[0], should not be able to do # anymore metadata ops. It should start failing all operations - # when it sees that its own address is in the blacklist. + # when it sees that its own address is in the blocklist. 
try: guest_mounts[0].write_n_mb("rogue.bin", 1) except CommandFailedError: @@ -469,7 +469,7 @@ vc.disconnect() else: raise RuntimeError("post-eviction write should have failed!") - # The blacklisted guest client should now be unmountable + # The blocklisted guest client should now be unmountable guest_mounts[0].umount_wait() # Guest client, guest_mounts[1], using the same auth ID 'guest', but diff --git a/qa/tasks/fs.py b/qa/tasks/fs.py index 70caceaf22d59..4f7a3e2060b6c 100644 --- a/qa/tasks/fs.py +++ b/qa/tasks/fs.py @@ -55,11 +55,11 @@ def clients_evicted(ctx, config): mount = mounts.get(client) if mount is not None: if evicted: - log.info("confirming client {} is blacklisted".format(client)) - assert mount.is_blacklisted() + log.info("confirming client {} is blocklisted".format(client)) + assert mount.is_blocklisted() elif client in no_session: log.info("client {} should not be evicted but has no session with an MDS".format(client)) - mount.is_blacklisted() # for debugging + mount.is_blocklisted() # for debugging should_assert = True if should_assert: raise RuntimeError("some clients which should not be evicted have no session with an MDS?") diff --git a/qa/tasks/mgr/dashboard/test_auth.py b/qa/tasks/mgr/dashboard/test_auth.py index e76708a9c43be..12ff14304db68 100644 --- a/qa/tasks/mgr/dashboard/test_auth.py +++ b/qa/tasks/mgr/dashboard/test_auth.py @@ -99,12 +99,12 @@ class AuthTest(DashboardTestCase): self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) self.set_jwt_token(None) - def test_remove_from_blacklist(self): + def test_remove_from_blocklist(self): self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5']) self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - # the following call adds the token to the blacklist + # the following call adds the token to the blocklist self._post("/api/auth/logout") self.assertStatus(200) self._get("/api/host") @@ -115,7 +115,7 @@ class AuthTest(DashboardTestCase): self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) self.assertStatus(201) self.set_jwt_token(self.jsonBody()['token']) - # the following call removes expired tokens from the blacklist + # the following call removes expired tokens from the blocklist self._post("/api/auth/logout") self.assertStatus(200) diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py index cf211b0b2cf51..cee942e37e607 100644 --- a/qa/tasks/tempest.py +++ b/qa/tasks/tempest.py @@ -148,8 +148,8 @@ def run_tempest(ctx, config): log.info('Configuring Tempest') for (client, cconf) in config.items(): - blacklist = cconf.get('blacklist', []) - assert isinstance(blacklist, list) + blocklist = cconf.get('blocklist', []) + assert isinstance(blocklist, list) run_in_tempest_venv(ctx, client, [ 'tempest', @@ -159,7 +159,7 @@ def run_tempest(ctx, config): '--workspace', 'rgw', '--regex', '^tempest.api.object_storage', - '--black-regex', '|'.join(blacklist) + '--black-regex', '|'.join(blocklist) ]) try: yield @@ -219,7 +219,7 @@ def task(ctx, config): object-storage-feature-enabled: container_sync: false discoverability: false - blacklist: + blocklist: # please strip half of these items after merging PRs #15369 # and #12704 - .*test_list_containers_reverse_order.* diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh index b518137fc5dcc..49a4ca3996b5d 100755 --- a/qa/workunits/cephtool/test.sh +++ b/qa/workunits/cephtool/test.sh @@ -1386,34 +1386,37 @@ function test_mon_config_key() function 
test_mon_osd() { # - # osd blacklist + # osd blocklist # bl=192.168.0.1:0/1000 - ceph osd blacklist add $bl - ceph osd blacklist ls | grep $bl - ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl + ceph osd blocklist add $bl + ceph osd blocklist ls | grep $bl + ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl ceph osd dump --format=json-pretty | grep $bl ceph osd dump | grep $bl - ceph osd blacklist rm $bl - ceph osd blacklist ls | expect_false grep $bl + ceph osd blocklist rm $bl + ceph osd blocklist ls | expect_false grep $bl bl=192.168.0.1 # test without nonce, invalid nonce - ceph osd blacklist add $bl - ceph osd blacklist ls | grep $bl - ceph osd blacklist rm $bl - ceph osd blacklist ls | expect_false grep $bl - expect_false "ceph osd blacklist $bl/-1" - expect_false "ceph osd blacklist $bl/foo" + ceph osd blocklist add $bl + ceph osd blocklist ls | grep $bl + ceph osd blocklist rm $bl + ceph osd blocklist ls | expect_false grep $bl + expect_false "ceph osd blocklist $bl/-1" + expect_false "ceph osd blocklist $bl/foo" # test with wrong address - expect_false "ceph osd blacklist 1234.56.78.90/100" + expect_false "ceph osd blocklist 1234.56.78.90/100" # Test `clear` - ceph osd blacklist add $bl - ceph osd blacklist ls | grep $bl - ceph osd blacklist clear - ceph osd blacklist ls | expect_false grep $bl + ceph osd blocklist add $bl + ceph osd blocklist ls | grep $bl + ceph osd blocklist clear + ceph osd blocklist ls | expect_false grep $bl + + # deprecated syntax? + ceph osd blacklist ls # # osd crush diff --git a/qa/workunits/rbd/krbd_exclusive_option.sh b/qa/workunits/rbd/krbd_exclusive_option.sh index 09edc13865438..f8493ce98943b 100755 --- a/qa/workunits/rbd/krbd_exclusive_option.sh +++ b/qa/workunits/rbd/krbd_exclusive_option.sh @@ -53,13 +53,13 @@ function assert_unlocked() { grep '"lockers":\[\]' } -function blacklist_add() { +function blocklist_add() { local dev_id="${1#/dev/rbd}" local client_addr client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)" - ceph osd blacklist add $client_addr + ceph osd blocklist add $client_addr } SYSFS_DIR="/sys/bus/rbd/devices" @@ -203,7 +203,7 @@ assert_unlocked DEV=$(sudo rbd map $IMAGE_NAME) assert_locked $DEV dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct -{ sleep 10; blacklist_add $DEV; } & +{ sleep 10; blocklist_add $DEV; } & PID=$! 
expect_false dd if=/dev/urandom of=$DEV bs=4k count=200000 oflag=direct wait $PID diff --git a/qa/workunits/rbd/rbd_mirror_journal.sh b/qa/workunits/rbd/rbd_mirror_journal.sh index da856861b0c95..84fd2424f4ba8 100755 --- a/qa/workunits/rbd/rbd_mirror_journal.sh +++ b/qa/workunits/rbd/rbd_mirror_journal.sh @@ -547,7 +547,7 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then # teuthology will trash the daemon - testlog "TEST: no blacklists" - CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blacklist ls 2>&1 | grep -q "listed 0 entries" - CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blacklist ls 2>&1 | grep -q "listed 0 entries" + testlog "TEST: no blocklists" + CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries" + CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries" fi diff --git a/qa/workunits/rbd/rbd_mirror_snapshot.sh b/qa/workunits/rbd/rbd_mirror_snapshot.sh index 7ab2239db03a3..6452739026638 100755 --- a/qa/workunits/rbd/rbd_mirror_snapshot.sh +++ b/qa/workunits/rbd/rbd_mirror_snapshot.sh @@ -448,7 +448,7 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then # teuthology will trash the daemon - testlog "TEST: no blacklists" - CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blacklist ls 2>&1 | grep -q "listed 0 entries" - CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blacklist ls 2>&1 | grep -q "listed 0 entries" + testlog "TEST: no blocklists" + CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries" + CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries" fi diff --git a/qa/workunits/rbd/test_lock_fence.sh b/qa/workunits/rbd/test_lock_fence.sh index fd8fa6347d224..7cf2d21c53be1 100755 --- a/qa/workunits/rbd/test_lock_fence.sh +++ b/qa/workunits/rbd/test_lock_fence.sh @@ -26,7 +26,7 @@ clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}') echo "clientaddr: $clientaddr" echo "clientid: $clientid" -ceph osd blacklist add $clientaddr || exit 1 +ceph osd blocklist add $clientaddr || exit 1 wait $iochild rbdrw_exitcode=$? 
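The fencing path these tests exercise can also be reproduced by hand. The sketch below is illustrative only: ``rbd/img`` and the client address are hypothetical stand-ins, and the address would normally be copied from the ``rbd lock list`` output:

::

    # Identify the current lock holder and its address.
    rbd lock list rbd/img

    # Fence that client; its in-flight writes will now fail.
    ceph osd blocklist add 192.168.0.10:0/123456

    # Verify the entry, then remove it once the stale lock is cleaned up.
    ceph osd blocklist ls
    ceph osd blocklist rm 192.168.0.10:0/123456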
@@ -39,7 +39,7 @@ else fi set -e -ceph osd blacklist rm $clientaddr +ceph osd blocklist rm $clientaddr rbd lock remove $IMAGE $LOCKID "$clientid" # rbdrw will have exited with an existing watch, so, until #3527 is fixed, # hang out until the watch expires diff --git a/src/client/Client.cc b/src/client/Client.cc index 7401ae59d5b16..1fcdc85ec6b48 100755 --- a/src/client/Client.cc +++ b/src/client/Client.cc @@ -481,7 +481,7 @@ void Client::dump_status(Formatter *f) f->dump_int("mds_epoch", mdsmap->get_epoch()); f->dump_int("osd_epoch", osd_epoch); f->dump_int("osd_epoch_barrier", cap_epoch_barrier); - f->dump_bool("blacklisted", blacklisted); + f->dump_bool("blocklisted", blocklisted); } } @@ -491,7 +491,7 @@ void Client::_pre_init() objecter_finisher.start(); filer.reset(new Filer(objecter, &objecter_finisher)); - objecter->enable_blacklist_events(); + objecter->enable_blocklist_events(); objectcacher->start(); } @@ -1741,8 +1741,8 @@ int Client::make_request(MetaRequest *request, if (request->aborted()) break; - if (blacklisted) { - request->abort(-EBLACKLISTED); + if (blocklisted) { + request->abort(-EBLOCKLISTED); break; } @@ -2522,58 +2522,58 @@ void Client::_handle_full_flag(int64_t pool) void Client::handle_osd_map(const MConstRef<MOSDMap>& m) { - std::set<entity_addr_t> new_blacklists; - objecter->consume_blacklist_events(&new_blacklists); + std::set<entity_addr_t> new_blocklists; + objecter->consume_blocklist_events(&new_blocklists); const auto myaddrs = messenger->get_myaddrs(); - bool new_blacklist = false; + bool new_blocklist = false; bool prenautilus = objecter->with_osdmap( [&](const OSDMap& o) { return o.require_osd_release < ceph_release_t::nautilus; }); - if (!blacklisted) { + if (!blocklisted) { for (auto a : myaddrs.v) { - // blacklist entries are always TYPE_ANY for nautilus+ + // blocklist entries are always TYPE_ANY for nautilus+ a.set_type(entity_addr_t::TYPE_ANY); - if (new_blacklists.count(a)) { - new_blacklist = true; + if (new_blocklists.count(a)) { + new_blocklist = true; break; } if (prenautilus) { // ...except pre-nautilus, they were TYPE_LEGACY a.set_type(entity_addr_t::TYPE_LEGACY); - if (new_blacklists.count(a)) { - new_blacklist = true; + if (new_blocklists.count(a)) { + new_blocklist = true; break; } } } } - if (new_blacklist) { + if (new_blocklist) { auto epoch = objecter->with_osdmap([](const OSDMap &o){ return o.get_epoch(); }); - lderr(cct) << "I was blacklisted at osd epoch " << epoch << dendl; - blacklisted = true; + lderr(cct) << "I was blocklisted at osd epoch " << epoch << dendl; + blocklisted = true; - _abort_mds_sessions(-EBLACKLISTED); + _abort_mds_sessions(-EBLOCKLISTED); // Since we know all our OSD ops will fail, cancel them all preemptively, // so that on an unhealthy cluster we can umount promptly even if e.g. // some PGs were inaccessible. - objecter->op_cancel_writes(-EBLACKLISTED); + objecter->op_cancel_writes(-EBLOCKLISTED); } - if (blacklisted) { - // Handle case where we were blacklisted but no longer are - blacklisted = objecter->with_osdmap([myaddrs](const OSDMap &o){ - return o.is_blacklisted(myaddrs);}); + if (blocklisted) { + // Handle case where we were blocklisted but no longer are - blocklisted = objecter->with_osdmap([myaddrs](const OSDMap &o){ + return o.is_blocklisted(myaddrs);}); } - // Always subscribe to next osdmap for blacklisted client - // until this client is not blacklisted. + // Always subscribe to next osdmap for blocklisted client + // until this client is not blocklisted.
+ if (blocklisted) { objecter->maybe_request_map(); } @@ -4245,7 +4245,7 @@ void Client::remove_session_caps(MetaSession *s, int err) } caps &= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER; if (caps && !in->caps_issued_mask(caps, true)) { - if (err == -EBLACKLISTED) { + if (err == -EBLOCKLISTED) { if (in->oset.dirty_or_tx) { lderr(cct) << __func__ << " still has dirty data on " << *in << dendl; in->set_async_err(err); @@ -6208,8 +6208,8 @@ void Client::_unmount(bool abort) std::unique_lock lock{client_lock}; - if (abort || blacklisted) { - ldout(cct, 2) << "unmounting (" << (abort ? "abort)" : "blacklisted)") << dendl; + if (abort || blocklisted) { + ldout(cct, 2) << "unmounting (" << (abort ? "abort)" : "blocklisted)") << dendl; } else { ldout(cct, 2) << "unmounting" << dendl; } @@ -6280,7 +6280,7 @@ void Client::_unmount(bool abort) // prevent inode from getting freed anchor.emplace_back(in); - if (abort || blacklisted) { + if (abort || blocklisted) { objectcacher->purge_set(&in->oset); } else if (!in->caps.empty()) { _release(in); @@ -6289,7 +6289,7 @@ void Client::_unmount(bool abort) } } - if (abort || blacklisted) { + if (abort || blocklisted) { for (auto p = dirty_list.begin(); !p.end(); ) { Inode *in = *p; ++p; @@ -6425,12 +6425,12 @@ void Client::tick() trim_cache(true); - if (blacklisted && (is_mounted() || is_unmounting()) && + if (blocklisted && (is_mounted() || is_unmounting()) && last_auto_reconnect + 30 * 60 < now && cct->_conf.get_val<bool>("client_reconnect_stale")) { messenger->client_reset(); fd_gen++; // invalidate open files - blacklisted = false; + blocklisted = false; _kick_stale_sessions(); last_auto_reconnect = now; } @@ -14076,7 +14076,7 @@ int Client::set_deleg_timeout(uint32_t timeout) std::scoped_lock lock(client_lock); /* - * The whole point is to prevent blacklisting so we must time out the + * The whole point is to prevent blocklisting so we must time out the * delegation before the session autoclose timeout kicks in.
*/ if (timeout >= mdsmap->get_session_autoclose()) @@ -14444,7 +14444,7 @@ void Client::ms_handle_remote_reset(Connection *con) case MetaSession::STATE_OPEN: { - objecter->maybe_request_map(); /* to check if we are blacklisted */ + objecter->maybe_request_map(); /* to check if we are blocklisted */ if (cct->_conf.get_val<bool>("client_reconnect_stale")) { ldout(cct, 1) << "reset from mds we were open; close mds session for reconnect" << dendl; _closed_mds_session(s); @@ -14870,8 +14870,8 @@ int Client::start_reclaim(const std::string& uuid, unsigned flags, if (flags & CEPH_RECLAIM_RESET) return 0; - // use blacklist to check if target session was killed - // (config option mds_session_blacklist_on_evict needs to be true) + // use blocklist to check if target session was killed + // (config option mds_session_blocklist_on_evict needs to be true) ldout(cct, 10) << __func__ << ": waiting for OSD epoch " << reclaim_osd_epoch << dendl; bs::error_code ec; l.unlock(); @@ -14881,11 +14881,11 @@ int Client::start_reclaim(const std::string& uuid, unsigned flags, if (ec) return ceph::from_error_code(ec); - bool blacklisted = objecter->with_osdmap( + bool blocklisted = objecter->with_osdmap( [this](const OSDMap &osd_map) -> bool { - return osd_map.is_blacklisted(reclaim_target_addrs); + return osd_map.is_blocklisted(reclaim_target_addrs); }); - if (blacklisted) + if (blocklisted) return -ENOTRECOVERABLE; metadata["reclaiming_uuid"] = uuid; diff --git a/src/client/Client.h b/src/client/Client.h index 32306d34b4dcc..620c6819a3858 100644 --- a/src/client/Client.h +++ b/src/client/Client.h @@ -1369,7 +1369,7 @@ private: ceph::unordered_set<dir_result_t*> opened_dirs; uint64_t fd_gen = 1; - bool blacklisted = false; + bool blocklisted = false; ceph::unordered_map<vinodeno_t, Inode*> inode_map; ceph::unordered_map<ino_t, vinodeno_t> faked_ino_map; diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h index e4001eb75ebd1..90c72d1b9b21a 100644 --- a/src/common/legacy_config_opts.h +++ b/src/common/legacy_config_opts.h @@ -303,7 +303,7 @@ OPTION(mon_keyvaluedb, OPT_STR) // type of keyvaluedb backend // UNSAFE -- TESTING ONLY! Allows addition of a cache tier with preexisting snaps OPTION(mon_debug_unsafe_allow_tier_with_nonempty_snaps, OPT_BOOL) -OPTION(mon_osd_blacklist_default_expire, OPT_DOUBLE) // default one hour +OPTION(mon_osd_blocklist_default_expire, OPT_DOUBLE) // default one hour OPTION(mon_osd_crush_smoke_test, OPT_BOOL) OPTION(paxos_stash_full_interval, OPT_INT) // how often (in commits) to stash a full copy of the PaxosService state @@ -406,8 +406,8 @@ OPTION(mds_beacon_interval, OPT_FLOAT) OPTION(mds_beacon_grace, OPT_FLOAT) OPTION(mds_enforce_unique_name, OPT_BOOL) -OPTION(mds_session_blacklist_on_timeout, OPT_BOOL) // whether to blacklist clients whose sessions are dropped due to timeout -OPTION(mds_session_blacklist_on_evict, OPT_BOOL) // whether to blacklist clients whose sessions are dropped via admin commands +OPTION(mds_session_blocklist_on_timeout, OPT_BOOL) // whether to blocklist clients whose sessions are dropped due to timeout +OPTION(mds_session_blocklist_on_evict, OPT_BOOL) // whether to blocklist clients whose sessions are dropped via admin commands OPTION(mds_sessionmap_keys_per_op, OPT_U32) // how many sessions should I try to load/store in a single OMAP operation?
OPTION(mds_freeze_tree_timeout, OPT_FLOAT) // detecting freeze tree deadlock diff --git a/src/common/options.cc b/src/common/options.cc index 8113f30b2d082..f7a325b624c56 100644 --- a/src/common/options.cc +++ b/src/common/options.cc @@ -2118,25 +2118,25 @@ std::vector