The description texts for mds_kill_skip_replaying_inotable
and mds_inject_skip_replaying_inotable are mixed up.
At the same time, rename "mds_kill_skip_replaying_inotable",
which is easily confused with the inject option, to
"mds_kill_after_journal_logs_flushed".
Fixes: https://tracker.ceph.com/issues/61660
Signed-off-by: Xiubo Li <xiubli@redhat.com>
(cherry picked from commit 7df5714803e659a591075fb1db146f2a447b4afa)
than the number mentioned against the config tunable `mds_max_snaps_per_dir`
so that a new snapshot can be created and retained during the next schedule
run.
+* cephfs: Running the command "ceph fs authorize" for an existing entity now
+  upgrades the entity's capabilities instead of printing an error. It can now
+  also change read/write permissions in a capability that the entity already
+  holds. If the capability passed by the user is the same as one that the
+  entity already holds, the command is idempotent (see the sketch after these
+  notes).
* `ceph config dump --format <json|xml>` output will display the localized
  option names instead of their normalized versions. For example,
  "mgr/prometheus/x/server_port" will be displayed instead of
  "mgr/prometheus/server_port". This matches the output of the
  non-pretty-printed version of the command.
+* cephfs: The MDS config option "mds_kill_skip_replaying_inotable" was easily
+  confused with "mds_inject_skip_replaying_inotable", so it has been renamed
+  to "mds_kill_after_journal_logs_flushed".
+
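A minimal sketch of the "ceph fs authorize" behaviour described in the note
above, written against the qa test helpers used elsewhere in this diff; the
entity name, path, and caps are made-up placeholders, not part of this
change::

    # granting caps for an existing entity now upgrades them instead of
    # returning an error
    self.fs.mon_manager.raw_cluster_cmd(
        'fs', 'authorize', self.fs.name, 'client.x', '/', 'r')
    self.fs.mon_manager.raw_cluster_cmd(
        'fs', 'authorize', self.fs.name, 'client.x', '/', 'rw')
    # repeating the exact same caps is a no-op (idempotent)
    self.fs.mon_manager.raw_cluster_cmd(
        'fs', 'authorize', self.fs.name, 'client.x', '/', 'rw')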
>=17.2.1
.. confval:: mds_kill_link_at
.. confval:: mds_kill_rename_at
.. confval:: mds_inject_skip_replaying_inotable
-.. confval:: mds_kill_skip_replaying_inotable
+.. confval:: mds_kill_after_journal_logs_flushed
.. confval:: mds_wipe_sessions
.. confval:: mds_wipe_ino_prealloc
.. confval:: mds_skip_ino
status = self.fs.status()
rank0 = self.fs.get_rank(rank=0, status=status)
- self.fs.mds_asok(['config', 'set', 'mds_kill_skip_replaying_inotable', "true"])
+ self.fs.mds_asok(['config', 'set', 'mds_kill_after_journal_logs_flushed', "true"])
# This will make the MDS crash, since we only have one MDS in the
# cluster and without "wait=False" it would get stuck here forever.
self.mount_a.run_shell(["mkdir", "test_alloc_ino/dir1"], wait=False)
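For context, a test that trips this kill point typically continues by letting
a replacement MDS replay the journal and then cleaning up after the crash; a
rough sketch, assuming the standard qa helpers wait_for_daemons() and
delete_mds_coredump() (these follow-up steps are not part of this diff):

# wait for a replacement MDS to replay the journal and go active
self.fs.wait_for_daemons()
# the asserted MDS dumps core; remove it so the run is not flagged as failed
self.delete_mds_coredump(rank0['name'])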
default: false
services:
- mds
-- name: mds_kill_skip_replaying_inotable
+- name: mds_kill_after_journal_logs_flushed
type: bool
level: dev
default: false
services:
- mds
- fmt_desc: Ceph will skip replaying the inotable when replaying the journal, and
- the premary MDS will crash, while the replacing MDS won't.
+ fmt_desc: The primary MDS will crash just after the mknod/openc journal logs
+ are flushed to the pool.
(for testing only).
with_legacy: true
- name: mds_inject_skip_replaying_inotable
default: false
services:
- mds
- fmt_desc: Ceph will skip replaying the inotable when replaying the journal, and
- the premary MDS will crash, while the replacing MDS won't.
+ fmt_desc: MDS will skip replaying the inotable when replaying the journal logs.
(for testing only).
with_legacy: true
# percentage of MDS modify replies to skip sending the client a trace on [0-1]
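Since the two options are easy to conflate, here is a hedged sketch of how
each might be exercised from a qa test (illustrative only; self.config_set is
assumed from the test framework, and these calls are not part of this change).
The kill option crashes the MDS that just flushed the mknod/openc journal
logs, while the inject option makes an MDS skip the inotable when replaying
those logs:

# crash the active MDS right after the journal logs are flushed
self.fs.mds_asok(['config', 'set', 'mds_kill_after_journal_logs_flushed', 'true'])
# make MDS daemons (e.g. the replacement) skip replaying the inotable
self.config_set('mds', 'mds_inject_skip_replaying_inotable', 'true')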
ceph_assert(r == 0);
// crash current MDS and the replacing MDS will test the journal
- ceph_assert(!g_conf()->mds_kill_skip_replaying_inotable);
+ ceph_assert(!g_conf()->mds_kill_after_journal_logs_flushed);
dn->pop_projected_linkage();
ceph_assert(r == 0);
// crash current MDS and the replacing MDS will test the journal
- ceph_assert(!g_conf()->mds_kill_skip_replaying_inotable);
+ ceph_assert(!g_conf()->mds_kill_after_journal_logs_flushed);
// link the inode
dn->pop_projected_linkage();
mds->balancer->maybe_fragment(dir, false);
// flush the journal as soon as possible
- if (g_conf()->mds_kill_skip_replaying_inotable) {
+ if (g_conf()->mds_kill_after_journal_logs_flushed) {
mdlog->flush();
}
}