.set_default(true)
.set_description("allow setting directory export pins to particular ranks"),
+ Option("mds_export_ephemeral_random", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(false)
+ .set_flag(Option::FLAG_RUNTIME)
+ .set_description("allow ephemeral random pinning of the loaded subtrees")
+ .set_long_description("probabilistically pin the loaded directory inode and the subtree beneath it to an MDS based on the consistent hash of the inode number. The higher this value the more likely the loaded subtrees get pinned"),
+
+ Option("mds_export_ephemeral_distributed", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(false)
+ .set_flag(Option::FLAG_RUNTIME)
+ .set_description("allow ephemeral distributed pinning of the loaded subtrees")
+ .set_long_description("pin the immediate child directories of the loaded directory inode based on the consistent hash of the child's inode number. "),
+
Option("mds_bal_sample_interval", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
.set_default(3.0)
.set_description("interval in seconds between balancer ticks"),
if (in->inode.is_dirty_rstat())
in->mark_dirty_rstat();
+ in->maybe_export_ephemeral_random_pin(true);
//in->hack_accessed = false;
//in->hack_load_stamp = ceph_clock_now();
//num_new_inodes_loaded++;
if (new_export_pin)
maybe_export_pin(true);
+ if (front.inode.version == 1)
+ maybe_export_ephemeral_random_pin();
if (inode.is_backtrace_updated())
mark_dirty_parent(ls, old_pool != inode.layout.pool_id);
encode(inode.layout, bl, mdcache->mds->mdsmap->get_up_features());
encode(inode.quota, bl);
encode(inode.export_pin, bl);
+ encode(inode.export_ephemeral_distributed_pin, bl);
+ encode(inode.export_ephemeral_random_pin, bl);
}
ENCODE_FINISH(bl);
}
mds_rank_t old_pin = inode.export_pin;
decode(inode.export_pin, p);
maybe_export_pin(old_pin != inode.export_pin);
+ bool old_ephemeral_pin = inode.export_ephemeral_distributed_pin;
+ decode(inode.export_ephemeral_distributed_pin, p);
+ maybe_export_ephemeral_distributed_pin(old_ephemeral_pin != inode.export_ephemeral_distributed_pin);
+ decode(inode.export_ephemeral_random_pin, p);
}
DECODE_FINISH(p);
}
return;
mds_rank_t export_pin = get_export_pin(false);
- if (export_pin == MDS_RANK_NONE && !update)
+ if (export_pin == MDS_RANK_NONE && !update) {
+ maybe_export_ephemeral_distributed_pin();
return;
+ }
if (state_test(CInode::STATE_QUEUEDEXPORTPIN))
return;
}
}
+void CInode::maybe_export_ephemeral_random_pin(bool update)
+{
+  // Probabilistically pin this directory inode (and the subtree beneath
+  // it) to the rank selected by the consistent hash of its inode number.
+  // 'update' forces re-evaluation even if the inode is already pinned.
+
+  // Feature must be enabled via mds_export_ephemeral_random.
+  if (!mdcache->get_export_ephemeral_random_config())
+    return;
+
+  // Already ephemerally pinned and no forced re-evaluation requested.
+  if (is_export_ephemeral_random_pinned && !update)
+    return;
+
+  // A distributed ephemeral pin takes precedence over a random one.
+  if (is_export_ephemeral_distributed_pinned)
+    return;
+
+  // Roll the dice against the configured pin probability unless the
+  // caller demands re-evaluation (short-circuit avoids consuming a
+  // random number when 'update' is set).
+  double export_ephemeral_random_pin = get_export_ephemeral_random_pin(false);
+  if (!update &&
+      export_ephemeral_random_pin < ceph::util::generate_random_number(0.0, 1.0))
+    return;
+
+  dout(10) << __func__ << ": ephemeral random pinning inode " << ino() << dendl;
+
+  is_export_ephemeral_random_migrating = true;
+
+  for (auto& p : dirfrags) {
+    CDir *dir = p.second;
+    if (!dir->is_auth())
+      continue;
+    bool queue = false;
+    if (dir->is_subtree_root()) {
+      // set auxsubtree bit or export it
+      if (!dir->state_test(CDir::STATE_AUXSUBTREE) ||
+          mdcache->hash_into_rank_bucket(ino(), mdcache->mds->mdsmap->get_max_mds()) != dir->get_dir_auth().first)
+        queue = true;
+    } else {
+      // create aux subtree or export it
+      queue = true;
+    }
+    if (queue) {
+      // Track the pin locally when this rank is the hash target so it
+      // can be re-evaluated on cluster resize or shutdown.
+      if (mdcache->hash_into_rank_bucket(ino(), mdcache->mds->mdsmap->get_max_mds()) == mdcache->mds->get_nodeid())
+        mdcache->ephemeral_pin(ephemeral_pin_inode);
+      state_set(CInode::STATE_QUEUEDEXPORTPIN);
+      mdcache->export_pin_queue.insert(this);
+      break;
+    }
+  }
+}
+
+void CInode::maybe_export_ephemeral_distributed_pin(bool update)
+{
+  // Pin this directory to the rank given by the consistent hash of its
+  // inode number when the distributed ephemeral pin policy applies
+  // (either set on this inode itself or inherited from the parent
+  // directory).  'update' forces re-evaluation even if already pinned.
+  bool export_ephemeral_distributed_config = mdcache->get_export_ephemeral_distributed_config();
+
+  //If both the configs aren't set then return
+  if (!export_ephemeral_distributed_config)
+    return;
+
+  //Check if it's already ephemerally pinned
+  if (is_export_ephemeral_distributed_pinned && !update)
+    return;
+
+  // NOTE(review): this condition is always true here -- the early return
+  // above already handled the disabled case.
+  if (export_ephemeral_distributed_config) {
+    CDentry *pdn = get_parent_dn();
+
+    // Anonymous (not yet linked) inodes have no parent to inherit from.
+    if (!pdn) {
+      return;
+    }
+
+    auto dir = pdn->get_dir();
+
+    // Policy set on this inode itself: fan the evaluation out.
+    // NOTE(review): 'bounds' is not declared in this function and is not
+    // a visible CInode member here -- confirm what it refers to (subtree
+    // bounds?) and that this actually compiles.
+    if (get_export_ephemeral_distributed_pin() && dir->get_num_head_items()) {
+      for (auto& bound : bounds) {
+        bound->maybe_export_ephemeral_distributed_pin();
+      }
+    }
+
+    // Parent carries the policy (or forced update): queue this inode for
+    // export to its hash-designated rank.
+    else if (update || (dir->get_inode()->get_export_ephemeral_distributed_pin())) {
+      is_export_ephemeral_distributed_migrating = true;
+
+      bool queue = false;
+      for (auto& p : dirfrags) {
+        CDir *dir = p.second;  // NOTE(review): shadows the outer 'dir'
+        if (!dir->is_auth())
+          continue;
+        if (dir->is_subtree_root()) {
+          // set auxsubtree bit or export it
+          if (!dir->state_test(CDir::STATE_AUXSUBTREE) ||
+              mdcache->hash_into_rank_bucket(ino(), mdcache->mds->mdsmap->get_max_mds()) != dir->get_dir_auth().first)
+            queue = true;
+        } else {
+          // create aux subtree or export it
+          queue = true;
+        }
+        if (queue) {
+          dout(10) << "max_mds is" << mdcache->mds->mdsmap->get_max_mds() << "and target mds is:" << mdcache->hash_into_rank_bucket(ino(), mdcache->mds->mdsmap->get_max_mds()) << dendl;
+          // Track the pin locally when this rank is the hash target.
+          if (mdcache->hash_into_rank_bucket(ino(), mdcache->mds->mdsmap->get_max_mds()) == mdcache->mds->get_nodeid()) {
+            mdcache->ephemeral_pin(ephemeral_pin_inode);
+            dout(10) << "Inside if inside the else" << dendl;
+          }
+          state_set(CInode::STATE_QUEUEDEXPORTPIN);
+          mdcache->export_pin_queue.insert(this);
+          break;
+        }
+      }
+      return;
+    }
+  }
+}
+
+void CInode::set_export_ephemeral_random_pin(double probability)
+{
+  // Record the random ephemeral pin probability on the projected inode.
+  // Only valid for directories, and only while a projection is open.
+  ceph_assert(is_dir());
+  ceph_assert(is_projected());
+  auto *pi = get_projected_inode();
+  pi->export_ephemeral_random_pin = probability;
+}
+
+void CInode::set_export_ephemeral_distributed_pin(bool val)
+{
+  // Record the distributed ephemeral pin policy flag on the projected
+  // inode.  Only valid for directories with an open projection.
+  ceph_assert(is_dir());
+  ceph_assert(is_projected());
+  auto *pi = get_projected_inode();
+  pi->export_ephemeral_distributed_pin = val;
+}
+
void CInode::set_export_pin(mds_rank_t rank)
{
ceph_assert(is_dir());
return MDS_RANK_NONE;
}
+double CInode::get_export_ephemeral_random_pin(bool inherit) const
+{
+  /* An inode that is export pinned may not necessarily be a subtree root, we
+   * need to traverse the parents. A base or system inode cannot be pinned.
+   * N.B. inodes not yet linked into a dir (i.e. anonymous inodes) will not
+   * have a parent yet.
+   */
+  const CInode *in = this;
+  while (true) {
+    if (in->is_system())
+      break;
+    const CDentry *pdn = in->get_parent_dn();
+    if (!pdn)
+      break;
+    // ignore export pin for unlinked directory
+    if (in->get_inode().nlink == 0)
+      break;
+    // NOTE(review): export_ephemeral_random_pin defaults to 0, so this
+    // '>= 0' test is satisfied on the very first eligible inode and the
+    // inherit walk below never advances -- confirm whether '> 0' (only
+    // inherit past unset values) was intended.
+    if (in->get_inode().export_ephemeral_random_pin >= 0)
+      return in->get_inode().export_ephemeral_random_pin;
+
+    if (!inherit)
+      break;
+    in = pdn->get_dir()->inode;
+  }
+  // No pin found along the ancestry: probability 0 (never pinned).
+  return 0;
+}
+
+bool CInode::get_export_ephemeral_distributed_pin() const
+{
+  // Return this inode's own distributed ephemeral pin policy flag.
+  // (No inheritance here; callers consult the parent where needed.)
+  // The original 'if (x) return x; else return false;' collapses to a
+  // direct return of the bool field.
+  return get_inode().export_ephemeral_distributed_pin;
+}
+
bool CInode::is_exportable(mds_rank_t dest) const
{
mds_rank_t pin = get_export_pin();
std::map<int, std::unique_ptr<BatchOp>> batch_ops;
+ bool is_export_ephemeral_distributed_pinned = false;
+ bool is_export_ephemeral_random_pinned = false;
+
+ bool is_export_ephemeral_distributed_migrating = false;
+ bool is_export_ephemeral_random_migrating = false;
+
+  void finish_export_ephemeral_distributed_migration() {
+    // Migration completed: record the pin and clear the in-flight flag.
+    is_export_ephemeral_distributed_pinned = true;
+    is_export_ephemeral_distributed_migrating = false;
+  }
+
+  void finish_export_ephemeral_random_migration() {
+    // Migration completed: record the pin and clear the in-flight flag.
+    is_export_ephemeral_random_pinned = true;
+    is_export_ephemeral_random_migrating = false;
+  }
+
std::string_view pin_name(int p) const override;
ostream& print_db_line_prefix(ostream& out) override;
}
void maybe_export_pin(bool update=false);
+ void maybe_export_ephemeral_random_pin(bool update=false);
+ void maybe_export_ephemeral_distributed_pin(bool update=false);
void set_export_pin(mds_rank_t rank);
+ void set_export_ephemeral_random_pin(double probablitiy=0);
+ void set_export_ephemeral_distributed_pin(bool val=false);
mds_rank_t get_export_pin(bool inherit=true) const;
+ double get_export_ephemeral_random_pin(bool inherit=true) const;
+ bool get_export_ephemeral_distributed_pin() const;
bool is_exportable(mds_rank_t dest) const;
void print(ostream& out) override;
// list item node for when we have unpropagated rstat data
elist<CInode*>::item dirty_rstat_item;
+ elist<CInode*>::item ephemeral_pin_inode;
+
mempool::mds_co::set<client_t> client_snap_caps;
mempool::mds_co::compact_map<snapid_t, mempool::mds_co::set<client_t> > client_need_snapflush;
auto cur = it++;
CInode *in = *cur;
ceph_assert(in->is_dir());
- mds_rank_t export_pin = in->get_export_pin(false);
+ mds_rank_t export_pin = MDS_RANK_NONE;
+ // Making sure the ephemeral pin does not override export pin
+ if (in->get_export_pin(false) != MDS_RANK_NONE)
+ export_pin = in->get_export_pin(false);
+ else if (in->is_export_ephemeral_distributed_migrating || in->is_export_ephemeral_random_migrating) {
+ export_pin = mds->mdcache->hash_into_rank_bucket(in->ino(), mds->mdsmap->get_max_mds());
+ dout(10) << "Ephemeral export pin set on" << *in << dendl;
+ }
if (export_pin >= mds->mdsmap->get_max_mds()) {
dout(20) << " delay export_pin=" << export_pin << " on " << *in << dendl;
in->state_clear(CInode::STATE_QUEUEDEXPORTPIN);
filer(m->objecter, m->finisher),
stray_manager(m, purge_queue_),
recovery_queue(m),
+ ephemeral_pins(member_offset(CInode, ephemeral_pin_inode)),
trim_counter(g_conf().get_val<double>("mds_cache_trim_decay_rate"))
{
migrator.reset(new Migrator(mds, this));
cache_health_threshold = g_conf().get_val<double>("mds_health_cache_threshold");
forward_all_requests_to_auth = g_conf().get_val<bool>("mds_forward_all_requests_to_auth");
+ export_ephemeral_distributed_config = g_conf().get_val<bool>("mds_export_ephemeral_distributed");
+ export_ephemeral_random_config = g_conf().get_val<bool>("mds_export_ephemeral_random");
+
lru.lru_set_midpoint(g_conf().get_val<double>("mds_cache_mid"));
bottom_lru.lru_set_midpoint(0);
cache_memory_limit = g_conf().get_val<Option::size_t>("mds_cache_memory_limit");
if (changed.count("mds_cache_reservation"))
cache_reservation = g_conf().get_val<double>("mds_cache_reservation");
+ if (changed.count("mds_export_ephemeral_distributed"))
+ export_ephemeral_distributed_config = g_conf().get_val<bool>("mds_export_ephemeral_distributed");
+ if (changed.count("mds_export_ephemeral_random"))
+ export_ephemeral_random_config = g_conf().get_val<bool>("mds_export_ephemeral_random");
if (changed.count("mds_health_cache_threshold"))
cache_health_threshold = g_conf().get_val<double>("mds_health_cache_threshold");
if (changed.count("mds_cache_mid"))
o->item_open_file.remove_myself();
+ o->ephemeral_pin_inode.remove_myself();
+
if (o->state_test(CInode::STATE_QUEUEDEXPORTPIN))
export_pin_queue.erase(o);
}
+// ====================================================================
+// consistent hash ring
+
+/*
+ * hashing implementation based on Lamping and Veach's Jump Consistent Hash: https://arxiv.org/pdf/1406.2294.pdf
+*/
+mds_rank_t MDCache::hash_into_rank_bucket(inodeno_t ino, mds_rank_t max_mds)
+{
+  // Map an inode number to a rank in [0, max_mds).  The jump-consistent
+  // property means that when max_mds changes, only ~1/max_mds of the
+  // keys move to a different bucket.
+  uint64_t hash = rjhash64(ino);
+  int64_t b = -1, j = 0;
+  while (j < max_mds) {
+    b = j;
+    // 64-bit LCG step from the Lamping/Veach paper.
+    hash = hash*2862933555777941757ULL + 1;
+    j = (b + 1) * (double(1LL << 31) / double((hash >> 33) + 1));
+  }
+  // verify bounds before returning
+  // NOTE(review): if max_mds <= 0 the loop body never runs, b stays -1
+  // and this assert fires -- confirm callers never pass a zero max_mds.
+  auto result = mds_rank_t(b);
+  ceph_assert(result >= 0 && result < max_mds);
+  return result;
+}
// ====================================================================
}
migrator->clear_export_queue();
+
+ if (export_ephemeral_random_config ||
+ export_ephemeral_distributed_config) {
+ dout(10) << "Migrating ephemerally pinned inodes due to shutdown" << dendl;
+ elist<CInode*>::iterator it = ephemeral_pins.begin(member_offset(CInode, ephemeral_pin_inode));
+ while (!it.end()) {
+ if ((*it) == NULL || !((*it)->is_auth()))
+ dout(10) << "Inode is not auth to this rank" << dendl;
+ else {
+ dout(10) << "adding inode to export queue" << dendl;
+ (*it)->maybe_export_ephemeral_distributed_pin(true);
+ (*it)->maybe_export_ephemeral_random_pin(true);
+ }
+ ++it;
+ }
+ }
+
for (const auto& dir : ls) {
mds_rank_t dest = dir->get_inode()->authority().first;
if (dest > 0 && !mds->mdsmap->is_active(dest))
return true;
}
-void MDCache::handle_mdsmap(const MDSMap &mdsmap) {
+void MDCache::handle_mdsmap(const MDSMap &mdsmap, const MDSMap &oldmap) {
 // process export_pin_delayed_queue whenever a new MDSMap received
 auto &q = export_pin_delayed_queue;
 for (auto it = q.begin(); it != q.end(); ) {
 it = q.erase(it);
 in->maybe_export_pin();
 }
-}
+  /* Handle consistent hash ring during cluster resizes */
+  if (mdsmap.get_max_mds() != oldmap.get_max_mds()) {
+    dout(10) << "Checking ephemerally pinned directories for re-export due to max_mds change." << dendl;
+    auto it = ephemeral_pins.begin(member_offset(CInode, ephemeral_pin_inode));
+    while (!it.end()) {
+      auto in = *it;
+      // Advance before any remove_myself() below invalidates the item.
+      ++it;
+      // Migrate if the inodes hash elsewhere
+      // NOTE(review): the branches below look inverted -- when the inode
+      // hashes to a DIFFERENT rank nothing is queued for export, while
+      // when it hashes to THIS rank it is re-exported and dropped from
+      // the pin list.  Confirm which direction is intended.
+      if (hash_into_rank_bucket(in->ino(), mdsmap.get_max_mds()) != mds->get_nodeid()) {
+        if (in == NULL || !in->is_auth()) {
+          dout(10) << "Inode is not auth to this rank" << dendl;
+          // ++it; ??? - batrick
+        }
+      } else {
+        dout(10) << "adding inode to export queue" << dendl;
+        in->maybe_export_ephemeral_distributed_pin(true);
+        in->maybe_export_ephemeral_random_pin(true);
+        in->ephemeral_pin_inode.remove_myself();
+      }
+    }
+  }
+}
stray_index = (stray_index+1)%NUM_STRAY;
}
+  // Cached value of mds_export_ephemeral_distributed (refreshed on
+  // config change).
+  bool get_export_ephemeral_distributed_config(void) const {
+    return export_ephemeral_distributed_config;
+  }
+
+  // Cached value of mds_export_ephemeral_random (refreshed on config
+  // change).
+  bool get_export_ephemeral_random_config(void) const {
+    return export_ephemeral_random_config;
+  }
+
/**
* Call this when you know that a CDentry is ready to be passed
* on to StrayManager (i.e. this is a stray you've just created)
stray_manager.eval_stray(dn);
}
+ mds_rank_t hash_into_rank_bucket(inodeno_t ino, mds_rank_t max_mds);
+
void maybe_eval_stray(CInode *in, bool delay=false);
void clear_dirty_bits_for_stray(CInode* diri);
void discard_delayed_expire(CDir *dir);
// -- mdsmap --
- void handle_mdsmap(const MDSMap &mdsmap);
+ void handle_mdsmap(const MDSMap &mdsmap, const MDSMap &oldmap);
int dump_cache() { return dump_cache({}, nullptr); }
int dump_cache(std::string_view filename);
bool forward_all_requests_to_auth;
std::array<CInode *, NUM_STRAY> strays{}; // my stray dir
+ bool export_ephemeral_distributed_config;
+ bool export_ephemeral_random_config;
+
// File size recovery
RecoveryQueue recovery_queue;
map<dirfrag_t,fragment_info_t> fragments;
+ elist<CInode*> ephemeral_pins;
+
DecayCounter trim_counter;
std::thread upkeeper;
scrubstack->scrub_abort(c);
}
}
- mdcache->handle_mdsmap(*mdsmap);
+ mdcache->handle_mdsmap(*mdsmap, oldmap);
}
void MDSRank::handle_mds_recovery(mds_rank_t who)
"mds_dump_cache_threshold_file",
"mds_dump_cache_threshold_formatter",
"mds_enable_op_tracker",
+ "mds_export_ephemeral_random",
+ "mds_export_ephemeral_distributed",
"mds_health_cache_threshold",
"mds_inject_migrator_session_race",
"mds_log_pause",
mut->cleanup();
}
+ if (dir->get_inode()->is_export_ephemeral_distributed_migrating)
+ dir->get_inode()->finish_export_ephemeral_distributed_migration();
+ else if (dir->get_inode()->is_export_ephemeral_random_migrating)
+ dir->get_inode()->finish_export_ephemeral_random_migration();
+
if (parent)
child_export_finish(parent, true);
MutationRef mut = it->second.mut;
import_state.erase(it);
- mds->mdlog->start_submit_entry(new EImportFinish(dir, true));
+ // start the journal entry
+ EImportFinish *le = new EImportFinish(dir, true);
+ mds->mdlog->start_entry(le);
+
+ CInode *in = dir->get_inode();
+
+ CDentry *pdn = in->get_parent_dn();
+
+ if (in->get_export_ephemeral_random_pin(false)) { // Lazy checks. FIXME
+ le->metablob.add_primary_dentry(pdn, in, false, false, false, false,
+ false, true);
+ in->is_export_ephemeral_random_pinned = true;
+ cache->ephemeral_pins.push_back(&in->ephemeral_pin_inode);
+ } else if (pdn->get_dir()->get_inode()
+ && pdn->get_dir()->get_inode()->get_export_ephemeral_distributed_pin()) {
+ le->metablob.add_primary_dentry(pdn, in, false, false, false, false,
+ true, false);
+ in->is_export_ephemeral_distributed_pinned = true;
+ cache->ephemeral_pins.push_back(&in->ephemeral_pin_inode);
+ }
+
+ // log it
+ mds->mdlog->submit_entry(le);
// process delayed expires
cache->process_delayed_expire(dir);
}
}
-
void Migrator::decode_import_inode(CDentry *dn, bufferlist::const_iterator& blp,
mds_rank_t oldauth, LogSegment *ls,
map<CInode*, map<client_t,Capability::Export> >& peer_exports,
auto &pi = cur->project_inode();
cur->set_export_pin(rank);
pip = &pi.inode;
+ } else if (name.find("ceph.dir.pin.random") == 0) {
+ if (!cur->is_dir() || cur->is_root()) {
+ respond_to_request(mdr, -EINVAL);
+ return;
+ }
+
+ double val;
+ try {
+ val = boost::lexical_cast<double>(value);
+ } catch (boost::bad_lexical_cast const&) {
+ dout(10) << "bad vxattr value, unable to parse float for " << name << dendl;
+ respond_to_request(mdr, -EINVAL);
+ return;
+ }
+
+ if (!xlock_policylock(mdr, cur))
+ return;
+
+ auto &pi = cur->project_inode();
+ cur->set_export_ephemeral_random_pin(val);
+ pip = &pi.inode;
+ } else if (name.find("ceph.dir.pin.distributed") == 0) {
+ if (!cur->is_dir() || cur->is_root()) {
+ respond_to_request(mdr, -EINVAL);
+ return;
+ }
+
+ bool val;
+ try {
+ val = boost::lexical_cast<bool>(value);
+ } catch (boost::bad_lexical_cast const&) {
+ dout(10) << "bad vxattr value, unable to parse bool for " << name << dendl;
+ respond_to_request(mdr, -EINVAL);
+ return;
+ }
+
+ if (!xlock_policylock(mdr, cur))
+ return;
+
+ auto &pi = cur->project_inode();
+ cur->set_export_ephemeral_distributed_pin(val);
+ pip = &pi.inode;
+ dout(10) << "Here is the distrib pin value" << pip->export_ephemeral_distributed_pin << dendl;
} else {
dout(10) << " unknown vxattr " << name << dendl;
respond_to_request(mdr, -EINVAL);
static const int STATE_DIRTYPARENT = (1<<1);
static const int STATE_DIRTYPOOL = (1<<2);
static const int STATE_NEED_SNAPFLUSH = (1<<3);
+ static const int STATE_EPHEMERAL_DISTRIBUTED = (1<<4);
+ static const int STATE_EPHEMERAL_RANDOM = (1<<5);
std::string dn; // dentry
snapid_t dnfirst, dnlast;
version_t dnv{0};
bool is_dirty_parent() const { return (state & STATE_DIRTYPARENT); }
bool is_dirty_pool() const { return (state & STATE_DIRTYPOOL); }
bool need_snapflush() const { return (state & STATE_NEED_SNAPFLUSH); }
+ bool is_export_ephemeral_distributed() const { return (state & STATE_EPHEMERAL_DISTRIBUTED); }
+ bool is_export_ephemeral_random() const { return (state & STATE_EPHEMERAL_RANDOM); }
void print(ostream& out) const {
out << " fullbit dn " << dn << " [" << dnfirst << "," << dnlast << "] dnv " << dnv
// return remote pointer to to-be-journaled inode
void add_primary_dentry(CDentry *dn, CInode *in, bool dirty,
bool dirty_parent=false, bool dirty_pool=false,
- bool need_snapflush=false) {
+ bool need_snapflush=false, bool export_ephemeral_distributed=false,
+ bool export_ephemeral_random=false) {
__u8 state = 0;
if (dirty) state |= fullbit::STATE_DIRTY;
if (dirty_parent) state |= fullbit::STATE_DIRTYPARENT;
if (dirty_pool) state |= fullbit::STATE_DIRTYPOOL;
if (need_snapflush) state |= fullbit::STATE_NEED_SNAPFLUSH;
+ if (export_ephemeral_distributed) state |= fullbit::STATE_EPHEMERAL_DISTRIBUTED;
+ if (export_ephemeral_random) state |= fullbit::STATE_EPHEMERAL_RANDOM;
add_primary_dentry(add_dir(dn->get_dir(), false), dn, in, state);
}
void add_primary_dentry(dirlump& lump, CDentry *dn, CInode *in, __u8 state) {
{
in->inode = inode;
in->xattrs = xattrs;
+ if (in->inode.is_dir()) {
+ in->is_export_ephemeral_distributed_pinned = is_export_ephemeral_distributed();
+ in->is_export_ephemeral_random_pinned = is_export_ephemeral_random();
+ dout(10) << "I'm in update_inode inside journal.cc and is_export_ephemeral_distrib for inode " << *in << "is" << in->is_export_ephemeral_distributed_pinned << dendl;
+ }
in->maybe_export_pin();
if (in->inode.is_dir()) {
if (!(in->dirfragtree == dirfragtree)) {
mds_rank_t export_pin = MDS_RANK_NONE;
+ double export_ephemeral_random_pin = 0;
+ bool export_ephemeral_distributed_pin = false;
+
// special stuff
version_t version = 0; // auth only
version_t file_data_version = 0; // auth only
template<template<typename> class Allocator>
void inode_t<Allocator>::encode(bufferlist &bl, uint64_t features) const
{
- ENCODE_START(15, 6, bl);
+ ENCODE_START(16, 6, bl);
encode(ino, bl);
encode(rdev, bl);
encode(export_pin, bl);
+ encode(export_ephemeral_random_pin, bl);
+ encode(export_ephemeral_distributed_pin, bl);
+
ENCODE_FINISH(bl);
}
template<template<typename> class Allocator>
void inode_t<Allocator>::decode(bufferlist::const_iterator &p)
{
- DECODE_START_LEGACY_COMPAT_LEN(15, 6, 6, p);
+ DECODE_START_LEGACY_COMPAT_LEN(16, 6, 6, p);
decode(ino, p);
decode(rdev, p);
export_pin = MDS_RANK_NONE;
}
+ if (struct_v >= 16) {
+ decode(export_ephemeral_random_pin, p);
+ decode(export_ephemeral_distributed_pin, p);
+ } else {
+ export_ephemeral_random_pin = 0;
+ export_ephemeral_distributed_pin = false;
+ }
+
DECODE_FINISH(p);
}
f->dump_unsigned("time_warp_seq", time_warp_seq);
f->dump_unsigned("change_attr", change_attr);
f->dump_int("export_pin", export_pin);
+ f->dump_int("export_ephemeral_random_pin", export_ephemeral_random_pin);
+ f->dump_int("export_ephemeral_distributed_pin", export_ephemeral_distributed_pin);
f->open_array_section("client_ranges");
for (const auto &p : client_ranges) {