The idea of this change is to use more allocation-efficient structures: std::list is replaced with std::vector where elements are only appended and iterated, and with std::queue where FIFO semantics are needed.
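For context, an illustrative sketch (not part of the patch; the element type and count are arbitrary): std::list makes one heap allocation per node and scatters nodes across memory, while std::vector keeps its elements in a single contiguous buffer that grows geometrically, so appending N elements costs only O(log N) allocations, or exactly one when the final size is known up front.

#include <list>
#include <vector>

int main() {
  constexpr int n = 1024;

  std::list<int> l;
  for (int i = 0; i < n; ++i)
    l.push_back(i);          // one heap allocation per node

  std::vector<int> v;
  v.reserve(n);              // a single allocation when the size is known up front
  for (int i = 0; i < n; ++i)
    v.push_back(i);          // no further allocations after reserve()

  return 0;
}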
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
// Detect clients failing to respond to modifications to capabilities in
// CLIENT_CAPS messages.
{
- std::list<client_t> late_clients;
- mds->locker->get_late_revoking_clients(&late_clients,
- mds->mdsmap->get_session_timeout());
- std::list<MDSHealthMetric> late_cap_metrics;
-
- for (std::list<client_t>::iterator i = late_clients.begin(); i != late_clients.end(); ++i) {
+ auto&& late_clients = mds->locker->get_late_revoking_clients(mds->mdsmap->get_session_timeout());
+ std::vector<MDSHealthMetric> late_cap_metrics;
+ for (const auto& client : late_clients) {
// client_t is equivalent to session.info.inst.name.num
// Construct an entity_name_t to lookup into SessionMap
- entity_name_t ename(CEPH_ENTITY_TYPE_CLIENT, i->v);
+ entity_name_t ename(CEPH_ENTITY_TYPE_CLIENT, client.v);
Session const *s = mds->sessionmap.get_session(ename);
if (s == NULL) {
// Shouldn't happen, but not worth crashing if it does as this is
// just health-reporting code.
- derr << "Client ID without session: " << i->v << dendl;
+ derr << "Client ID without session: " << client.v << dendl;
continue;
}
std::ostringstream oss;
oss << "Client " << s->get_human_name() << " failing to respond to capability release";
MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE, HEALTH_WARN, oss.str());
- m.metadata["client_id"] = stringify(i->v);
- late_cap_metrics.push_back(m);
+ m.metadata["client_id"] = stringify(client.v);
+ late_cap_metrics.emplace_back(std::move(m));
}
if (late_cap_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
- health.metrics.splice(health.metrics.end(), late_cap_metrics);
+ auto&& m = late_cap_metrics;
+ health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
} else {
std::ostringstream oss;
oss << "Many clients (" << late_cap_metrics.size()
<< ") failing to respond to capability release";
MDSHealthMetric m(MDS_HEALTH_CLIENT_LATE_RELEASE_MANY, HEALTH_WARN, oss.str());
m.metadata["client_count"] = stringify(late_cap_metrics.size());
- health.metrics.push_back(m);
- late_cap_metrics.clear();
+ health.metrics.push_back(std::move(m));
}
}
const auto recall_warning_threshold = g_conf().get_val<Option::size_t>("mds_recall_warning_threshold");
const auto max_completed_requests = g_conf()->mds_max_completed_requests;
const auto max_completed_flushes = g_conf()->mds_max_completed_flushes;
- std::list<MDSHealthMetric> late_recall_metrics;
- std::list<MDSHealthMetric> large_completed_requests_metrics;
+ std::vector<MDSHealthMetric> late_recall_metrics;
+ std::vector<MDSHealthMetric> large_completed_requests_metrics;
for (auto& session : sessions) {
const uint64_t recall_caps = session->get_recall_caps();
if (recall_caps > recall_warning_threshold) {
oss << "Client " << session->get_human_name() << " failing to respond to cache pressure";
MDSHealthMetric m(MDS_HEALTH_CLIENT_RECALL, HEALTH_WARN, oss.str());
m.metadata["client_id"] = stringify(session->get_client());
- late_recall_metrics.push_back(m);
+ late_recall_metrics.emplace_back(std::move(m));
}
if ((session->get_num_trim_requests_warnings() > 0 &&
session->get_num_completed_requests() >= max_completed_requests) ||
oss << "Client " << session->get_human_name() << " failing to advance its oldest client/flush tid";
MDSHealthMetric m(MDS_HEALTH_CLIENT_OLDEST_TID, HEALTH_WARN, oss.str());
m.metadata["client_id"] = stringify(session->get_client());
- large_completed_requests_metrics.push_back(m);
+ large_completed_requests_metrics.emplace_back(std::move(m));
}
}
if (late_recall_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
- health.metrics.splice(health.metrics.end(), late_recall_metrics);
+ auto&& m = late_recall_metrics;
+ health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
} else {
std::ostringstream oss;
oss << "Many clients (" << late_recall_metrics.size()
}
if (large_completed_requests_metrics.size() <= (size_t)g_conf()->mds_health_summarize_threshold) {
- health.metrics.splice(health.metrics.end(), large_completed_requests_metrics);
+ auto&& m = large_completed_requests_metrics;
+ health.metrics.insert(std::end(health.metrics), std::cbegin(m), std::cend(m));
} else {
std::ostringstream oss;
oss << "Many clients (" << large_completed_requests_metrics.size()
get(PIN_SUBTREE);
}
-void CDir::split(int bits, list<CDir*>& subs, MDSContext::vec& waiters, bool replay)
+void CDir::split(int bits, std::vector<CDir*>* subs, MDSContext::vec& waiters, bool replay)
{
dout(10) << "split by " << bits << " bits on " << *this << dendl;
dout(10) << " subfrag " << fg << " " << *f << dendl;
subfrags[n++] = f;
- subs.push_back(f);
+ subs->push_back(f);
f->set_dir_auth(get_dir_auth());
f->freeze_tree_state = freeze_tree_state;
finish_old_fragment(waiters, replay);
}
-void CDir::merge(list<CDir*>& subs, MDSContext::vec& waiters, bool replay)
+void CDir::merge(const std::vector<CDir*>& subs, MDSContext::vec& waiters, bool replay)
{
dout(10) << "merge " << subs << dendl;
+ ceph_assert(!subs.empty());
+
set_dir_auth(subs.front()->get_dir_auth());
freeze_tree_state = subs.front()->freeze_tree_state;
- for (auto dir : subs) {
+ for (const auto& dir : subs) {
ceph_assert(get_dir_auth() == dir->get_dir_auth());
ceph_assert(freeze_tree_state == dir->freeze_tree_state);
}
map<string_snap_t, MDSContext::vec > dentry_waiters;
- for (auto dir : subs) {
+ for (const auto& dir : subs) {
dout(10) << " subfrag " << dir->get_frag() << " " << *dir << dendl;
ceph_assert(!dir->is_auth() || dir->is_complete() || replay);
return rval;
}
-void CDir::scrub_dentries_scrubbing(list<CDentry*> *out_dentries)
+std::vector<CDentry*> CDir::scrub_dentries_scrubbing()
{
dout(20) << __func__ << dendl;
ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
- for (set<dentry_key_t>::iterator i =
- scrub_infop->directories_scrubbing.begin();
- i != scrub_infop->directories_scrubbing.end();
- ++i) {
- CDentry *d = lookup(i->name, i->snapid);
+ std::vector<CDentry*> result;
+ for (auto& scrub_info : scrub_infop->directories_scrubbing) {
+ CDentry *d = lookup(scrub_info.name, scrub_info.snapid);
ceph_assert(d);
- out_dentries->push_back(d);
+ result.push_back(d);
}
- for (set<dentry_key_t>::iterator i = scrub_infop->others_scrubbing.begin();
- i != scrub_infop->others_scrubbing.end();
- ++i) {
- CDentry *d = lookup(i->name, i->snapid);
+ for (auto& scrub_info : scrub_infop->others_scrubbing) {
+ CDentry *d = lookup(scrub_info.name, scrub_info.snapid);
ceph_assert(d);
- out_dentries->push_back(d);
+ result.push_back(d);
}
+ return result;
}
void CDir::scrub_dentry_finished(CDentry *dn)
- * list will be filled with all CDentry * which have been returned
- * from scrub_dentry_next() but not sent back via scrub_dentry_finished().
+ * returned vector contains all CDentry* which have been returned from
+ * scrub_dentry_next() but not yet sent back via scrub_dentry_finished().
*/
- void scrub_dentries_scrubbing(std::list<CDentry*> *out_dentries);
+ std::vector<CDentry*> scrub_dentries_scrubbing();
/**
* Report to the CDir that a CDentry has been scrubbed. Call this
* for every CDentry returned from scrub_dentry_next().
public:
- void split(int bits, std::list<CDir*>& subs, MDSContext::vec& waiters, bool replay);
- void merge(std::list<CDir*>& subs, MDSContext::vec& waiters, bool replay);
+ void split(int bits, std::vector<CDir*>* subs, MDSContext::vec& waiters, bool replay);
+ void merge(const std::vector<CDir*>& subs, MDSContext::vec& waiters, bool replay);
bool should_split() const {
return (int)get_frag_size() > g_conf()->mds_bal_split_size;
return dirfragtree[h];
}
-bool CInode::get_dirfrags_under(frag_t fg, list<CDir*>& ls)
+std::pair<bool, std::vector<CDir*>> CInode::get_dirfrags_under(frag_t fg)
{
- bool all = true;
+ std::pair<bool, std::vector<CDir*>> result;
+ auto& all = result.first;
+ auto& dirs = result.second;
+
{
frag_vec_t leaves;
dirfragtree.get_leaves_under(fg, leaves);
for (const auto &leaf : leaves) {
if (auto it = dirfrags.find(leaf); it != dirfrags.end()) {
- ls.push_back(it->second);
+ dirs.push_back(it->second);
} else {
all = false;
}
}
if (all)
- return all;
+ return result;
fragtree_t tmpdft;
tmpdft.force_to_leaf(g_ceph_context, fg);
for (auto &p : dirfrags) {
tmpdft.force_to_leaf(g_ceph_context, p.first);
if (fg.contains(p.first) && !dirfragtree.is_leaf(p.first))
- ls.push_back(p.second);
+ dirs.push_back(p.second);
}
all = true;
}
}
- return all;
+ return result;
}
void CInode::verify_dirfrags()
if (dir) return dir;
// find a child?
- list<CDir*> ls;
- get_dirfrags_under(fg, ls);
- if (!ls.empty())
- return ls.front();
+ auto&& p = get_dirfrags_under(fg);
+ if (!p.second.empty())
+ return p.second.front();
// try parents?
while (fg.bits() > 0) {
}
return NULL;
}
- bool get_dirfrags_under(frag_t fg, std::list<CDir*>& ls);
+ std::pair<bool, std::vector<CDir*>> get_dirfrags_under(frag_t fg);
CDir* get_approx_dirfrag(frag_t fg);
template<typename Container>
standby_epochs[new_info.global_id] = epoch;
}
-std::list<mds_gid_t> FSMap::stop(mds_gid_t who)
+std::vector<mds_gid_t> FSMap::stop(mds_gid_t who)
{
ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
auto fs = filesystems.at(mds_roles.at(who));
fs->mds_map.stopped.insert(info.rank);
// Also drop any standby replays that were following this rank
- std::list<mds_gid_t> standbys;
+ std::vector<mds_gid_t> standbys;
for (const auto &i : fs->mds_map.mds_info) {
const auto &other_gid = i.first;
const auto &other_info = i.second;
* @returns a list of any additional GIDs that were removed from the map
* as a side effect (like standby replays)
*/
- std::list<mds_gid_t> stop(mds_gid_t who);
+ std::vector<mds_gid_t> stop(mds_gid_t who);
/**
* The rank held by 'who', if any, is to be relinquished, and
}
}
-void Locker::get_late_revoking_clients(std::list<client_t> *result,
- double timeout) const
+std::vector<client_t> Locker::get_late_revoking_clients(double timeout) const
{
- if (!any_late_revoking_caps(revoking_caps, timeout)) {
- // Fast path: no misbehaving clients, execute in O(1)
- return;
- }
+ std::vector<client_t> result;
- // Slow path: execute in O(N_clients)
- for (auto &p : revoking_caps_by_client) {
- if (any_late_revoking_caps(p.second, timeout)) {
- result->push_back(p.first);
+ if (any_late_revoking_caps(revoking_caps, timeout)) {
+ // Slow path: execute in O(N_clients)
+ for (auto &p : revoking_caps_by_client) {
+ if (any_late_revoking_caps(p.second, timeout)) {
+ result.push_back(p.first);
+ }
}
+ } else {
+ // Fast path: no misbehaving clients, execute in O(1)
}
+ return result;
}
// Hard-code instead of surfacing a config settings because this is
void remove_client_cap(CInode *in, Capability *cap);
- void get_late_revoking_clients(std::list<client_t> *result, double timeout) const;
+ std::vector<client_t> get_late_revoking_clients(double timeout) const;
private:
bool any_late_revoking_caps(xlist<Capability*> const &revoking, double timeout) const;
frag_t fg = dir->get_frag();
while (fg != frag_t()) {
frag_t sibfg = fg.get_sibling();
- list<CDir*> sibs;
- bool complete = diri->get_dirfrags_under(sibfg, sibs);
+ auto&& [complete, sibs] = diri->get_dirfrags_under(sibfg);
if (!complete) {
dout(10) << " not all sibs under " << sibfg << " in cache (have " << sibs << ")" << dendl;
break;
}
bool all = true;
- for (list<CDir*>::iterator p = sibs.begin(); p != sibs.end(); ++p) {
- CDir *sib = *p;
+ for (auto& sib : sibs) {
if (!sib->is_auth() || !sib->should_merge()) {
all = false;
break;
++p;
dout(10) << "cancelling fragment " << df << " bit " << info.bits << dendl;
- list<CDir*> dirs;
+ std::vector<CDir*> dirs;
info.dirs.swap(dirs);
fragments.erase(df);
fragment_unmark_unfreeze_dirs(dirs);
ceph_assert(!dir->is_auth());
// wake any waiters
- list<CDir*> q;
- q.push_back(dir);
+ std::queue<CDir*> q;
+ q.push(dir);
while (!q.empty()) {
CDir *d = q.front();
- q.pop_front();
+ q.pop();
d->take_waiting(d_mask, waiters);
// inode waiters too
auto&& ls = dnl->get_inode()->get_dirfrags();
for (const auto& subdir : ls) {
if (!subdir->is_subtree_root())
- q.push_back(subdir);
+ q.push(subdir);
}
}
}
}
}
- std::deque<CDir*> dfq; // dirfrag queue
- dfq.push_back(p->first);
+ std::queue<CDir*> dfq; // dirfrag queue
+ dfq.push(p->first);
bool auth = p->first->authority().first == mds->get_nodeid();
dout(10) << " subtree auth=" << auth << " for " << *p->first << dendl;
while (!dfq.empty()) {
CDir *dir = dfq.front();
- dfq.pop_front();
+ dfq.pop();
// dir
if (auth) {
if (in->is_dir()) {
auto&& dfv = in->get_nested_dirfrags();
for (const auto& dir : dfv) {
- dfq.push_back(dir);
+ dfq.push(dir);
}
}
}
dout(10) << "subtree " << *dir << dendl;
// auth items in this subtree
- std::deque<CDir*> dq;
- dq.push_back(dir);
+ std::queue<CDir*> dq;
+ dq.push(dir);
while (!dq.empty()) {
CDir *dir = dq.front();
- dq.pop_front();
+ dq.pop();
// dir
for (auto &r : dir->get_replicas()) {
{
auto&& dirs = in->get_nested_dirfrags();
for (const auto& dir : dirs) {
- dq.push_back(dir);
+ dq.push(dir);
}
}
}
* @param bits bit adjustment. positive for split, negative for merge.
*/
void MDCache::adjust_dir_fragments(CInode *diri, frag_t basefrag, int bits,
- list<CDir*>& resultfrags,
+ std::vector<CDir*>* resultfrags,
MDSContext::vec& waiters,
bool replay)
{
dout(10) << "adjust_dir_fragments " << basefrag << " " << bits
<< " on " << *diri << dendl;
- list<CDir*> srcfrags;
- diri->get_dirfrags_under(basefrag, srcfrags);
+ auto&& p = diri->get_dirfrags_under(basefrag);
- adjust_dir_fragments(diri, srcfrags, basefrag, bits, resultfrags, waiters, replay);
+ adjust_dir_fragments(diri, p.second, basefrag, bits, resultfrags, waiters, replay);
}
CDir *MDCache::force_dir_fragment(CInode *diri, frag_t fg, bool replay)
dout(10) << "force_dir_fragment " << fg << " on " << *diri << dendl;
- list<CDir*> src, result;
+ std::vector<CDir*> src, result;
MDSContext::vec waiters;
// split a parent?
int split = fg.bits() - parent.bits();
dout(10) << " splitting parent by " << split << " " << *pdir << dendl;
src.push_back(pdir);
- adjust_dir_fragments(diri, src, parent, split, result, waiters, replay);
+ adjust_dir_fragments(diri, src, parent, split, &result, waiters, replay);
dir = diri->get_dirfrag(fg);
if (dir) {
dout(10) << "force_dir_fragment result " << *dir << dendl;
if (!dir) {
// hoover up things under fg?
- diri->get_dirfrags_under(fg, src);
+ {
+ auto&& p = diri->get_dirfrags_under(fg);
+ src.insert(std::end(src), std::cbegin(p.second), std::cend(p.second));
+ }
if (src.empty()) {
dout(10) << "force_dir_fragment no frags under " << fg << dendl;
} else {
dout(10) << " will combine frags under " << fg << ": " << src << dendl;
- adjust_dir_fragments(diri, src, fg, 0, result, waiters, replay);
+ adjust_dir_fragments(diri, src, fg, 0, &result, waiters, replay);
dir = result.front();
dout(10) << "force_dir_fragment result " << *dir << dendl;
}
}
void MDCache::adjust_dir_fragments(CInode *diri,
- list<CDir*>& srcfrags,
+ const std::vector<CDir*>& srcfrags,
frag_t basefrag, int bits,
- list<CDir*>& resultfrags,
+ std::vector<CDir*>* resultfrags,
MDSContext::vec& waiters,
bool replay)
{
if (parent_dir)
parent_subtree = get_subtree_root(parent_dir);
+ ceph_assert(!srcfrags.empty());
if (bits > 0) {
// SPLIT
ceph_assert(srcfrags.size() == 1);
// did i change the subtree map?
if (dir->is_subtree_root()) {
// new frags are now separate subtrees
- for (const auto& dir : resultfrags) {
+ for (const auto& dir : *resultfrags) {
subtrees[dir].clear(); // new frag is now its own subtree
}
if (parent_subtree) {
ceph_assert(subtrees[parent_subtree].count(dir));
subtrees[parent_subtree].erase(dir);
- for (const auto& dir : resultfrags) {
+ for (const auto& dir : *resultfrags) {
ceph_assert(dir->is_subtree_root());
subtrees[parent_subtree].insert(dir);
}
// are my constituent bits subtrees? if so, i will be too.
// (it's all or none, actually.)
bool any_subtree = false, any_non_subtree = false;
- for (CDir *dir : srcfrags) {
+ for (const auto& dir : srcfrags) {
if (dir->is_subtree_root())
any_subtree = true;
else
set<CDir*> new_bounds;
if (any_subtree) {
- for (CDir *dir : srcfrags) {
+ for (const auto& dir : srcfrags) {
// this simplifies the code that find subtrees underneath the dirfrag
if (!dir->is_subtree_root()) {
dir->state_set(CDir::STATE_AUXSUBTREE);
}
}
- for (CDir *dir : srcfrags) {
+ for (const auto& dir : srcfrags) {
ceph_assert(dir->is_subtree_root());
dout(10) << " taking srcfrag subtree bounds from " << *dir << dendl;
map<CDir*, set<CDir*> >::iterator q = subtrees.find(dir);
show_subtrees(10);
}
- resultfrags.push_back(f);
+ resultfrags->push_back(f);
}
}
}
};
-bool MDCache::can_fragment(CInode *diri, list<CDir*>& dirs)
+bool MDCache::can_fragment(CInode *diri, const std::vector<CDir*>& dirs)
{
if (is_readonly()) {
dout(7) << "can_fragment: read-only FS, no fragmenting for now" << dendl;
ceph_assert(dir->is_auth());
CInode *diri = dir->inode;
- list<CDir*> dirs;
+ std::vector<CDir*> dirs;
dirs.push_back(dir);
if (!can_fragment(diri, dirs)) {
{
dout(7) << "merge_dir to " << frag << " on " << *diri << dendl;
- list<CDir*> dirs;
- if (!diri->get_dirfrags_under(frag, dirs)) {
+ auto&& [all, dirs] = diri->get_dirfrags_under(frag);
+ if (!all) {
dout(7) << "don't have all frags under " << frag << " for " << *diri << dendl;
return;
}
fragment_mark_and_complete(mdr);
}
-void MDCache::fragment_freeze_dirs(list<CDir*>& dirs)
+void MDCache::fragment_freeze_dirs(const std::vector<CDir*>& dirs)
{
bool any_subtree = false, any_non_subtree = false;
- for (CDir* dir : dirs) {
+ for (const auto& dir : dirs) {
dir->auth_pin(dir); // until we mark and complete them
dir->state_set(CDir::STATE_FRAGMENTING);
dir->freeze_dir();
if (any_subtree && any_non_subtree) {
// either all dirfrags are subtree roots or all are not.
- for (CDir *dir : dirs) {
+ for (const auto& dir : dirs) {
if (dir->is_subtree_root()) {
ceph_assert(dir->state_test(CDir::STATE_AUXSUBTREE));
} else {
fragment_frozen(mdr, 0);
}
-void MDCache::fragment_unmark_unfreeze_dirs(list<CDir*>& dirs)
+void MDCache::fragment_unmark_unfreeze_dirs(const std::vector<CDir*>& dirs)
{
dout(10) << "fragment_unmark_unfreeze_dirs " << dirs << dendl;
for (const auto& dir : dirs) {
if (info.num_remote_waiters > 0 ||
(!dir->inode->is_root() && dir->get_parent_dir()->is_freezing())) {
dout(10) << " cancel fragmenting " << df << " bit " << info.bits << dendl;
- list<CDir*> dirs;
+ std::vector<CDir*> dirs;
info.dirs.swap(dirs);
fragments.erase(df);
fragment_unmark_unfreeze_dirs(dirs);
// refragment
MDSContext::vec waiters;
adjust_dir_fragments(diri, info.dirs, basedirfrag.frag, info.bits,
- info.resultfrags, waiters, false);
+ &info.resultfrags, waiters, false);
if (g_conf()->mds_debug_frag)
diri->verify_dirfrags();
mds->queue_waiters(waiters);
for (const auto& fg : le->orig_frags)
ceph_assert(!diri->dirfragtree.is_leaf(fg));
- le->metablob.add_dir_context(*info.resultfrags.begin());
+ le->metablob.add_dir_context(info.resultfrags.front());
for (const auto& dir : info.resultfrags) {
if (diri->is_auth()) {
le->metablob.add_fragmented_dir(dir, false, false);
// refragment
MDSContext::vec waiters;
- list<CDir*> resultfrags;
- adjust_dir_fragments(diri, base, bits, resultfrags, waiters, false);
+ std::vector<CDir*> resultfrags;
+ adjust_dir_fragments(diri, base, bits, &resultfrags, waiters, false);
if (g_conf()->mds_debug_frag)
diri->verify_dirfrags();
frag_vec_t old_frags;
diri->dirfragtree.get_leaves_under(p->first.frag, old_frags);
- list<CDir*> resultfrags;
+ std::vector<CDir*> resultfrags;
if (uf.old_frags.empty()) {
// created by old format EFragment
MDSContext::vec waiters;
- adjust_dir_fragments(diri, p->first.frag, -uf.bits, resultfrags, waiters, true);
+ adjust_dir_fragments(diri, p->first.frag, -uf.bits, &resultfrags, waiters, true);
} else {
auto bp = uf.rollback.cbegin();
for (const auto& fg : uf.old_frags) {
struct fragment_info_t {
int bits;
- list<CDir*> dirs;
- list<CDir*> resultfrags;
+ std::vector<CDir*> dirs;
+ std::vector<CDir*> resultfrags;
MDRequestRef mdr;
set<mds_rank_t> notify_ack_waiting;
bool finishing = false;
typedef map<dirfrag_t,fragment_info_t>::iterator fragment_info_iterator;
void adjust_dir_fragments(CInode *diri, frag_t basefrag, int bits,
- list<CDir*>& frags, MDSContext::vec& waiters, bool replay);
+ std::vector<CDir*>* frags, MDSContext::vec& waiters, bool replay);
void adjust_dir_fragments(CInode *diri,
- list<CDir*>& srcfrags,
+ const std::vector<CDir*>& srcfrags,
frag_t basefrag, int bits,
- list<CDir*>& resultfrags,
+ std::vector<CDir*>* resultfrags,
MDSContext::vec& waiters,
bool replay);
CDir *force_dir_fragment(CInode *diri, frag_t fg, bool replay=true);
void get_force_dirfrag_bound_set(const vector<dirfrag_t>& dfs, set<CDir*>& bounds);
- bool can_fragment(CInode *diri, list<CDir*>& dirs);
- void fragment_freeze_dirs(list<CDir*>& dirs);
+ bool can_fragment(CInode *diri, const std::vector<CDir*>& dirs);
+ void fragment_freeze_dirs(const std::vector<CDir*>& dirs);
void fragment_mark_and_complete(MDRequestRef& mdr);
void fragment_frozen(MDRequestRef& mdr, int r);
- void fragment_unmark_unfreeze_dirs(list<CDir*>& dirs);
+ void fragment_unmark_unfreeze_dirs(const std::vector<CDir*>& dirs);
void fragment_drop_locks(fragment_info_t &info);
void fragment_maybe_finish(const fragment_info_iterator& it);
void dispatch_fragment_dir(MDRequestRef& mdr);
void MutationImpl::pop_and_dirty_projected_fnodes()
{
- while (!projected_fnodes.empty()) {
- CDir *dir = projected_fnodes.front();
- projected_fnodes.pop_front();
+ for (const auto& dir : projected_fnodes) {
dir->pop_and_dirty_projected_fnode(ls);
}
+ projected_fnodes.clear();
}
void MutationImpl::add_updated_lock(ScatterLock *lock)
// for applying projected inode changes
list<CInode*> projected_inodes;
- list<CDir*> projected_fnodes;
+ std::vector<CDir*> projected_fnodes;
list<ScatterLock*> updated_locks;
list<CInode*> dirty_cow_inodes;
{
size_t total_items = 0;
unsigned used_objs = 1;
- std::list<unsigned> objs_to_write;
+ std::vector<unsigned> objs_to_write;
bool journaled = false;
for (unsigned i = 0; i < omap_num_objs; i++) {
total_items += omap_num_items[i];
uint64_t expire_to);
bool recovered;
- std::list<Context*> waiting_for_recovery;
+ std::vector<Context*> waiting_for_recovery;
void _go_readonly(int r);
if (header->get_recursive()) {
frag_vec_t scrubbing_frags;
- list<CDir*> scrubbing_cdirs;
+ std::queue<CDir*> scrubbing_cdirs;
in->scrub_dirfrags_scrubbing(&scrubbing_frags);
dout(20) << __func__ << " iterating over " << scrubbing_frags.size()
<< " scrubbing frags" << dendl;
// turn frags into CDir *
CDir *dir = in->get_dirfrag(fg);
if (dir) {
- scrubbing_cdirs.push_back(dir);
+ scrubbing_cdirs.push(dir);
dout(25) << __func__ << " got CDir " << *dir << " presently scrubbing" << dendl;
} else {
in->scrub_dirfrag_finished(fg);
dout(20) << __func__ << " consuming from " << scrubbing_cdirs.size()
<< " scrubbing cdirs" << dendl;
- list<CDir*>::iterator i = scrubbing_cdirs.begin();
while (g_conf()->mds_max_scrub_ops_in_progress > scrubs_in_progress) {
// select next CDir
CDir *cur_dir = NULL;
- if (i != scrubbing_cdirs.end()) {
- cur_dir = *i;
- ++i;
+ if (!scrubbing_cdirs.empty()) {
+ cur_dir = scrubbing_cdirs.front();
+ scrubbing_cdirs.pop();
dout(20) << __func__ << " got cur_dir = " << *cur_dir << dendl;
} else {
bool ready = get_next_cdir(in, &cur_dir);
dout(20) << __func__ << " get_next_cdir ready=" << ready << dendl;
if (ready && cur_dir) {
- scrubbing_cdirs.push_back(cur_dir);
+ scrubbing_cdirs.push(cur_dir);
} else if (!ready) {
// We are waiting for load of a frag
all_frags_done = false;
if (r == ENOENT) {
// Nothing left to scrub, are we done?
- std::list<CDentry*> scrubbing;
- dir->scrub_dentries_scrubbing(&scrubbing);
+ auto&& scrubbing = dir->scrub_dentries_scrubbing();
if (scrubbing.empty()) {
dout(20) << __func__ << " dirfrag done: " << *dir << dendl;
// FIXME: greg: What's the diff meant to be between done and terminal
// list of pending context completions for asynchronous scrub
// control operations.
- std::list<Context *> control_ctxs;
+ std::vector<Context *> control_ctxs;
// list of inodes for which scrub operations are running -- used
- // to diplay out in `scrub status`.
+ // to display in `scrub status`.
return;
}
- std::list<client_t> to_evict;
- mds->locker->get_late_revoking_clients(&to_evict, cap_revoke_eviction_timeout);
+ auto&& to_evict = mds->locker->get_late_revoking_clients(cap_revoke_eviction_timeout);
for (auto const &client: to_evict) {
mds->clog->warn() << "client id " << client << " has not responded to"
size_t Server::apply_blacklist(const std::set<entity_addr_t> &blacklist)
{
- std::list<Session*> victims;
+ std::vector<Session*> victims;
const auto& sessions = mds->sessionmap.get_sessions();
for (const auto& p : sessions) {
if (!p.first.is_client()) {
}
}
- for (const auto s : victims) {
+ for (const auto& s : victims) {
kill_session(s, nullptr);
}
// Whenever we see a dentry within a dirlump, we remember it as a child of
// the dirlump's inode
- std::map<inodeno_t, std::list<std::string> > children;
+ std::map<inodeno_t, std::vector<std::string> > children;
// Whenever we see a location for an inode, remember it: this allows us to
// build a path given an inode
{
dout(10) << "EFragment.replay " << op_name(op) << " " << ino << " " << basefrag << " by " << bits << dendl;
- list<CDir*> resultfrags;
+ std::vector<CDir*> resultfrags;
MDSContext::vec waiters;
// in may be NULL if it wasn't in our cache yet. if it's a prepare
mds->mdcache->add_uncommitted_fragment(dirfrag_t(ino, basefrag), bits, orig_frags, segment, &rollback);
if (in)
- mds->mdcache->adjust_dir_fragments(in, basefrag, bits, resultfrags, waiters, true);
+ mds->mdcache->adjust_dir_fragments(in, basefrag, bits, &resultfrags, waiters, true);
break;
case OP_ROLLBACK: {
in->dirfragtree.get_leaves_under(basefrag, old_frags);
if (orig_frags.empty()) {
// old format EFragment
- mds->mdcache->adjust_dir_fragments(in, basefrag, -bits, resultfrags, waiters, true);
+ mds->mdcache->adjust_dir_fragments(in, basefrag, -bits, &resultfrags, waiters, true);
} else {
for (const auto& fg : orig_frags)
mds->mdcache->force_dir_fragment(in, fg);
*/
struct MDSHealth
{
- std::list<MDSHealthMetric> metrics;
+ std::vector<MDSHealthMetric> metrics;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);