The sizes of the elements in the list are not necessarily uniform.
Lists are used to encode ``std::list``, ``std::vector``, ``std::deque``,
-``std::set`` and ``ceph::unordered_set``.
+``std::set`` and ``std::unordered_set``.
Blob
----
ceph_pair<K,V> entries[length];
}
-Maps are used to encode ``std::map``, ``std::multimap`` and
-``ceph::unordered_map``.
+Maps are used to encode ``std::map``, ``std::multimap``,
+``std::unordered_map`` and ``std::unordered_multimap``.
Complex Types
=============
#endif
#include <time.h>
-
-#include "include/unordered_map.h"
#include "include/hash_namespace.h"
#ifndef __LP64__
#include <fstream>
#include <map>
#include <set>
+#include <unordered_map>
using namespace std;
#include "common/ceph_mutex.h"
};
Inode *root = 0;
-ceph::unordered_map<ino_t, Inode*> inode_map;
+std::unordered_map<ino_t, Inode*> inode_map;
bool make_inode_path(string &buf, Inode *in)
{
did.insert(in);
if (in->dir) {
ldout(cct, 1) << " dir " << in->dir << " size " << in->dir->dentries.size() << dendl;
- for (ceph::unordered_map<string, Dentry*>::iterator it = in->dir->dentries.begin();
+ for (auto it = in->dir->dentries.begin();
it != in->dir->dentries.end();
++it) {
ldout(cct, 1) << " " << in->ino << " dn " << it->first << " " << it->second << " ref " << it->second->ref << dendl;
dump_inode(f, root.get(), did, true);
// make a second pass to catch anything disconnected
- for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
- it != inode_map.end();
- ++it) {
+ for (auto it = inode_map.begin(); it != inode_map.end(); ++it) {
if (did.count(it->second))
continue;
dump_inode(f, it->second, did, true);
bufferlist extra_bl;
inodeno_t created_ino;
bool got_created_ino = false;
- ceph::unordered_map<vinodeno_t, Inode*>::iterator p;
+ std::unordered_map<vinodeno_t, Inode*>::iterator p;
extra_bl = reply->get_extra_bl();
if (extra_bl.length() >= 8) {
// field with -ENOSPC as long as we're sure all the ops we cancelled were
// affecting this pool, and all the objectsets we're purging were also
// in this pool.
- for (unordered_map<vinodeno_t,Inode*>::iterator i = inode_map.begin();
- i != inode_map.end(); ++i)
- {
+ for (auto i = inode_map.begin(); i != inode_map.end(); ++i) {
Inode *inode = i->second;
if (inode->oset.dirty_or_tx
&& (pool == -1 || inode->layout.pool_id == pool)) {
bool allow_multi = session->mds_features.test(CEPHFS_FEATURE_MULTI_RECONNECT);
// i have an open session.
- ceph::unordered_set<inodeno_t> did_snaprealm;
- for (ceph::unordered_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
- p != inode_map.end();
- ++p) {
+ std::unordered_set<inodeno_t> did_snaprealm;
+ for (auto p = inode_map.begin(); p != inode_map.end(); ++p) {
Inode *in = p->second;
auto it = in->caps.find(mds);
if (it != in->caps.end()) {
if (can_invalidate_dentries) {
if (dentry_invalidate_cb && root->dir) {
- for (ceph::unordered_map<string, Dentry*>::iterator p = root->dir->dentries.begin();
+ for (auto p = root->dir->dentries.begin();
p != root->dir->dentries.end();
++p) {
if (p->second->inode)
int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds());
if (r == 0 && inode != NULL) {
- unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
+ auto p = inode_map.find(vino);
ceph_assert(p != inode_map.end());
*inode = p->second;
_ll_get(*inode);
ldout(cct, 3) << __func__ << " " << vino << dendl;
// Check the cache first
- unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
+ auto p = inode_map.find(vino);
if (p != inode_map.end()) {
*inode = p->second;
_ll_get(*inode);
{
ldout(cct, 10) << __func__ << dendl;
std::set<InodeRef> to_be_put; // this set is destructed item by item on exit
- ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
- for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
- it != inode_map.end();
- it = next) {
+ std::unordered_map<vinodeno_t, Inode*>::iterator next;
+ for (auto it = inode_map.begin(); it != inode_map.end(); it = next) {
Inode *in = it->second;
next = it;
++next;
std::scoped_lock lock(client_lock);
vinodeno_t vino = _map_faked_ino(ino);
- unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
+ auto p = inode_map.find(vino);
if (p == inode_map.end())
return NULL;
Inode *in = p->second;
std::scoped_lock lock(client_lock);
- unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
+ auto p = inode_map.find(vino);
if (p == inode_map.end())
return NULL;
Inode *in = p->second;
#include "include/interval_set.h"
#include "include/lru.h"
#include "include/types.h"
-#include "include/unordered_map.h"
-#include "include/unordered_set.h"
#include "include/cephfs/metrics/Types.h"
#include "mds/mdstypes.h"
#include "mds/MDSAuthCaps.h"
#include <set>
#include <string>
#include <thread>
+#include <unordered_map>
+#include <unordered_set>
using std::set;
using std::map;
// file handles, etc.
interval_set<int> free_fd_set; // unused fds
- ceph::unordered_map<int, Fh*> fd_map;
+ std::unordered_map<int, Fh*> fd_map;
set<Fh*> ll_unclosed_fh_set;
- ceph::unordered_set<dir_result_t*> opened_dirs;
+ std::unordered_set<dir_result_t*> opened_dirs;
uint64_t fd_gen = 1;
bool mount_aborted = false;
bool blocklisted = false;
- ceph::unordered_map<vinodeno_t, Inode*> inode_map;
- ceph::unordered_map<ino_t, vinodeno_t> faked_ino_map;
+ std::unordered_map<vinodeno_t, Inode*> inode_map;
+ std::unordered_map<ino_t, vinodeno_t> faked_ino_map;
interval_set<ino_t> free_faked_inos;
ino_t last_used_faked_ino;
ino_t last_used_faked_root;
xlist<Inode*> delayed_list;
int num_flushing_caps = 0;
- ceph::unordered_map<inodeno_t,SnapRealm*> snap_realms;
+ std::unordered_map<inodeno_t, SnapRealm*> snap_realms;
std::map<std::string, std::string> metadata;
ceph::coarse_mono_time last_auto_reconnect;
#define CEPH_CLIENT_DIR_H
#include <string>
+#include <unordered_map>
#include <vector>
class Dentry;
class Dir {
public:
Inode *parent_inode; // my inode
- ceph::unordered_map<std::string, Dentry*> dentries;
+ std::unordered_map<std::string, Dentry*> dentries;
unsigned num_null_dentries = 0;
std::vector<Dentry*> readdir_cache;
#include <iostream>
#include <sstream>
+#include <unordered_map>
#include "common/config.h"
utime_t start = ceph_clock_now();
- ceph::unordered_map<int64_t, int64_t> open_files;
- ceph::unordered_map<int64_t, dir_result_t*> open_dirs;
+ std::unordered_map<int64_t, int64_t> open_files;
+ std::unordered_map<int64_t, dir_result_t*> open_dirs;
- ceph::unordered_map<int64_t, Fh*> ll_files;
- ceph::unordered_map<int64_t, dir_result_t*> ll_dirs;
- ceph::unordered_map<uint64_t, int64_t> ll_inos;
+ std::unordered_map<int64_t, Fh*> ll_files;
+ std::unordered_map<int64_t, dir_result_t*> ll_dirs;
+ std::unordered_map<uint64_t, int64_t> ll_inos;
Inode *i1, *i2;
dout(10) << "trace finished on line " << t.get_line() << dendl;
// close open files
- for (ceph::unordered_map<int64_t, int64_t>::iterator fi = open_files.begin();
- fi != open_files.end();
- ++fi) {
+ for (auto fi = open_files.begin(); fi != open_files.end(); ++fi) {
dout(1) << "leftover close " << fi->second << dendl;
if (fi->second > 0) client->close(fi->second);
}
- for (ceph::unordered_map<int64_t, dir_result_t*>::iterator fi = open_dirs.begin();
- fi != open_dirs.end();
- ++fi) {
+ for (auto fi = open_dirs.begin(); fi != open_dirs.end(); ++fi) {
dout(1) << "leftover closedir " << fi->second << dendl;
if (fi->second != 0) client->closedir(fi->second);
}
- for (ceph::unordered_map<int64_t,Fh*>::iterator fi = ll_files.begin();
- fi != ll_files.end();
- ++fi) {
+ for (auto fi = ll_files.begin(); fi != ll_files.end(); ++fi) {
dout(1) << "leftover ll_release " << fi->second << dendl;
if (fi->second) client->ll_release(fi->second);
}
- for (ceph::unordered_map<int64_t,dir_result_t*>::iterator fi = ll_dirs.begin();
- fi != ll_dirs.end();
- ++fi) {
+ for (auto fi = ll_dirs.begin(); fi != ll_dirs.end(); ++fi) {
dout(1) << "leftover ll_releasedir " << fi->second << dendl;
if (fi->second) client->ll_releasedir(fi->second);
}
frag_info_t empty;
statq.push_back(empty);
- ceph::unordered_map<inodeno_t, int> nlink;
- ceph::unordered_map<inodeno_t, int> nlink_seen;
+ std::unordered_map<inodeno_t, int> nlink;
+ std::unordered_map<inodeno_t, int> nlink_seen;
UserPerm perms = client->pick_my_perms();
while (!dirq.empty()) {
}
}
- for (ceph::unordered_map<inodeno_t,int>::iterator p = nlink.begin(); p != nlink.end(); ++p) {
+ for (auto p = nlink.begin(); p != nlink.end(); ++p) {
if (nlink_seen[p->first] != p->second)
dout(0) << p->first << " nlink " << p->second << " != " << nlink_seen[p->first] << " seen" << dendl;
}
#include "fuse_ll.h"
#include <fuse_lowlevel.h>
+#include <unordered_map>
+
#define dout_context g_ceph_context
#define FINO_INO(x) ((x) & ((1ull<<48)-1ull))
* reserved for CEPH_SNAPDIR.
*/
struct ceph_fuse_fake_inode_stag {
- ceph::unordered_map<uint64_t,int> snap_stag_map; // <snapid, stagid>
- ceph::unordered_map<int, uint64_t> stag_snap_map; // <stagid, snapid>
+ std::unordered_map<uint64_t, int> snap_stag_map; // <snapid, stagid>
+ std::unordered_map<int, uint64_t> stag_snap_map; // <stagid, snapid>
int last_stag = 1;
};
using namespace std;
-static const ceph::unordered_map<int,int> cephfs_errno_to_system_errno = {
+static const std::unordered_map<int, int> cephfs_errno_to_system_errno = {
{EBLOCKLISTED, ESHUTDOWN},
{EPERM, EPERM},
{ESTALE, ESTALE},
ceph::mutex stag_lock = ceph::make_mutex("fuse_ll.cc stag_lock");
// a map of <ceph ino, fino stag/snapid map>
- ceph::unordered_map<uint64_t, struct ceph_fuse_fake_inode_stag> g_fino_maps;
+ std::unordered_map<uint64_t, struct ceph_fuse_fake_inode_stag> g_fino_maps;
pthread_key_t fuse_req_key = 0;
void set_fuse_req(fuse_req_t);
#include "include/types.h"
+#include <unordered_map>
+
struct cls_2pc_reservation
{
using id_t = uint32_t;
};
WRITE_CLASS_ENCODER(cls_2pc_reservation)
-using cls_2pc_reservations = ceph::unordered_map<cls_2pc_reservation::id_t, cls_2pc_reservation>;
+using cls_2pc_reservations = std::unordered_map<cls_2pc_reservation::id_t, cls_2pc_reservation>;
struct cls_2pc_urgent_data
{
#include <fmt/format.h>
#include "include/types.h" // for version_t
-#include "include/unordered_set.h"
#include "include/utime.h"
#include "include/utime_fmt.h"
#include "msg/msg_fmt.h"
#include <list>
#include <map>
#include <string>
+#include <unordered_set>
namespace ceph {
class Formatter;
// channel -> [(seq#, entry), ...]
std::map<std::string,std::list<std::pair<uint64_t,LogEntry>>> tail_by_channel;
uint64_t seq = 0;
- ceph::unordered_set<LogEntryKey> keys;
+ std::unordered_set<LogEntryKey> keys;
// ---- quincy+ ----
LRUSet<LogEntryKey> recent_keys;
#include <vector>
#include "common/ceph_mutex.h"
-#include "include/unordered_map.h"
#include "common/config_obs.h"
#include "common/HeartbeatMap.h"
#include "common/Thread.h"
int result = 0;
{
std::lock_guard locker(m_lock);
- ceph::unordered_map<Context *, int>::iterator it =
- m_context_results.find(ctx);
+ auto it = m_context_results.find(ctx);
if (it != m_context_results.end()) {
result = it->second;
m_context_results.erase(it);
}
private:
ceph::mutex m_lock = ceph::make_mutex("ContextWQ::m_lock");
- ceph::unordered_map<Context*, int> m_context_results;
+ std::unordered_map<Context*, int> m_context_results;
};
class ShardedThreadPool {
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/valgrind.h"
-#include "include/unordered_map.h"
#include <algorithm> // for std::for_each()
#include <bitset>
#include <map>
#include <string>
+#include <unordered_map>
#include <vector>
/******* Constants **********/
#define lockdep_dout(v) lsubdout(g_lockdep_ceph_ctx, lockdep, v)
static pthread_mutex_t lockdep_mutex = PTHREAD_MUTEX_INITIALIZER;
static CephContext *g_lockdep_ceph_ctx = NULL;
static lockdep_stopper_t lockdep_stopper;
-static ceph::unordered_map<std::string, int> lock_ids;
+static std::unordered_map<std::string, int> lock_ids;
static std::map<int, std::string> lock_names;
static std::map<int, int> lock_refs;
static constexpr size_t MAX_LOCKS = 128 * 1024; // increase me as needed
static std::bitset<MAX_LOCKS> free_ids; // bit set = free
-static ceph::unordered_map<pthread_t, std::map<int,ceph::BackTrace*> > held;
+static std::unordered_map<pthread_t, std::map<int, ceph::BackTrace*>> held;
static constexpr size_t NR_LOCKS = 4096; // the initial number of locks
static std::vector<std::bitset<MAX_LOCKS>> follows(NR_LOCKS); // follows[a][b] means b taken after a
static std::vector<std::map<int,ceph::BackTrace *>> follows_bt(NR_LOCKS);
if (!g_lockdep)
return id;
- ceph::unordered_map<std::string, int>::iterator p = lock_ids.find(name);
+ auto p = lock_ids.find(name);
if (p == lock_ids.end()) {
id = lockdep_get_free_id();
if (id < 0) {
#include "common/ceph_mutex.h"
#include "common/ceph_context.h"
#include "common/dout.h"
-#include "include/unordered_map.h"
+
+#include <unordered_map>
template <class K, class V>
class SharedLRU {
private:
using C = std::less<K>;
using H = std::hash<K>;
- ceph::unordered_map<K, typename std::list<std::pair<K, VPtr> >::iterator, H> contents;
+ std::unordered_map<K, typename std::list<std::pair<K, VPtr>>::iterator, H> contents;
std::list<std::pair<K, VPtr> > lru;
std::map<K, std::pair<WeakVPtr, V*>, C> weak_refs;
#include "include/int_types.h"
#include "include/types.h"
#include "include/fs_types.h"
-#include "include/unordered_map.h"
+
+#include <unordered_map>
// We're only converting errors defined in errno.h, not standard Windows
// system error codes that are usually retrieved using GetLastError().
{
DWORD err = ::GetLastError();
return win32_strerror(err);
-}
\ No newline at end of file
+}
#include <string_view>
#include <tuple>
#include <optional>
+#include <unordered_map>
+#include <unordered_set>
+
#include <boost/container/small_vector.hpp>
#include <boost/optional/optional_io.hpp>
#include <boost/tuple/tuple.hpp>
-#include "include/unordered_map.h"
-#include "include/unordered_set.h"
#include "common/ceph_time.h"
#include "include/int_types.h"
template<class T, class U, class Comp, class Alloc>
inline void decode(std::multimap<T,U,Comp,Alloc>& m, bufferlist::const_iterator& p);
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void encode(const unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl,
+inline void encode(const std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl,
uint64_t features);
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void encode(const unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl);
+inline void encode(const std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl);
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void decode(unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p);
+inline void decode(std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p);
template<class T, class Hash, class Pred, class Alloc>
-inline void encode(const ceph::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist& bl);
+inline void encode(const std::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist& bl);
template<class T, class Hash, class Pred, class Alloc>
-inline void decode(ceph::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p);
+inline void decode(std::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p);
template<class T, class Alloc>
inline void encode(const std::deque<T,Alloc>& ls, bufferlist& bl, uint64_t features);
template<class T, class Alloc>
}
}
-// ceph::unordered_map
+// std::unordered_map
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void encode(const unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl,
+inline void encode(const std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl,
uint64_t features)
{
__u32 n = (__u32)(m.size());
}
}
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void encode(const unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl)
+inline void encode(const std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist& bl)
{
__u32 n = (__u32)(m.size());
encode(n, bl);
}
}
template<class T, class U, class Hash, class Pred, class Alloc>
-inline void decode(unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p)
+inline void decode(std::unordered_map<T,U,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p)
{
__u32 n;
decode(n, p);
}
template <std::move_constructible T, std::move_constructible U, class Hash, class Pred, class Alloc>
-inline void decode(unordered_map<T, U, Hash, Pred, Alloc>& m, bufferlist::const_iterator& p)
+inline void decode(std::unordered_map<T, U, Hash, Pred, Alloc>& m, bufferlist::const_iterator& p)
{
__u32 n;
decode(n, p);
}
}
-// ceph::unordered_set
+// std::unordered_set
template<class T, class Hash, class Pred, class Alloc>
-inline void encode(const ceph::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist& bl)
+inline void encode(const std::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist& bl)
{
__u32 n = (__u32)(m.size());
encode(n, bl);
encode(*p, bl);
}
template<class T, class Hash, class Pred, class Alloc>
-inline void decode(ceph::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p)
+inline void decode(std::unordered_set<T,Hash,Pred,Alloc>& m, bufferlist::const_iterator& p)
{
__u32 n;
decode(n, p);
#include <fmt/format.h>
#include "include/rados.h"
-#include "include/unordered_map.h"
#include "common/Formatter.h"
#include "hash.h"
#include <ostream>
#include <iomanip>
-
-#include "include/unordered_map.h"
-
#include "object.h"
#include "intarith.h"
/*
* comparators for stl containers
*/
-// for ceph::unordered_map:
-// ceph::unordered_map<const char*, long, hash<const char*>, eqstr> vals;
+// for std::unordered_map:
+// std::unordered_map<const char*, long, hash<const char*>, eqstr> vals;
struct eqstr
{
bool operator()(const char* s1, const char* s2) const
+++ /dev/null
-#ifndef CEPH_UNORDERED_MAP_H
-#define CEPH_UNORDERED_MAP_H
-
-#include <unordered_map>
-
-namespace ceph {
- using std::unordered_map;
- using std::unordered_multimap;
-}
-
-#endif
+++ /dev/null
-#ifndef CEPH_UNORDERED_SET_H
-#define CEPH_UNORDERED_SET_H
-
-#include <unordered_set>
-
-namespace ceph {
- using std::unordered_set;
-}
-
-#endif
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#include "common/snap_types.h"
-#include "include/unordered_map.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
+
#include <queue>
+#include <unordered_map>
class Context;
ceph_tid_t m_tid;
ceph::mutex& m_lock;
librbd::ImageCtx *m_ictx;
- ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
+ std::unordered_map<std::string, std::queue<write_result_d*>> m_writes;
friend class C_OrderedWrite;
};
#include "CDentry.h"
#include "CDir.h"
-#include "include/unordered_set.h"
-
-using ceph::unordered_set;
+#include <unordered_set>
class CDir;
class CInode;
interval_set<inodeno_t> purging_inodes;
MDSContext* purged_cb = nullptr;
- std::map<int, ceph::unordered_set<version_t> > pending_commit_tids; // mdstable
+ std::map<int, std::unordered_set<version_t>> pending_commit_tids; // mdstable
std::set<metareqid_t> uncommitted_leaders;
std::set<metareqid_t> uncommitted_peers;
std::set<dirfrag_t> uncommitted_fragments;
#include <chrono>
#include <string_view>
#include <thread>
+#include <unordered_map>
#include "common/DecayCounter.h"
#include "common/MemoryModel.h"
void repair_dirfrag_stats_work(const MDRequestRef& mdr);
void rdlock_dirfrags_stats_work(const MDRequestRef& mdr);
- ceph::unordered_map<inodeno_t,CInode*> inode_map; // map of head inodes by ino
+ std::unordered_map<inodeno_t, CInode*> inode_map; // map of head inodes by ino
std::map<vinodeno_t, CInode*> snap_inode_map; // map of snap inodes by ino
CInode *root = nullptr; // root inode
CInode *myin = nullptr; // .ceph/mds%d dir
std::map<CInode*,std::list<std::pair<CDir*,CDir*> > > projected_subtree_renames; // renamed ino -> target dir
// -- requests --
- ceph::unordered_map<metareqid_t, MDRequestRef> active_requests;
+ std::unordered_map<metareqid_t, MDRequestRef> active_requests;
// -- recovery --
std::set<mds_rank_t> recovery_set;
#define CEPH_MDS_MUTATION_H
#include <optional>
+#include <unordered_map>
#include "include/interval_set.h"
#include "include/elist.h"
// flag mutation as peer
mds_rank_t peer_to_mds = MDS_RANK_NONE; // this is a peer request if >= 0.
- ceph::unordered_map<MDSCacheObject*, ObjectState> object_states;
+ std::unordered_map<MDSCacheObject*, ObjectState> object_states;
int num_pins = 0;
int num_auth_pins = 0;
int num_remote_auth_pins = 0;
bool any_finished = false;
bool any_repaired = false;
std::set<std::string> scrubbing_tags;
- std::unordered_map<std::string, unordered_map<int, std::vector<_inodeno_t>>> uninline_failed_meta_info;
+ std::unordered_map<std::string, std::unordered_map<int, std::vector<_inodeno_t>>> uninline_failed_meta_info;
std::unordered_map<_inodeno_t, std::string> paths;
std::unordered_map<std::string, std::vector<uint64_t>> counters;
void SessionMap::dump()
{
dout(10) << "dump" << dendl;
- for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
- p != session_map.end();
- ++p)
+ for (auto p = session_map.begin(); p != session_map.end(); ++p)
dout(10) << p->first << " " << p->second
<< " state " << p->second->get_state_name()
<< " completed " << p->second->info.completed_requests
} else {
// I/O is complete. Update `by_state`
dout(10) << __func__ << ": omap load complete" << dendl;
- for (ceph::unordered_map<entity_name_t, Session*>::iterator i = session_map.begin();
- i != session_map.end(); ++i) {
+ for (auto i = session_map.begin(); i != session_map.end(); ++i) {
Session *s = i->second;
auto by_state_entry = by_state.find(s->get_state());
if (by_state_entry == by_state.end())
// Mark all sessions dirty, so that on next save() we will write
// a complete OMAP version of the data loaded from the legacy format
- for (ceph::unordered_map<entity_name_t, Session*>::iterator i = session_map.begin();
- i != session_map.end(); ++i) {
+ for (auto i = session_map.begin(); i != session_map.end(); ++i) {
// Don't use mark_dirty because on this occasion we want to ignore the
// keys_per_op limit and do one big write (upgrade must be atomic)
dirty_sessions.insert(i->first);
SessionMapStore::decode_legacy(p);
// Update `by_state`
- for (ceph::unordered_map<entity_name_t, Session*>::iterator i = session_map.begin();
- i != session_map.end(); ++i) {
+ for (auto i = session_map.begin(); i != session_map.end(); ++i) {
Session *s = i->second;
auto by_state_entry = by_state.find(s->get_state());
if (by_state_entry == by_state.end())
void SessionMap::wipe_ino_prealloc()
{
- for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
- p != session_map.end();
- ++p) {
+ for (auto p = session_map.begin(); p != session_map.end(); ++p) {
p->second->pending_prealloc_inos.clear();
p->second->free_prealloc_inos.clear();
p->second->delegated_inos.clear();
#define CEPH_MDS_SESSIONMAP_H
#include <set>
-
-#include "include/unordered_map.h"
+#include <unordered_map>
#include "include/Context.h"
#include "include/xlist.h"
protected:
version_t version = 0;
- ceph::unordered_map<entity_name_t, Session*> session_map;
+ std::unordered_map<entity_name_t, Session*> session_map;
PerfCounters *logger = nullptr;
// total request load avg
session_map_entry-> second : nullptr);
}
const Session* get_session(entity_name_t w) const {
- ceph::unordered_map<entity_name_t, Session*>::const_iterator p = session_map.find(w);
+ auto p = session_map.find(w);
if (p == session_map.end()) {
return NULL;
} else {
touched_sessions.clear();
// pending commit atids
- for (map<int, ceph::unordered_set<version_t> >::iterator p = pending_commit_tids.begin();
+ for (auto p = pending_commit_tids.begin();
p != pending_commit_tids.end();
++p) {
MDSTableClient *client = mds->get_table_client(p->first);
ceph_assert(client);
- for (ceph::unordered_set<version_t>::iterator q = p->second.begin();
- q != p->second.end();
- ++q) {
+ for (auto q = p->second.begin(); q != p->second.end(); ++q) {
dout(10) << "try_to_expire " << get_mdstable_name(p->first) << " transaction " << *q
<< " pending commit (not yet acked), waiting" << dendl;
ceph_assert(!client->has_committed(*q));
#include <map>
#include <set>
#include <string>
+#include <unordered_map>
#include "common/ceph_mutex.h"
#include "common/LogClient.h"
std::set<ConnectionRef> daemon_connections;
/// connections for osds
- ceph::unordered_map<int,std::set<ConnectionRef>> osd_cons;
+ std::unordered_map<int, std::set<ConnectionRef>> osd_cons;
ServiceMap pending_service_map; // uncommitted
public md_config_obs_t {
private:
std::multimap<utime_t,LogEntry> pending_log;
- unordered_set<LogEntryKey> pending_keys;
+ std::unordered_set<LogEntryKey> pending_keys;
LogSummary summary;
}
// expire blocklisted items?
- for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blocklist.begin();
- p != osdmap.blocklist.end();
- ++p) {
+ for (auto p = osdmap.blocklist.begin(); p != osdmap.blocklist.end(); ++p) {
if (p->second < now) {
dout(10) << "expiring blocklist item " << p->first << " expired " << p->second << " < now " << now << dendl;
pending_inc.old_blocklist.push_back(p->first);
if (f)
f->open_array_section("blocklist");
- for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blocklist.begin();
- p != osdmap.blocklist.end();
- ++p) {
+ for (auto p = osdmap.blocklist.begin(); p != osdmap.blocklist.end(); ++p) {
if (f) {
f->open_object_section("entry");
f->dump_string("addr", p->first.get_legacy_str());
#include <map>
#include <optional>
+#include <unordered_map>
#include "include/types.h"
#include "include/xlist.h"
#include "include/spinlock.h"
-#include "include/unordered_map.h"
-#include "include/unordered_set.h"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
* NOTE: an AsyncConnection* with state CLOSED may still be in the map but is considered
* invalid and can be replaced by anyone holding the msgr lock
*/
- ceph::unordered_map<entity_addrvec_t, AsyncConnectionRef> conns;
+ std::unordered_map<entity_addrvec_t, AsyncConnectionRef> conns;
/**
* list of connection are in the process of accepting
#include <list>
#include <vector>
#include <thread>
+#include <unordered_map>
#include "common/ceph_context.h"
#include "common/debug.h"
*
* @param qp The qp to be marked dead
*/
- ceph::unordered_map<uint32_t, std::pair<QueuePair*, RDMAConnectedSocketImpl*> > qp_conns;
+ std::unordered_map<uint32_t, std::pair<QueuePair*, RDMAConnectedSocketImpl*> > qp_conns;
/// if a queue pair is closed when transmit buffers are active
/// on it, the transmit buffers never get returned via tx_cq. To
{
std::lock_guard l(cache->lock);
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
+ auto p = onode_map.find(oid);
if (p == onode_map.end()) {
ldout(cache->cct, 30) << __func__ << " " << oid << " miss" << dendl;
cache->logger->inc(l_bluestore_onode_misses);
std::lock_guard l(cache->lock);
ldout(cache->cct, 30) << __func__ << " " << old_oid << " -> " << new_oid
<< dendl;
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
- po = onode_map.find(old_oid);
- pn = onode_map.find(new_oid);
+ auto po = onode_map.find(old_oid);
+ auto pn = onode_map.find(new_oid);
ceph_assert(po != pn);
ceph_assert(po != onode_map.end());
BlueStore::CollectionRef BlueStore::_get_collection(const coll_t& cid)
{
std::shared_lock l(coll_lock);
- ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
{
std::shared_lock l(coll_lock);
ls.reserve(coll_map.size());
- for (ceph::unordered_map<coll_t, CollectionRef>::iterator p = coll_map.begin();
- p != coll_map.end();
- ++p)
+ for (auto p = coll_map.begin(); p != coll_map.end(); ++p)
ls.push_back(p->first);
return 0;
}
#include <mutex>
#include <queue>
#include <shared_mutex> // for std::shared_lock
+#include <unordered_map>
#include <condition_variable>
#include <boost/intrusive/list.hpp>
#include "include/ceph_assert.h"
#include "include/interval_set.h"
-#include "include/unordered_map.h"
#include "include/mempool.h"
#include "include/hash.h"
#include "common/bloom_filter.hpp"
{
std::lock_guard<std::mutex> l(lock);
dout(30) << __func__ << dendl;
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(oid);
+ auto p = onode_map.find(oid);
if (p == onode_map.end()) {
dout(30) << __func__ << " " << oid << " miss" << dendl;
return OnodeRef();
{
std::lock_guard<std::mutex> l(lock);
dout(30) << __func__ << " " << old_oid << " -> " << new_oid << dendl;
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator po, pn;
- po = onode_map.find(old_oid);
- pn = onode_map.find(new_oid);
+ auto po = onode_map.find(old_oid);
+ auto pn = onode_map.find(new_oid);
ceph_assert(po != onode_map.end());
if (pn != onode_map.end()) {
if (lru.empty()) {
return false;
}
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.begin();
+ auto p = onode_map.begin();
ceph_assert(p != onode_map.end());
next->first = p->first;
next->second = p->second;
return true;
}
- ceph::unordered_map<ghobject_t,OnodeRef>::iterator p = onode_map.find(after);
+ auto p = onode_map.find(after);
ceph_assert(p != onode_map.end()); // for now
lru_list_t::iterator pi = lru.iterator_to(*p->second);
++pi;
KStore::CollectionRef KStore::_get_collection(coll_t cid)
{
std::shared_lock l{coll_lock};
- ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
int KStore::list_collections(vector<coll_t>& ls)
{
std::shared_lock l{coll_lock};
- for (ceph::unordered_map<coll_t, CollectionRef>::iterator p = coll_map.begin();
- p != coll_map.end();
- ++p)
+ for (auto p = coll_map.begin(); p != coll_map.end(); ++p)
ls.push_back(p->first);
return 0;
}
#include <atomic>
#include <mutex>
+#include <unordered_map>
#include <condition_variable>
#include "include/ceph_assert.h"
-#include "include/unordered_map.h"
#include "common/Finisher.h"
#include "common/Throttle.h"
#include "common/WorkQueue.h"
&Onode::lru_item> > lru_list_t;
std::mutex lock;
- ceph::unordered_map<ghobject_t,OnodeRef> onode_map; ///< forward lookups
+ std::unordered_map<ghobject_t, OnodeRef> onode_map; ///< forward lookups
lru_list_t lru; ///< lru
OnodeHashLRU(CephContext* cct) : cct(cct) {}
/// rwlock to protect coll_map
ceph::shared_mutex coll_lock = ceph::make_shared_mutex("KStore::coll_lock");
- ceph::unordered_map<coll_t, CollectionRef> coll_map;
+ std::unordered_map<coll_t, CollectionRef> coll_map;
std::map<coll_t,CollectionRef> new_coll_map;
std::mutex nid_lock;
#include "include/types.h"
#include "include/stringify.h"
-#include "include/unordered_map.h"
#include "common/errno.h"
#include "MemStore.h"
#include "include/compat.h"
MemStore::CollectionRef MemStore::get_collection(const coll_t& cid)
{
std::shared_lock l{coll_lock};
- ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
{
dout(10) << __func__ << dendl;
std::shared_lock l{coll_lock};
- for (ceph::unordered_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
- p != coll_map.end();
- ++p) {
+ for (auto p = coll_map.begin(); p != coll_map.end(); ++p) {
ls.push_back(p->first);
}
return 0;
{
dout(10) << __func__ << " " << cid << dendl;
std::lock_guard l{coll_lock};
- ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return -ENOENT;
{
{
std::lock_guard l{coll_lock};
- ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
ceph_assert(cp != coll_map.end());
used_bytes -= cp->second->used_bytes();
coll_map.erase(cp);
#include <atomic>
#include <mutex>
+#include <unordered_map>
+
#include <boost/intrusive_ptr.hpp>
-#include "include/unordered_map.h"
#include "common/Finisher.h"
#include "common/RefCountedObj.h"
#include "os/ObjectStore.h"
int bits = 0;
CephContext *cct;
bool use_page_set;
- ceph::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
+ std::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
std::map<ghobject_t, ObjectRef> object_map; ///< for iteration
std::map<std::string,ceph::buffer::ptr> xattr;
/// for object_{map,hash}
class OmapIteratorImpl;
- ceph::unordered_map<coll_t, CollectionRef> coll_map;
+ std::unordered_map<coll_t, CollectionRef> coll_map;
/// rwlock to protect coll_map
ceph::shared_mutex coll_lock{
ceph::make_shared_mutex("MemStore::coll_lock")};
void ExplicitHashHitSet::dump(Formatter *f) const {
f->dump_unsigned("insert_count", count);
f->open_array_section("hash_set");
- for (ceph::unordered_set<uint32_t>::const_iterator p = hits.begin();
- p != hits.end();
- ++p)
+ for (auto p = hits.cbegin(); p != hits.cend(); ++p)
f->dump_unsigned("hash", *p);
f->close_section();
}
void ExplicitObjectHitSet::dump(Formatter *f) const {
f->dump_unsigned("insert_count", count);
f->open_array_section("set");
- for (ceph::unordered_set<hobject_t>::const_iterator p = hits.begin();
- p != hits.end();
- ++p) {
+ for (auto p = hits.cbegin(); p != hits.cend(); ++p) {
f->open_object_section("object");
p->dump(f);
f->close_section();
#define CEPH_OSD_HITSET_H
#include <string_view>
+#include <unordered_set>
#include <boost/scoped_ptr.hpp>
#include "include/encoding.h"
-#include "include/unordered_set.h"
#include "common/bloom_filter.hpp"
#include "common/hobject.h"
*/
class ExplicitHashHitSet : public HitSet::Impl {
uint64_t count;
- ceph::unordered_set<uint32_t> hits;
+ std::unordered_set<uint32_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
*/
class ExplicitObjectHitSet : public HitSet::Impl {
uint64_t count;
- ceph::unordered_set<hobject_t> hits;
+ std::unordered_set<hobject_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
#include <map>
#include <memory>
#include <string>
-
-#include "include/unordered_map.h"
+#include <unordered_map>
#include "common/intrusive_timer.h"
#include "common/shared_cache.hpp"
*/
#include "PGLog.h"
-#include "include/unordered_map.h"
#include "common/ceph_context.h"
using std::make_pair;
#include "osd_types.h"
#include "os/ObjectStore.h"
#include <list>
+#include <unordered_map>
#ifdef WITH_SEASTAR
#include <seastar/core/future.hh>
* plus some methods to manipulate it all.
*/
struct IndexedLog : public pg_log_t {
- mutable ceph::unordered_map<hobject_t,pg_log_entry_t*> objects; // ptrs into log. be careful!
- mutable ceph::unordered_map<osd_reqid_t,pg_log_entry_t*> caller_ops;
- mutable ceph::unordered_multimap<osd_reqid_t,pg_log_entry_t*> extra_caller_ops;
- mutable ceph::unordered_map<osd_reqid_t,pg_log_dup_t*> dup_index;
+ mutable std::unordered_map<hobject_t, pg_log_entry_t*> objects; // ptrs into log. be careful!
+ mutable std::unordered_map<osd_reqid_t, pg_log_entry_t*> caller_ops;
+ mutable std::unordered_multimap<osd_reqid_t, pg_log_entry_t*> extra_caller_ops;
+ mutable std::unordered_map<osd_reqid_t, pg_log_dup_t*> dup_index;
// recovery pointers
std::list<pg_log_entry_t>::iterator complete_to; // not inclusive of referenced item
#include "include/ceph_assert.h"
+#include <unordered_map>
+
#define MAX_FLUSH_UNDER_LOCK 20 ///< max bh's we start writeback on
#define BUFFER_MEMORY_WEIGHT CEPH_PAGE_SHIFT // memory usage of BufferHead, count in (1<<n)
/// while holding the lock
ldout(cct, 10) << "release_all" << dendl;
uint64_t unclean = 0;
- vector<ceph::unordered_map<sobject_t, Object*> >::iterator i
- = objects.begin();
+ auto i = objects.begin();
while (i != objects.end()) {
- ceph::unordered_map<sobject_t, Object*>::iterator p = i->begin();
+ auto p = i->begin();
while (p != i->end()) {
- ceph::unordered_map<sobject_t, Object*>::iterator n = p;
+ auto n = p;
++n;
Object *ob = p->second;
loff_t clean = 0, zero = 0, dirty = 0, rx = 0, tx = 0, missing = 0,
error = 0;
- for (vector<ceph::unordered_map<sobject_t, Object*> >::const_iterator i
- = objects.begin();
- i != objects.end();
- ++i) {
- for (ceph::unordered_map<sobject_t, Object*>::const_iterator p
- = i->begin();
- p != i->end();
- ++p) {
+ for (auto i = objects.cbegin(); i != objects.cend(); ++i) {
+ for (auto p = i->cbegin(); p != i->cend(); ++p) {
Object *ob = p->second;
for (map<loff_t, BufferHead*>::const_iterator q = ob->data.begin();
q != ob->data.end();
#include "Objecter.h"
#include "Striper.h"
+#include <unordered_map>
+
class WritebackHandler;
enum {
void *flush_set_callback_arg;
// indexed by pool_id
- std::vector<ceph::unordered_map<sobject_t, Object*> > objects;
+ std::vector<std::unordered_map<sobject_t, Object*>> objects;
std::list<Context*> waitfor_read;
#include <chrono>
#include <fmt/format.h>
+#include <unordered_map>
+
#define dout_subsys ceph_subsys_rgw_notification
namespace rgw::notify {
};
using queues_t = std::set<std::string>;
-using entries_persistency_tracker = ceph::unordered_map<std::string, persistency_tracker>;
-using queues_persistency_tracker = ceph::unordered_map<std::string, entries_persistency_tracker>;
+using entries_persistency_tracker = std::unordered_map<std::string, persistency_tracker>;
+using queues_persistency_tracker = std::unordered_map<std::string, entries_persistency_tracker>;
using rgw::persistent_topic_counters::CountersManager;
// use mmap/mprotect to allocate 128k coroutine stacks
// vim: ts=8 sw=2 smarttab
#include <iostream>
+#include <unordered_set>
#include <unistd.h>
#include "gtest/gtest.h"
#include "include/Context.h"
-#include "include/unordered_set.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/CacheServer.h"
-using ceph::unordered_set;
using namespace ceph::immutable_obj_cache;
class TestCommunication :public ::testing::Test {
std::atomic<uint64_t> m_send_request_index;
std::atomic<uint64_t> m_recv_ack_index;
WaitEvent m_wait_event;
- unordered_set<std::string> m_hit_entry_set;
+ std::unordered_set<std::string> m_hit_entry_set;
TestCommunication()
: m_cache_server(nullptr), m_cache_client(nullptr),
ObjectStoreImitator::CollectionRef
ObjectStoreImitator::_get_collection(const coll_t &cid) {
std::shared_lock l(coll_lock);
- ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
ObjectStore::CollectionHandle
ObjectStoreImitator::open_collection(const coll_t &cid) {
std::shared_lock l(coll_lock);
- ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid);
+ auto cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
int ObjectStoreImitator::list_collections(std::vector<coll_t> &ls) {
std::shared_lock l(coll_lock);
ls.reserve(coll_map.size());
- for (ceph::unordered_map<coll_t, CollectionRef>::iterator p =
- coll_map.begin();
- p != coll_map.end(); ++p)
+ for (auto p = coll_map.begin(); p != coll_map.end(); ++p)
ls.push_back(p->first);
return 0;
}
#include "common/pretty_binary.h"
#include "include/stringify.h"
#include "include/coredumpctl.h"
-#include "include/unordered_map.h"
#include "os/kv.h"
#include "store_test_fixture.h"
#include <boost/random/binomial_distribution.hpp>
#include <gtest/gtest.h>
-#include "include/unordered_map.h"
-
void usage(const string &name) {
std::cerr << "Usage: " << name << " [xattr|omap] store_path"
<< std::endl;
#include "common/safe_io.h"
#include "common/strtol.h" // for strict_strtoll()
#include "crush/CrushWrapper.h"
-#include "include/unordered_map.h"
#include "include/random.h"
#include "mon/health_check.h"
#include <time.h>
#include <algorithm>
+#include <unordered_map>
#include "global/global_init.h"
#include "osd/OSDMap.h"
while (1) {
cout << "pass " << ++pass << std::endl;
- ceph::unordered_map<pg_t,vector<int> > m;
+ std::unordered_map<pg_t, vector<int>> m;
for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin();
p != osdmap.get_pools().end();
++p) {