#include <time.h>
-#include <ext/hash_map>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
+#include "include/hash_namespace.h"
#ifndef __LP64__
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<uint64_t> {
size_t operator()(uint64_t __x) const {
static hash<uint32_t> H;
return H((__x >> 32) ^ (__x & 0xffffffff));
}
};
-}
+CEPH_HASH_NAMESPACE_END
#endif
};
Inode *root = 0;
-hash_map<ino_t, Inode*> inode_map;
+ceph::unordered_map<ino_t, Inode*> inode_map;
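The new "include/unordered_map.h" and "include/hash_namespace.h" headers referenced above are not part of this diff. As a rough sketch of what such wrappers could contain (the feature test and exact contents are assumptions, not taken from the patch), they alias whichever unordered_map implementation is available into the ceph:: namespace and hide the namespace that owns the hash<> template:

    // Hypothetical sketch of include/unordered_map.h
    #ifndef CEPH_UNORDERED_MAP_H
    #define CEPH_UNORDERED_MAP_H

    #include <ciso646>          // exposes _LIBCPP_VERSION when building against libc++

    #ifdef _LIBCPP_VERSION
    #include <unordered_map>
    namespace ceph {
      using std::unordered_map;
    }
    #else
    #include <tr1/unordered_map>
    namespace ceph {
      using std::tr1::unordered_map;
    }
    #endif

    #endif

    // Hypothetical sketch of include/hash_namespace.h: macros that open and close
    // whichever namespace provides the hash<> template being specialized below.
    #ifndef CEPH_HASH_NAMESPACE_H
    #define CEPH_HASH_NAMESPACE_H

    #include <ciso646>

    #ifdef _LIBCPP_VERSION
    #include <functional>
    #define CEPH_HASH_NAMESPACE_START namespace std {
    #define CEPH_HASH_NAMESPACE_END }
    #define CEPH_HASH_NAMESPACE std
    #else
    #include <tr1/functional>
    #define CEPH_HASH_NAMESPACE_START namespace std { namespace tr1 {
    #define CEPH_HASH_NAMESPACE_END }}
    #define CEPH_HASH_NAMESPACE std::tr1
    #endif

    #endif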
bool make_inode_path(string &buf, Inode *in)
{
void Client::tear_down_cache()
{
// fd's
- for (hash_map<int, Fh*>::iterator it = fd_map.begin();
+ for (ceph::unordered_map<int, Fh*>::iterator it = fd_map.begin();
it != fd_map.end();
++it) {
Fh *fh = it->second;
did.insert(in);
if (in->dir) {
ldout(cct, 1) << " dir " << in->dir << " size " << in->dir->dentries.size() << dendl;
- for (hash_map<string, Dentry*>::iterator it = in->dir->dentries.begin();
+ for (ceph::unordered_map<string, Dentry*>::iterator it = in->dir->dentries.begin();
it != in->dir->dentries.end();
++it) {
ldout(cct, 1) << " " << in->ino << " dn " << it->first << " " << it->second << " ref " << it->second->ref << dendl;
dump_inode(f, root, did, true);
// make a second pass to catch anything disconnected
- for (hash_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
it != inode_map.end();
++it) {
if (did.count(it->second))
bufferlist extra_bl;
inodeno_t created_ino;
bool got_created_ino = false;
- hash_map<vinodeno_t, Inode*>::iterator p;
+ ceph::unordered_map<vinodeno_t, Inode*>::iterator p;
extra_bl.claim(reply->get_extra_bl());
if (extra_bl.length() >= 8) {
MClientReconnect *m = new MClientReconnect;
// i have an open session.
- hash_set<inodeno_t> did_snaprealm;
- for (hash_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
+ ceph::unordered_set<inodeno_t> did_snaprealm;
+ for (ceph::unordered_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
p != inode_map.end();
++p) {
Inode *in = p->second;
if (cct->_conf->client_oc) {
// flush/release all buffered data
- hash_map<vinodeno_t, Inode*>::iterator next;
- for (hash_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
+ ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
+ for (ceph::unordered_map<vinodeno_t, Inode*>::iterator p = inode_map.begin();
p != inode_map.end();
p = next) {
next = p;
void Client::_ll_drop_pins()
{
ldout(cct, 10) << "_ll_drop_pins" << dendl;
- hash_map<vinodeno_t, Inode*>::iterator next;
- for (hash_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
+ ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
+ for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
it != inode_map.end();
it = next) {
Inode *in = it->second;
using std::map;
using std::fstream;
-#include <ext/hash_map>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
#include "include/filepath.h"
#include "include/interval_set.h"
WritebackHandler *writeback_handler;
// cache
- hash_map<vinodeno_t, Inode*> inode_map;
+ ceph::unordered_map<vinodeno_t, Inode*> inode_map;
Inode* root;
LRU lru; // lru list of Dentry's in our local metadata cache.
// all inodes with caps sit on either cap_list or delayed_caps.
xlist<Inode*> delayed_caps, cap_list;
int num_flushing_caps;
- hash_map<inodeno_t,SnapRealm*> snap_realms;
+ ceph::unordered_map<inodeno_t,SnapRealm*> snap_realms;
SnapRealm *get_snap_realm(inodeno_t r);
SnapRealm *get_snap_realm_maybe(inodeno_t r);
// file handles, etc.
interval_set<int> free_fd_set; // unused fds
- hash_map<int, Fh*> fd_map;
+ ceph::unordered_map<int, Fh*> fd_map;
int get_fd() {
int fd = free_fd_set.range_start();
* Resolve file descriptor, or return NULL.
*/
Fh *get_filehandle(int fd) {
- hash_map<int, Fh*>::iterator p = fd_map.find(fd);
+ ceph::unordered_map<int, Fh*>::iterator p = fd_map.find(fd);
if (p == fd_map.end())
return NULL;
return p->second;
class Dir {
public:
Inode *parent_inode; // my inode
- hash_map<string, Dentry*> dentries;
+ ceph::unordered_map<string, Dentry*> dentries;
map<string, Dentry*> dentry_map;
uint64_t release_count;
uint64_t max_offset;
utime_t start = ceph_clock_now(client->cct);
- hash_map<int64_t, int64_t> open_files;
- hash_map<int64_t, dir_result_t*> open_dirs;
+ ceph::unordered_map<int64_t, int64_t> open_files;
+ ceph::unordered_map<int64_t, dir_result_t*> open_dirs;
- hash_map<int64_t, Fh*> ll_files;
- hash_map<int64_t, void*> ll_dirs;
- hash_map<uint64_t, int64_t> ll_inos;
+ ceph::unordered_map<int64_t, Fh*> ll_files;
+ ceph::unordered_map<int64_t, void*> ll_dirs;
+ ceph::unordered_map<uint64_t, int64_t> ll_inos;
ll_inos[1] = 1; // root inode is known.
lock.Unlock();
// close open files
- for (hash_map<int64_t, int64_t>::iterator fi = open_files.begin();
+ for (ceph::unordered_map<int64_t, int64_t>::iterator fi = open_files.begin();
fi != open_files.end();
++fi) {
dout(1) << "leftover close " << fi->second << dendl;
if (fi->second > 0) client->close(fi->second);
}
- for (hash_map<int64_t, dir_result_t*>::iterator fi = open_dirs.begin();
+ for (ceph::unordered_map<int64_t, dir_result_t*>::iterator fi = open_dirs.begin();
fi != open_dirs.end();
++fi) {
dout(1) << "leftover closedir " << fi->second << dendl;
if (fi->second != 0) client->closedir(fi->second);
}
- for (hash_map<int64_t,Fh*>::iterator fi = ll_files.begin();
+ for (ceph::unordered_map<int64_t,Fh*>::iterator fi = ll_files.begin();
fi != ll_files.end();
++fi) {
dout(1) << "leftover ll_release " << fi->second << dendl;
if (fi->second) client->ll_release(fi->second);
}
- for (hash_map<int64_t,void*>::iterator fi = ll_dirs.begin();
+ for (ceph::unordered_map<int64_t,void*>::iterator fi = ll_dirs.begin();
fi != ll_dirs.end();
++fi) {
dout(1) << "leftover ll_releasedir " << fi->second << dendl;
memset(&empty, 0, sizeof(empty));
statq.push_back(empty);
- hash_map<inodeno_t, int> nlink;
- hash_map<inodeno_t, int> nlink_seen;
+ ceph::unordered_map<inodeno_t, int> nlink;
+ ceph::unordered_map<inodeno_t, int> nlink_seen;
while (!dirq.empty()) {
string dir = dirq.front();
}
}
- for (hash_map<inodeno_t,int>::iterator p = nlink.begin(); p != nlink.end(); ++p) {
+ for (ceph::unordered_map<inodeno_t,int>::iterator p = nlink.begin(); p != nlink.end(); ++p) {
if (nlink_seen[p->first] != p->second)
dout(0) << p->first << " nlink " << p->second << " != " << nlink_seen[p->first] << "seen" << dendl;
}
#include <iostream>
#include <map>
-using namespace __gnu_cxx;
#include "common/Mutex.h"
Mutex stag_lock;
int last_stag;
- hash_map<uint64_t,int> snap_stag_map;
- hash_map<int,uint64_t> stag_snap_map;
+ ceph::unordered_map<uint64_t,int> snap_stag_map;
+ ceph::unordered_map<int,uint64_t> stag_snap_map;
};
#include "include/histogram.h"
#include "include/xlist.h"
#include "msg/Message.h"
-#include <tr1/memory>
+#include "include/memory.h"
class TrackedOp;
-typedef std::tr1::shared_ptr<TrackedOp> TrackedOpRef;
+typedef ceph::shared_ptr<TrackedOp> TrackedOpRef;
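The "include/memory.h" header supplying ceph::shared_ptr and ceph::weak_ptr is likewise not shown in this diff; a plausible minimal sketch (its exact contents are an assumption) follows the same pattern:

    // Hypothetical sketch of include/memory.h: pull the smart-pointer types into
    // the ceph:: namespace from whichever standard library provides them.
    #ifndef CEPH_MEMORY_H
    #define CEPH_MEMORY_H

    #include <ciso646>

    #ifdef _LIBCPP_VERSION
    #include <memory>
    namespace ceph {
      using std::shared_ptr;
      using std::weak_ptr;
      using std::static_pointer_cast;
    }
    #else
    #include <tr1/memory>
    namespace ceph {
      using std::tr1::shared_ptr;
      using std::tr1::weak_ptr;
      using std::tr1::static_pointer_cast;
    }
    #endif

    #endif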
class OpTracker;
class OpHistory {
/* Do not use when a particular hash function is needed */
explicit hobject_t(const sobject_t &o) :
oid(o.oid), snap(o.snap), max(false), pool(-1) {
- hash = __gnu_cxx::hash<sobject_t>()(o);
+ hash = CEPH_HASH_NAMESPACE::hash<sobject_t>()(o);
}
// maximum sorted value.
};
WRITE_CLASS_ENCODER(hobject_t)
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<hobject_t> {
size_t operator()(const hobject_t &r) const {
static hash<object_t> H;
return H(r.oid) ^ I(r.snap);
}
};
-}
+CEPH_HASH_NAMESPACE_END
ostream& operator<<(ostream& out, const hobject_t& o);
};
WRITE_CLASS_ENCODER(ghobject_t)
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<ghobject_t> {
size_t operator()(const ghobject_t &r) const {
static hash<object_t> H;
return H(r.hobj.oid) ^ I(r.hobj.snap);
}
};
-}
+CEPH_HASH_NAMESPACE_END
ostream& operator<<(ostream& out, const ghobject_t& o);
#include "include/types.h"
#include "lockdep.h"
-#include <ext/hash_map>
+#include "include/unordered_map.h"
+#include "include/hash_namespace.h"
#if defined(__FreeBSD__) && defined(__LP64__) // On FreeBSD pthread_t is a pointer.
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<>
struct hash<pthread_t>
{
operator()(pthread_t __x) const
{ return (uintptr_t)__x; }
};
-}
+CEPH_HASH_NAMESPACE_END
#endif
/******* Constants **********/
static pthread_mutex_t lockdep_mutex = PTHREAD_MUTEX_INITIALIZER;
static CephContext *g_lockdep_ceph_ctx = NULL;
static lockdep_stopper_t lockdep_stopper;
-static hash_map<const char *, int> lock_ids;
+static ceph::unordered_map<const char *, int> lock_ids;
static map<int, const char *> lock_names;
static int last_id = 0;
-static hash_map<pthread_t, map<int,BackTrace*> > held;
+static ceph::unordered_map<pthread_t, map<int,BackTrace*> > held;
static BackTrace *follows[MAX_LOCKS][MAX_LOCKS]; // follows[a][b] means b taken after a
/******* Functions **********/
{
pthread_mutex_lock(&lockdep_mutex);
- for (hash_map<pthread_t, map<int,BackTrace*> >::iterator p = held.begin();
+ for (ceph::unordered_map<pthread_t, map<int,BackTrace*> >::iterator p = held.begin();
p != held.end();
++p) {
lockdep_dout(0) << "--- thread " << p->first << " ---" << dendl;
for (int j=0; j<MAX_LOCKS; j++)
follows[i][j] = NULL;
- hash_map<const char *, int>::iterator p = lock_ids.find(name);
+ ceph::unordered_map<const char *, int>::iterator p = lock_ids.find(name);
if (p == lock_ids.end()) {
assert(last_id < MAX_LOCKS);
id = last_id++;
#include <boost/scoped_ptr.hpp>
#include <boost/optional.hpp>
-#include <tr1/memory>
+#include "include/memory.h"
#include <set>
#include <map>
#include <utility>
template <class K, class V>
class SharedLRU {
- typedef std::tr1::shared_ptr<V> VPtr;
- typedef std::tr1::weak_ptr<V> WeakVPtr;
+ typedef ceph::shared_ptr<V> VPtr;
+ typedef ceph::weak_ptr<V> WeakVPtr;
Mutex lock;
size_t max_size;
Cond cond;
template <class K, class V>
class SharedPtrRegistry {
public:
- typedef std::tr1::shared_ptr<V> VPtr;
- typedef std::tr1::weak_ptr<V> WeakVPtr;
+ typedef ceph::shared_ptr<V> VPtr;
+ typedef ceph::weak_ptr<V> WeakVPtr;
int waiting;
private:
Mutex lock;
#include <set>
#include <iostream>
-#include <tr1/memory>
+#include "include/memory.h"
#define mydout(cct, v) lgeneric_subdout(cct, context, v)
to_run->complete(0);
}
};
-typedef std::tr1::shared_ptr<RunOnDelete> RunOnDeleteRef;
+typedef ceph::shared_ptr<RunOnDelete> RunOnDeleteRef;
/*
* finish and destroy a list of Contexts
#include "include/int_types.h"
-#include <tr1/memory>
+#include "include/memory.h"
#include "byteorder.h"
#include "buffer.h"
#ifndef _BACKWARD_BACKWARD_WARNING_H
#define _BACKWARD_BACKWARD_WARNING_H // make gcc 4.3 shut up about hash_*
#endif
-#include <ext/hash_map>
-#include <ext/hash_set>
+#include "include/unordered_map.h"
+#include "include/unordered_set.h"
#include "triple.h"
}
template<class T>
-inline void encode(const std::list<std::tr1::shared_ptr<T> >& ls, bufferlist& bl)
+inline void encode(const std::list<ceph::shared_ptr<T> >& ls, bufferlist& bl)
{
// should i pre- or post- count?
if (!ls.empty()) {
unsigned pos = bl.length();
unsigned n = 0;
encode(n, bl);
- for (typename std::list<std::tr1::shared_ptr<T> >::const_iterator p = ls.begin(); p != ls.end(); ++p) {
+ for (typename std::list<ceph::shared_ptr<T> >::const_iterator p = ls.begin(); p != ls.end(); ++p) {
n++;
encode(**p, bl);
}
} else {
__u32 n = ls.size(); // FIXME: this is slow on a list.
encode(n, bl);
- for (typename std::list<std::tr1::shared_ptr<T> >::const_iterator p = ls.begin(); p != ls.end(); ++p)
+ for (typename std::list<ceph::shared_ptr<T> >::const_iterator p = ls.begin(); p != ls.end(); ++p)
encode(**p, bl);
}
}
template<class T>
-inline void decode(std::list<std::tr1::shared_ptr<T> >& ls, bufferlist::iterator& p)
+inline void decode(std::list<ceph::shared_ptr<T> >& ls, bufferlist::iterator& p)
{
__u32 n;
decode(n, p);
ls.clear();
while (n--) {
- std::tr1::shared_ptr<T> v(new T);
+ ceph::shared_ptr<T> v(new T);
decode(*v, p);
ls.push_back(v);
}
// vector (shared_ptr)
template<class T>
-inline void encode(const std::vector<std::tr1::shared_ptr<T> >& v, bufferlist& bl)
+inline void encode(const std::vector<ceph::shared_ptr<T> >& v, bufferlist& bl)
{
__u32 n = v.size();
encode(n, bl);
- for (typename std::vector<std::tr1::shared_ptr<T> >::const_iterator p = v.begin(); p != v.end(); ++p)
+ for (typename std::vector<ceph::shared_ptr<T> >::const_iterator p = v.begin(); p != v.end(); ++p)
if (*p)
encode(**p, bl);
else
encode(T(), bl);
}
template<class T>
-inline void decode(std::vector<std::tr1::shared_ptr<T> >& v, bufferlist::iterator& p)
+inline void decode(std::vector<ceph::shared_ptr<T> >& v, bufferlist::iterator& p)
{
__u32 n;
decode(n, p);
}
}
-// hash_map
+// ceph::unordered_map
template<class T, class U>
-inline void encode(const __gnu_cxx::hash_map<T,U>& m, bufferlist& bl)
+inline void encode(const ceph::unordered_map<T,U>& m, bufferlist& bl)
{
__u32 n = m.size();
encode(n, bl);
- for (typename __gnu_cxx::hash_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
+ for (typename ceph::unordered_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
encode(p->first, bl);
encode(p->second, bl);
}
}
template<class T, class U>
-inline void decode(__gnu_cxx::hash_map<T,U>& m, bufferlist::iterator& p)
+inline void decode(ceph::unordered_map<T,U>& m, bufferlist::iterator& p)
{
__u32 n;
decode(n, p);
}
}
-// hash_set
+// ceph::unordered_set
template<class T>
-inline void encode(const __gnu_cxx::hash_set<T>& m, bufferlist& bl)
+inline void encode(const ceph::unordered_set<T>& m, bufferlist& bl)
{
__u32 n = m.size();
encode(n, bl);
- for (typename __gnu_cxx::hash_set<T>::const_iterator p = m.begin(); p != m.end(); ++p)
+ for (typename ceph::unordered_set<T>::const_iterator p = m.begin(); p != m.end(); ++p)
encode(*p, bl);
}
template<class T>
-inline void decode(__gnu_cxx::hash_set<T>& m, bufferlist::iterator& p)
+inline void decode(ceph::unordered_set<T>& m, bufferlist::iterator& p)
{
__u32 n;
decode(n, p);
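The encode/decode overloads above keep the existing bufferlist round-trip pattern; a hedged usage sketch (the round-trip function and variable names are illustrative, not part of the patch):

    // Hypothetical example: round-trip a ceph::unordered_map through a bufferlist
    // using the generic encoders defined above.
    #include "include/encoding.h"
    #include "include/unordered_map.h"

    void roundtrip_example()
    {
      ceph::unordered_map<uint64_t, std::string> in, out;
      in[1] = "one";
      in[2] = "two";

      bufferlist bl;
      ::encode(in, bl);              // writes the element count, then each key/value pair

      bufferlist::iterator p = bl.begin();
      ::decode(out, p);              // rebuilds the map from the encoded pairs
    }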
#include <iomanip>
using namespace std;
-#include <ext/hash_map>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
+#include "include/hash_namespace.h"
#include "hash.h"
#include "encoding.h"
return out << o.name;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<object_t> {
size_t operator()(const object_t& r) const {
//static hash<string> H;
return ceph_str_hash_linux(r.name.c_str(), r.name.length());
}
};
-}
+CEPH_HASH_NAMESPACE_END
struct file_object_t {
inline ostream& operator<<(ostream& out, const sobject_t &o) {
return out << o.oid << "/" << o.snap;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<sobject_t> {
size_t operator()(const sobject_t &r) const {
static hash<object_t> H;
return H(r.oid) ^ I(r.snap);
}
};
-}
+CEPH_HASH_NAMESPACE_END
#endif
using namespace std;
-#include <ext/hash_map>
-using namespace __gnu_cxx;
-
+#include "include/unordered_map.h"
+#include "include/hash_namespace.h"
#include "object.h"
#include "intarith.h"
// -- stl crap --
-namespace __gnu_cxx {
+
+CEPH_HASH_NAMESPACE_START
template<> struct hash< std::string >
{
size_t operator()( const std::string& x ) const
}
};
#endif
-
-}
+CEPH_HASH_NAMESPACE_END
/*
* comparators for stl containers
*/
-// for hash_map:
-// hash_map<const char*, long, hash<const char*>, eqstr> vals;
+// for ceph::unordered_map:
+// ceph::unordered_map<const char*, long, hash<const char*>, eqstr> vals;
struct eqstr
{
bool operator()(const char* s1, const char* s2) const
return out << hex << ino.val << dec;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash< inodeno_t >
{
size_t operator()( const inodeno_t& x ) const
return H(x.val);
}
};
-}
+CEPH_HASH_NAMESPACE_END
// file modes
tid_t m_tid;
Mutex& m_lock;
librbd::ImageCtx *m_ictx;
- hash_map<std::string, std::queue<write_result_d*> > m_writes;
+ ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
friend class C_OrderedWrite;
};
}
#include "CDentry.h"
#include "CDir.h"
-#include <ext/hash_set>
-using __gnu_cxx::hash_set;
+#include "include/unordered_set.h"
+using ceph::unordered_set;
class CDir;
class CInode;
set<CInode*> truncating_inodes;
- map<int, hash_set<version_t> > pending_commit_tids; // mdstable
+ map<int, ceph::unordered_set<version_t> > pending_commit_tids; // mdstable
set<metareqid_t> uncommitted_masters;
set<dirfrag_t> uncommitted_fragments;
} else {
set<int> resolve_set;
mds->mdsmap->get_mds_set(resolve_set, MDSMap::STATE_RESOLVE);
- for (hash_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
+ for (ceph::unordered_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
p != active_requests.end();
++p) {
if (!p->second->is_slave() || !p->second->slave_did_prepare())
// clean up any requests slave to/from this node
list<MDRequest*> finish;
- for (hash_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
+ for (ceph::unordered_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
p != active_requests.end();
++p) {
// slave to the failed node?
{
dout(7) << "trim_unlinked_inodes" << dendl;
list<CInode*> q;
- for (hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
p != inode_map.end();
++p) {
CInode *in = p->second;
if (!mds->is_rejoin()) {
// i am survivor. send strong rejoin.
// note request remote_auth_pins, xlocks
- for (hash_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
+ for (ceph::unordered_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
p != active_requests.end();
++p) {
if ( p->second->is_slave())
{
dout(10) << "rejoin_scour_survivor_replicas from mds." << from << dendl;
- for (hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
p != inode_map.end();
++p) {
CInode *in = p->second;
map<client_t,MClientSnap*> splits;
- for (hash_map<vinodeno_t,CInode*>::iterator i = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator i = inode_map.begin();
i != inode_map.end();
++i) {
CInode *in = i->second;
{
dout(10) << "reissue_all_caps" << dendl;
- for (hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
p != inode_map.end();
++p) {
CInode *in = p->second;
void MDCache::identify_files_to_recover(vector<CInode*>& recover_q, vector<CInode*>& check_q)
{
dout(10) << "identify_files_to_recover" << dendl;
- for (hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
p != inode_map.end();
++p) {
CInode *in = p->second;
if (lru.lru_get_size() == 0) {
// root, stray, etc.?
- hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
while (p != inode_map.end()) {
- hash_map<vinodeno_t,CInode*>::iterator next = p;
+ ceph::unordered_map<vinodeno_t,CInode*>::iterator next = p;
++next;
CInode *in = p->second;
if (!in->is_auth()) {
int MDCache::get_num_client_requests()
{
int count = 0;
- for (hash_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
+ for (ceph::unordered_map<metareqid_t, MDRequest*>::iterator p = active_requests.begin();
p != active_requests.end();
++p) {
if (p->second->reqid.name.is_client() && !p->second->is_slave())
{
dout(7) << "show_cache" << dendl;
- for (hash_map<vinodeno_t,CInode*>::iterator it = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator it = inode_map.begin();
it != inode_map.end();
++it) {
// unlinked?
return;
}
- for (hash_map<vinodeno_t,CInode*>::iterator it = inode_map.begin();
+ for (ceph::unordered_map<vinodeno_t,CInode*>::iterator it = inode_map.begin();
it != inode_map.end();
++it) {
CInode *in = it->second;
// -- my cache --
LRU lru; // dentry lru for expiring items from cache
protected:
- hash_map<vinodeno_t,CInode*> inode_map; // map of inodes by ino
+ ceph::unordered_map<vinodeno_t,CInode*> inode_map; // map of inodes by ino
CInode *root; // root inode
CInode *myin; // .ceph/mds%d dir
// -- requests --
protected:
- hash_map<metareqid_t, MDRequest*> active_requests;
+ ceph::unordered_map<metareqid_t, MDRequest*> active_requests;
public:
int get_num_client_requests();
CInode *hack_pick_random_inode() {
assert(!inode_map.empty());
int n = rand() % inode_map.size();
- hash_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
+ ceph::unordered_map<vinodeno_t,CInode*>::iterator p = inode_map.begin();
while (n--) ++p;
return p->second;
}
void SessionMap::dump()
{
dout(10) << "dump" << dendl;
- for (hash_map<entity_name_t,Session*>::iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
p != session_map.end();
++p)
dout(10) << p->first << " " << p->second
ENCODE_START(3, 3, bl);
::encode(version, bl);
- for (hash_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
p != session_map.end();
++p) {
if (p->second->is_open() ||
void SessionMap::dump(Formatter *f) const
{
f->open_array_section("Sessions");
- for (hash_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::const_iterator p = session_map.begin();
p != session_map.end();
++p) {
f->open_object_section("Session");
void SessionMap::wipe_ino_prealloc()
{
- for (hash_map<entity_name_t,Session*>::iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
p != session_map.end();
++p) {
p->second->pending_prealloc_inos.clear();
#include <set>
using std::set;
-#include <ext/hash_map>
-using __gnu_cxx::hash_map;
+#include "include/unordered_map.h"
#include "include/Context.h"
#include "include/xlist.h"
class SessionMap {
private:
MDS *mds;
- hash_map<entity_name_t, Session*> session_map;
+ ceph::unordered_map<entity_name_t, Session*> session_map;
public:
map<int,xlist<Session*>* > by_state;
void dump();
void get_client_set(set<client_t>& s) {
- for (hash_map<entity_name_t,Session*>::iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
p != session_map.end();
++p)
if (p->second->info.inst.name.is_client())
s.insert(p->second->info.inst.name.num());
}
void get_client_session_set(set<Session*>& s) {
- for (hash_map<entity_name_t,Session*>::iterator p = session_map.begin();
+ for (ceph::unordered_map<entity_name_t,Session*>::iterator p = session_map.begin();
p != session_map.end();
++p)
if (p->second->info.inst.name.is_client())
private:
mutable bufferlist dnbl;
bool dn_decoded;
- list<std::tr1::shared_ptr<fullbit> > dfull;
+ list<ceph::shared_ptr<fullbit> > dfull;
list<remotebit> dremote;
list<nullbit> dnull;
bool is_importing() { return state & STATE_IMPORTING; }
void mark_importing() { state |= STATE_IMPORTING; }
- list<std::tr1::shared_ptr<fullbit> > &get_dfull() { return dfull; }
+ list<ceph::shared_ptr<fullbit> > &get_dfull() { return dfull; }
list<remotebit> &get_dremote() { return dremote; }
list<nullbit> &get_dnull() { return dnull; }
<< " num " << nfull << "/" << nremote << "/" << nnull
<< std::endl;
_decode_bits();
- for (list<std::tr1::shared_ptr<fullbit> >::iterator p = dfull.begin(); p != dfull.end(); ++p)
+ for (list<ceph::shared_ptr<fullbit> >::iterator p = dfull.begin(); p != dfull.end(); ++p)
(*p)->print(out);
for (list<remotebit>::iterator p = dremote.begin(); p != dremote.end(); ++p)
p->print(out);
// my lumps. preserve the order we added them in a list.
list<dirfrag_t> lump_order;
map<dirfrag_t, dirlump> lump_map;
- list<std::tr1::shared_ptr<fullbit> > roots;
+ list<ceph::shared_ptr<fullbit> > roots;
list<pair<__u8,version_t> > table_tids; // tableclient transactions
sr->encode(snapbl);
lump.nfull++;
- lump.get_dfull().push_back(std::tr1::shared_ptr<fullbit>(new fullbit(dn->get_name(),
+ lump.get_dfull().push_back(ceph::shared_ptr<fullbit>(new fullbit(dn->get_name(),
dn->first, dn->last,
dn->get_projected_version(),
*pi, in->dirfragtree,
else
in->encode_snap_blob(snapbl);
- for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
+ for (list<ceph::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
if ((*p)->inode.ino == in->ino()) {
roots.erase(p);
break;
}
string empty;
- roots.push_back(std::tr1::shared_ptr<fullbit>(new fullbit(empty, in->first, in->last, 0, *pi,
+ roots.push_back(ceph::shared_ptr<fullbit>(new fullbit(empty, in->first, in->last, 0, *pi,
*pdft, *px, in->symlink, snapbl,
dirty ? fullbit::STATE_DIRTY : 0,
&in->old_inodes)));
}
// pending commit atids
- for (map<int, hash_set<version_t> >::iterator p = pending_commit_tids.begin();
+ for (map<int, ceph::unordered_set<version_t> >::iterator p = pending_commit_tids.begin();
p != pending_commit_tids.end();
++p) {
MDSTableClient *client = mds->get_table_client(p->first);
- for (hash_set<version_t>::iterator q = p->second.begin();
+ for (ceph::unordered_set<version_t>::iterator q = p->second.begin();
q != p->second.end();
++q) {
dout(10) << "try_to_expire " << get_mdstable_name(p->first) << " transaction " << *q
f->dump_int("nnull", nnull);
f->open_array_section("full bits");
- for (list<std::tr1::shared_ptr<fullbit> >::const_iterator
+ for (list<ceph::shared_ptr<fullbit> >::const_iterator
iter = dfull.begin(); iter != dfull.end(); ++iter) {
f->open_object_section("fullbit");
(*iter)->dump(f);
::decode(rootbl, bl);
if (rootbl.length()) {
bufferlist::iterator p = rootbl.begin();
- roots.push_back(std::tr1::shared_ptr<fullbit>(new fullbit(p)));
+ roots.push_back(ceph::shared_ptr<fullbit>(new fullbit(p)));
}
}
::decode(table_tids, bl);
f->close_section(); // lumps
f->open_array_section("roots");
- for (list<std::tr1::shared_ptr<fullbit> >::const_iterator i = roots.begin();
+ for (list<ceph::shared_ptr<fullbit> >::const_iterator i = roots.begin();
i != roots.end(); ++i) {
f->open_object_section("root");
(*i)->dump(f);
assert(g_conf->mds_kill_journal_replay_at != 1);
- for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
+ for (list<ceph::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
CInode *in = mds->mdcache->get_inode((*p)->inode.ino);
bool isnew = in ? false:true;
if (!in)
lump._decode_bits();
// full dentry+inode pairs
- for (list<std::tr1::shared_ptr<fullbit> >::iterator pp = lump.get_dfull().begin();
+ for (list<ceph::shared_ptr<fullbit> >::iterator pp = lump.get_dfull().begin();
pp != lump.get_dfull().end();
++pp) {
- std::tr1::shared_ptr<fullbit> p = *pp;
+ ceph::shared_ptr<fullbit> p = *pp;
CDentry *dn = dir->lookup_exact_snap(p->dn, p->dnlast);
if (!dn) {
dn = dir->add_null_dentry(p->dn, p->dnfirst, p->dnlast);
#include <boost/pool/pool.hpp>
#include "include/assert.h"
+#include "include/hash_namespace.h"
#define CEPH_FS_ONDISK_MAGIC "ceph fs volume v011"
(l.ino == r.ino && l.snapid < r.snapid);
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<vinodeno_t> {
size_t operator()(const vinodeno_t &vino) const {
hash<inodeno_t> H;
return H(vino.ino) ^ I(vino.snapid);
}
};
-}
+CEPH_HASH_NAMESPACE_END
inline bool operator>(const metareqid_t& l, const metareqid_t& r) { return !(l <= r); }
inline bool operator>=(const metareqid_t& l, const metareqid_t& r) { return !(l < r); }
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<metareqid_t> {
size_t operator()(const metareqid_t &r) const {
hash<uint64_t> H;
return H(r.name.num()) ^ H(r.name.type()) ^ H(r.tid);
}
};
-}
+CEPH_HASH_NAMESPACE_END
// cap info for client reconnect
return l.ino == r.ino && l.frag == r.frag;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<dirfrag_t> {
size_t operator()(const dirfrag_t &df) const {
static rjhash<uint64_t> H;
return H(df.ino) ^ I(df.frag);
}
};
-}
+CEPH_HASH_NAMESPACE_END
*
*/
#include <memory>
-#include <tr1/memory>
+#include "include/memory.h"
#include <errno.h>
#include <map>
#include <list>
{
/*
std::stringstream ss;
- for (hash_map<int,int>::iterator p = pm.pg_map.num_pg_by_state.begin();
+ for (ceph::unordered_map<int,int>::iterator p = pm.pg_map.num_pg_by_state.begin();
p != pm.pg_map.num_pg_by_state.end();
++p) {
if (p != pm.pg_map.num_pg_by_state.begin())
#include "mon/MonitorDBStore.h"
#include <memory>
-#include <tr1/memory>
+#include "include/memory.h"
#include <errno.h>
virtual void get_chunk_tx(Transaction &tx, uint64_t max) = 0;
virtual pair<string,string> get_next_key() = 0;
};
- typedef std::tr1::shared_ptr<StoreIteratorImpl> Synchronizer;
+ typedef ceph::shared_ptr<StoreIteratorImpl> Synchronizer;
class WholeStoreIteratorImpl : public StoreIteratorImpl {
KeyValueDB::WholeSpaceIterator iter;
else
iter->seek_to_first();
- return std::tr1::shared_ptr<StoreIteratorImpl>(
+ return ceph::shared_ptr<StoreIteratorImpl>(
new WholeStoreIteratorImpl(iter, prefixes)
);
}
}
// generate some pg_temp entries.
- // let's assume the hash_map iterates in a random-ish order.
+ // let's assume the ceph::unordered_map iterates in a random-ish order.
int n = rand() % mon->pgmon()->pg_map.pg_stat.size();
- hash_map<pg_t,pg_stat_t>::iterator p = mon->pgmon()->pg_map.pg_stat.begin();
- hash_map<pg_t,pg_stat_t>::iterator e = mon->pgmon()->pg_map.pg_stat.end();
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator p = mon->pgmon()->pg_map.pg_stat.begin();
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator e = mon->pgmon()->pg_map.pg_stat.end();
while (n--)
++p;
for (int i=0; i<50; i++) {
std::string sep;
oss << "overloaded osds: ";
bool changed = false;
- for (hash_map<int,osd_stat_t>::const_iterator p = pgm.osd_stat.begin();
+ for (ceph::unordered_map<int,osd_stat_t>::const_iterator p = pgm.osd_stat.begin();
p != pgm.osd_stat.end();
++p) {
float util = p->second.kb_used;
}
// expire blacklisted items?
- for (hash_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
+ for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
p != osdmap.blacklist.end();
++p) {
if (p->second < now) {
if (f)
f->open_array_section("blacklist");
- for (hash_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
+ for (ceph::unordered_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
p != osdmap.blacklist.end();
++p) {
if (f) {
stamp = inc.stamp;
pool_stat_t pg_sum_old = pg_sum;
- hash_map<uint64_t, pool_stat_t> pg_pool_sum_old;
+ ceph::unordered_map<uint64_t, pool_stat_t> pg_pool_sum_old;
bool ratios_changed = false;
if (inc.full_ratio != full_ratio && inc.full_ratio != -1) {
if (pg_pool_sum_old.count(update_pg.pool()) == 0)
pg_pool_sum_old[update_pg.pool()] = pg_pool_sum[update_pg.pool()];
- hash_map<pg_t,pg_stat_t>::iterator t = pg_stat.find(update_pg);
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator t = pg_stat.find(update_pg);
if (t == pg_stat.end()) {
- hash_map<pg_t,pg_stat_t>::value_type v(update_pg, update_stat);
+ ceph::unordered_map<pg_t,pg_stat_t>::value_type v(update_pg, update_stat);
pg_stat.insert(v);
} else {
stat_pg_sub(update_pg, t->second);
int osd = p->first;
const osd_stat_t &new_stats(p->second);
- hash_map<int32_t,osd_stat_t>::iterator t = osd_stat.find(osd);
+ ceph::unordered_map<int32_t,osd_stat_t>::iterator t = osd_stat.find(osd);
if (t == osd_stat.end()) {
- hash_map<int32_t,osd_stat_t>::value_type v(osd, new_stats);
+ ceph::unordered_map<int32_t,osd_stat_t>::value_type v(osd, new_stats);
osd_stat.insert(v);
} else {
stat_osd_sub(t->second);
p != inc.pg_remove.end();
++p) {
const pg_t &removed_pg(*p);
- hash_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(removed_pg);
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(removed_pg);
if (s != pg_stat.end()) {
stat_pg_sub(removed_pg, s->second);
pg_stat.erase(s);
for (set<int>::iterator p = inc.get_osd_stat_rm().begin();
p != inc.get_osd_stat_rm().end();
++p) {
- hash_map<int32_t,osd_stat_t>::iterator t = osd_stat.find(*p);
+ ceph::unordered_map<int32_t,osd_stat_t>::iterator t = osd_stat.find(*p);
if (t != osd_stat.end()) {
stat_osd_sub(t->second);
osd_stat.erase(t);
{
full_osds.clear();
nearfull_osds.clear();
- for (hash_map<int32_t, osd_stat_t>::iterator i = osd_stat.begin();
+ for (ceph::unordered_map<int32_t, osd_stat_t>::iterator i = osd_stat.begin();
i != osd_stat.end();
++i) {
register_nearfull_status(i->first, i->second);
pg_sum = pool_stat_t();
osd_sum = osd_stat_t();
- for (hash_map<pg_t,pg_stat_t>::iterator p = pg_stat.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::iterator p = pg_stat.begin();
p != pg_stat.end();
++p) {
stat_pg_add(p->first, p->second);
}
- for (hash_map<int32_t,osd_stat_t>::iterator p = osd_stat.begin();
+ for (ceph::unordered_map<int32_t,osd_stat_t>::iterator p = osd_stat.begin();
p != osd_stat.end();
++p)
stat_osd_add(p->second);
void PGMap::update_pg(pg_t pgid, bufferlist& bl)
{
bufferlist::iterator p = bl.begin();
- hash_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(pgid);
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(pgid);
if (s != pg_stat.end())
stat_pg_sub(pgid, s->second);
pg_stat_t& r = pg_stat[pgid];
void PGMap::remove_pg(pg_t pgid)
{
- hash_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(pgid);
+ ceph::unordered_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(pgid);
if (s != pg_stat.end()) {
stat_pg_sub(pgid, s->second);
pg_stat.erase(s);
void PGMap::update_osd(int osd, bufferlist& bl)
{
bufferlist::iterator p = bl.begin();
- hash_map<int32_t,osd_stat_t>::iterator o = osd_stat.find(osd);
+ ceph::unordered_map<int32_t,osd_stat_t>::iterator o = osd_stat.find(osd);
if (o != osd_stat.end())
stat_osd_sub(o->second);
osd_stat_t& r = osd_stat[osd];
void PGMap::remove_osd(int osd)
{
- hash_map<int32_t,osd_stat_t>::iterator o = osd_stat.find(osd);
+ ceph::unordered_map<int32_t,osd_stat_t>::iterator o = osd_stat.find(osd);
if (o != osd_stat.end()) {
stat_osd_sub(o->second);
osd_stat.erase(o);
{
if (pg_stat.empty())
return 0;
- hash_map<pg_t,pg_stat_t>::const_iterator p = pg_stat.begin();
+ ceph::unordered_map<pg_t,pg_stat_t>::const_iterator p = pg_stat.begin();
epoch_t min = p->second.get_effective_last_epoch_clean();
for (++p; p != pg_stat.end(); ++p) {
epoch_t lec = p->second.get_effective_last_epoch_clean();
}
// also scan osd epochs
// don't trim past the oldest reported osd epoch
- for (hash_map<int32_t, epoch_t>::const_iterator i = osd_epochs.begin();
+ for (ceph::unordered_map<int32_t, epoch_t>::const_iterator i = osd_epochs.begin();
i != osd_epochs.end();
++i) {
if (i->second < min)
if (struct_v >= 6) {
::decode(osd_epochs, bl);
} else {
- for (hash_map<int32_t, osd_stat_t>::iterator i = osd_stat.begin();
+ for (ceph::unordered_map<int32_t, osd_stat_t>::iterator i = osd_stat.begin();
i != osd_stat.end();
++i) {
// This isn't accurate, but will cause trimming to behave like
inc.full_ratio = full_ratio;
inc.nearfull_ratio = nearfull_ratio;
- for (hash_map<pg_t,pg_stat_t>::const_iterator p = pg_stat.begin(); p != pg_stat.end(); ++p) {
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator p = pg_stat.begin(); p != pg_stat.end(); ++p) {
inc.pg_stat_updates[p->first] = p->second;
}
- for (hash_map<int32_t, osd_stat_t>::const_iterator p = osd_stat.begin(); p != osd_stat.end(); ++p) {
+ for (ceph::unordered_map<int32_t, osd_stat_t>::const_iterator p = osd_stat.begin(); p != osd_stat.end(); ++p) {
assert(osd_epochs.count(p->first));
inc.update_stat(p->first,
inc.get_osd_epochs().find(p->first)->second,
void PGMap::dump_pg_stats(Formatter *f, bool brief) const
{
f->open_array_section("pg_stats");
- for (hash_map<pg_t,pg_stat_t>::const_iterator i = pg_stat.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator i = pg_stat.begin();
i != pg_stat.end();
++i) {
f->open_object_section("pg_stat");
void PGMap::dump_pool_stats(Formatter *f) const
{
f->open_array_section("pool_stats");
- for (hash_map<int,pool_stat_t>::const_iterator p = pg_pool_sum.begin();
+ for (ceph::unordered_map<int,pool_stat_t>::const_iterator p = pg_pool_sum.begin();
p != pg_pool_sum.end();
++p) {
f->open_object_section("pool_stat");
void PGMap::dump_osd_stats(Formatter *f) const
{
f->open_array_section("osd_stats");
- for (hash_map<int32_t,osd_stat_t>::const_iterator q = osd_stat.begin();
+ for (ceph::unordered_map<int32_t,osd_stat_t>::const_iterator q = osd_stat.begin();
q != osd_stat.end();
++q) {
f->open_object_section("osd_stat");
}
void PGMap::dump_pg_stats_plain(ostream& ss,
- const hash_map<pg_t, pg_stat_t>& pg_stats) const
+ const ceph::unordered_map<pg_t, pg_stat_t>& pg_stats) const
{
ss << "pg_stat\tobjects\tmip\tdegr\tunf\tbytes\tlog\tdisklog\tstate\tstate_stamp\tv\treported\tup\tacting\tlast_scrub\tscrub_stamp\tlast_deep_scrub\tdeep_scrub_stamp" << std::endl;
- for (hash_map<pg_t, pg_stat_t>::const_iterator i = pg_stats.begin();
+ for (ceph::unordered_map<pg_t, pg_stat_t>::const_iterator i = pg_stats.begin();
i != pg_stats.end(); ++i) {
const pg_stat_t &st(i->second);
ss << i->first
ss << "full_ratio " << full_ratio << std::endl;
ss << "nearfull_ratio " << nearfull_ratio << std::endl;
dump_pg_stats_plain(ss, pg_stat);
- for (hash_map<int,pool_stat_t>::const_iterator p = pg_pool_sum.begin();
+ for (ceph::unordered_map<int,pool_stat_t>::const_iterator p = pg_pool_sum.begin();
p != pg_pool_sum.end();
++p)
ss << "pool " << p->first
<< "\t" << pg_sum.ondisk_log_size
<< std::endl;
ss << "osdstat\tkbused\tkbavail\tkb\thb in\thb out" << std::endl;
- for (hash_map<int32_t,osd_stat_t>::const_iterator p = osd_stat.begin();
+ for (ceph::unordered_map<int32_t,osd_stat_t>::const_iterator p = osd_stat.begin();
p != osd_stat.end();
++p)
ss << p->first
}
void PGMap::get_stuck_stats(PGMap::StuckPG type, utime_t cutoff,
- hash_map<pg_t, pg_stat_t>& stuck_pgs) const
+ ceph::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const
{
- for (hash_map<pg_t, pg_stat_t>::const_iterator i = pg_stat.begin();
+ for (ceph::unordered_map<pg_t, pg_stat_t>::const_iterator i = pg_stat.begin();
i != pg_stat.end();
++i) {
utime_t val;
void PGMap::dump_stuck(Formatter *f, PGMap::StuckPG type, utime_t cutoff) const
{
- hash_map<pg_t, pg_stat_t> stuck_pg_stats;
+ ceph::unordered_map<pg_t, pg_stat_t> stuck_pg_stats;
get_stuck_stats(type, cutoff, stuck_pg_stats);
f->open_array_section("stuck_pg_stats");
- for (hash_map<pg_t,pg_stat_t>::const_iterator i = stuck_pg_stats.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator i = stuck_pg_stats.begin();
i != stuck_pg_stats.end();
++i) {
f->open_object_section("pg_stat");
void PGMap::dump_stuck_plain(ostream& ss, PGMap::StuckPG type, utime_t cutoff) const
{
- hash_map<pg_t, pg_stat_t> stuck_pg_stats;
+ ceph::unordered_map<pg_t, pg_stat_t> stuck_pg_stats;
get_stuck_stats(type, cutoff, stuck_pg_stats);
if (!stuck_pg_stats.empty())
dump_pg_stats_plain(ss, stuck_pg_stats);
void PGMap::dump_osd_perf_stats(Formatter *f) const
{
f->open_array_section("osd_perf_infos");
- for (hash_map<int32_t, osd_stat_t>::const_iterator i = osd_stat.begin();
+ for (ceph::unordered_map<int32_t, osd_stat_t>::const_iterator i = osd_stat.begin();
i != osd_stat.end();
++i) {
f->open_object_section("osd");
tab.define_column("osdid", TextTable::LEFT, TextTable::RIGHT);
tab.define_column("fs_commit_latency(ms)", TextTable::LEFT, TextTable::RIGHT);
tab.define_column("fs_apply_latency(ms)", TextTable::LEFT, TextTable::RIGHT);
- for (hash_map<int32_t, osd_stat_t>::const_iterator i = osd_stat.begin();
+ for (ceph::unordered_map<int32_t, osd_stat_t>::const_iterator i = osd_stat.begin();
i != osd_stat.end();
++i) {
tab << i->first;
void PGMap::pool_recovery_rate_summary(Formatter *f, ostream *out,
uint64_t poolid) const
{
- hash_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
+ ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
per_pool_sum_delta.find(poolid);
if (p == per_pool_sum_delta.end())
return;
- hash_map<uint64_t,utime_t>::const_iterator ts =
+ ceph::unordered_map<uint64_t,utime_t>::const_iterator ts =
per_pool_sum_deltas_stamps.find(p->first);
assert(ts != per_pool_sum_deltas_stamps.end());
recovery_rate_summary(f, out, p->second.first, ts->second);
void PGMap::pool_recovery_summary(Formatter *f, ostream *out,
uint64_t poolid) const
{
- hash_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
+ ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
per_pool_sum_delta.find(poolid);
if (p == per_pool_sum_delta.end())
return;
void PGMap::pool_client_io_rate_summary(Formatter *f, ostream *out,
uint64_t poolid) const
{
- hash_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
+ ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
per_pool_sum_delta.find(poolid);
if (p == per_pool_sum_delta.end())
return;
- hash_map<uint64_t,utime_t>::const_iterator ts =
+ ceph::unordered_map<uint64_t,utime_t>::const_iterator ts =
per_pool_sum_deltas_stamps.find(p->first);
assert(ts != per_pool_sum_deltas_stamps.end());
client_io_rate_summary(f, out, p->second.first, ts->second);
* @param pg_pool_sum_old Map of pool stats for delta calcs.
*/
void PGMap::update_pool_deltas(CephContext *cct, const utime_t ts,
- const hash_map<uint64_t,pool_stat_t>& pg_pool_sum_old)
+ const ceph::unordered_map<uint64_t,pool_stat_t>& pg_pool_sum_old)
{
- for (hash_map<uint64_t,pool_stat_t>::const_iterator it = pg_pool_sum_old.begin();
+ for (ceph::unordered_map<uint64_t,pool_stat_t>::const_iterator it = pg_pool_sum_old.begin();
it != pg_pool_sum_old.end(); ++it) {
update_one_pool_delta(cct, ts, it->first, it->second);
}
if (f)
f->open_array_section("pgs_by_state");
- for (hash_map<int,int>::const_iterator p = num_pg_by_state.begin();
+ for (ceph::unordered_map<int,int>::const_iterator p = num_pg_by_state.begin();
p != num_pg_by_state.end();
++p) {
if (f) {
{
std::stringstream ss;
- for (hash_map<int,int>::const_iterator p = num_pg_by_state.begin();
+ for (ceph::unordered_map<int,int>::const_iterator p = num_pg_by_state.begin();
p != num_pg_by_state.end();
++p) {
if (p != num_pg_by_state.begin())
version_t version;
epoch_t last_osdmap_epoch; // last osdmap epoch i applied to the pgmap
epoch_t last_pg_scan; // osdmap epoch
- hash_map<pg_t,pg_stat_t> pg_stat;
- hash_map<int32_t,osd_stat_t> osd_stat;
+ ceph::unordered_map<pg_t,pg_stat_t> pg_stat;
+ ceph::unordered_map<int32_t,osd_stat_t> osd_stat;
set<int32_t> full_osds;
set<int32_t> nearfull_osds;
float full_ratio;
float nearfull_ratio;
// mapping of osd to most recently reported osdmap epoch
- hash_map<int32_t,epoch_t> osd_epochs;
+ ceph::unordered_map<int32_t,epoch_t> osd_epochs;
class Incremental {
public:
// aggregate stats (soft state), generated by calc_stats()
- hash_map<int,int> num_pg_by_state;
+ ceph::unordered_map<int,int> num_pg_by_state;
int64_t num_pg, num_osd;
- hash_map<int,pool_stat_t> pg_pool_sum;
+ ceph::unordered_map<int,pool_stat_t> pg_pool_sum;
pool_stat_t pg_sum;
osd_stat_t osd_sum;
* keep track of last deltas for each pool, calculated using
* @p pg_pool_sum as baseline.
*/
- hash_map<uint64_t, list< pair<pool_stat_t, utime_t> > > per_pool_sum_deltas;
+ ceph::unordered_map<uint64_t, list< pair<pool_stat_t, utime_t> > > per_pool_sum_deltas;
/**
* keep track of per-pool timestamp deltas, according to last update on
* each pool.
*/
- hash_map<uint64_t, utime_t> per_pool_sum_deltas_stamps;
+ ceph::unordered_map<uint64_t, utime_t> per_pool_sum_deltas_stamps;
/**
* keep track of sum deltas, per-pool, taking into account any previous
* deltas existing in @p per_pool_sum_deltas. The utime_t as second member
* of the pair is the timestamp referring to the last update (i.e., the first
* member of the pair) for a given pool.
*/
- hash_map<uint64_t, pair<pool_stat_t,utime_t> > per_pool_sum_delta;
+ ceph::unordered_map<uint64_t, pair<pool_stat_t,utime_t> > per_pool_sum_delta;
list< pair<pool_stat_t, utime_t> > pg_sum_deltas;
pool_stat_t pg_sum_delta;
const utime_t ts, const pool_stat_t& pg_sum_old);
void update_pool_deltas(CephContext *cct,
const utime_t ts,
- const hash_map<uint64_t, pool_stat_t>& pg_pool_sum_old);
+ const ceph::unordered_map<uint64_t, pool_stat_t>& pg_pool_sum_old);
void clear_delta();
private:
void dump_delta(Formatter *f) const;
void dump_pg_stats_plain(ostream& ss,
- const hash_map<pg_t, pg_stat_t>& pg_stats) const;
+ const ceph::unordered_map<pg_t, pg_stat_t>& pg_stats) const;
void get_stuck_stats(StuckPG type, utime_t cutoff,
- hash_map<pg_t, pg_stat_t>& stuck_pgs) const;
+ ceph::unordered_map<pg_t, pg_stat_t>& stuck_pgs) const;
void dump_stuck(Formatter *f, StuckPG type, utime_t cutoff) const;
void dump_stuck_plain(ostream& ss, StuckPG type, utime_t cutoff) const;
mon->cluster_logger->set(l_cluster_num_pg, pg_map.pg_stat.size());
unsigned active = 0, active_clean = 0, peering = 0;
- for (hash_map<int,int>::iterator p = pg_map.num_pg_by_state.begin();
+ for (ceph::unordered_map<int,int>::iterator p = pg_map.num_pg_by_state.begin();
p != pg_map.num_pg_by_state.end();
++p) {
if (p->first & PG_STATE_ACTIVE) {
* obtained -- the timestamp IS NOT a delta itself.
*/
if (!pg_map.per_pool_sum_deltas.empty()) {
- hash_map<uint64_t,pair<pool_stat_t,utime_t> >::iterator it;
+ ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::iterator it;
for (it = pg_map.per_pool_sum_delta.begin();
it != pg_map.per_pool_sum_delta.end(); ) {
utime_t age = ceph_clock_now(g_ceph_context) - it->second.second;
}
pool_stat_t pg_sum_old = pg_map.pg_sum;
- hash_map<uint64_t, pool_stat_t> pg_pool_sum_old;
+ ceph::unordered_map<uint64_t, pool_stat_t> pg_pool_sum_old;
// pgs
bufferlist::iterator p = dirty_pgs.begin();
bool PGMonitor::pg_stats_have_changed(int from, const MPGStats *stats) const
{
// any new osd info?
- hash_map<int,osd_stat_t>::const_iterator s = pg_map.osd_stat.find(from);
+ ceph::unordered_map<int,osd_stat_t>::const_iterator s = pg_map.osd_stat.find(from);
if (s == pg_map.osd_stat.end())
return true;
if (s->second != stats->osd_stat)
// any new pg info?
for (map<pg_t,pg_stat_t>::const_iterator p = stats->pg_stat.begin();
p != stats->pg_stat.end(); ++p) {
- hash_map<pg_t,pg_stat_t>::const_iterator t = pg_map.pg_stat.find(p->first);
+ ceph::unordered_map<pg_t,pg_stat_t>::const_iterator t = pg_map.pg_stat.find(p->first);
if (t == pg_map.pg_stat.end())
return true;
if (t->second.reported_epoch != p->second.reported_epoch ||
}
// deleted pools?
- for (hash_map<pg_t,pg_stat_t>::const_iterator p = pg_map.pg_stat.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator p = pg_map.pg_stat.begin();
p != pg_map.pg_stat.end(); ++p) {
if (!osdmap->have_pg_pool(p->first.pool())) {
dout(20) << " removing pg_stat " << p->first << " because "
OSDMap *osdmap = &mon->osdmon()->osdmap;
bool ret = false;
- for (hash_map<pg_t,pg_stat_t>::iterator p = pg_map.pg_stat.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::iterator p = pg_map.pg_stat.begin();
p != pg_map.pg_stat.end();
++p) {
if ((p->second.state & PG_STATE_STALE) == 0 &&
cmd_getval(g_ceph_context, cmdmap, "debugop", debugop, string("unfound_objects_exist"));
if (debugop == "unfound_objects_exist") {
bool unfound_objects_exist = false;
- hash_map<pg_t,pg_stat_t>::const_iterator end = pg_map.pg_stat.end();
- for (hash_map<pg_t,pg_stat_t>::const_iterator s = pg_map.pg_stat.begin();
+ ceph::unordered_map<pg_t,pg_stat_t>::const_iterator end = pg_map.pg_stat.end();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator s = pg_map.pg_stat.begin();
s != end; ++s) {
if (s->second.stats.sum.num_objects_unfound > 0) {
unfound_objects_exist = true;
r = 0;
} else if (debugop == "degraded_pgs_exist") {
bool degraded_pgs_exist = false;
- hash_map<pg_t,pg_stat_t>::const_iterator end = pg_map.pg_stat.end();
- for (hash_map<pg_t,pg_stat_t>::const_iterator s = pg_map.pg_stat.begin();
+ ceph::unordered_map<pg_t,pg_stat_t>::const_iterator end = pg_map.pg_stat.end();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator s = pg_map.pg_stat.begin();
s != end; ++s) {
if (s->second.stats.sum.num_objects_degraded > 0) {
degraded_pgs_exist = true;
}
static void note_stuck_detail(enum PGMap::StuckPG what,
- hash_map<pg_t,pg_stat_t>& stuck_pgs,
+ ceph::unordered_map<pg_t,pg_stat_t>& stuck_pgs,
list<pair<health_status_t,string> > *detail)
{
- for (hash_map<pg_t,pg_stat_t>::iterator p = stuck_pgs.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::iterator p = stuck_pgs.begin();
p != stuck_pgs.end();
++p) {
ostringstream ss;
list<pair<health_status_t,string> > *detail) const
{
map<string,int> note;
- hash_map<int,int>::const_iterator p = pg_map.num_pg_by_state.begin();
- hash_map<int,int>::const_iterator p_end = pg_map.num_pg_by_state.end();
+ ceph::unordered_map<int,int>::const_iterator p = pg_map.num_pg_by_state.begin();
+ ceph::unordered_map<int,int>::const_iterator p_end = pg_map.num_pg_by_state.end();
for (; p != p_end; ++p) {
if (p->first & PG_STATE_STALE)
note["stale"] += p->second;
note["backfill_toofull"] += p->second;
}
- hash_map<pg_t, pg_stat_t> stuck_pgs;
+ ceph::unordered_map<pg_t, pg_stat_t> stuck_pgs;
utime_t now(ceph_clock_now(g_ceph_context));
utime_t cutoff = now - utime_t(g_conf->mon_pg_stuck_threshold, 0);
summary.push_back(make_pair(HEALTH_WARN, ss.str()));
}
if (detail) {
- for (hash_map<pg_t,pg_stat_t>::const_iterator p = pg_map.pg_stat.begin();
+ for (ceph::unordered_map<pg_t,pg_stat_t>::const_iterator p = pg_map.pg_stat.begin();
p != pg_map.pg_stat.end();
++p) {
if ((p->second.state & (PG_STATE_STALE |
if (detail) {
unsigned num_slow_osds = 0;
// do per-osd warnings
- for (hash_map<int32_t,osd_stat_t>::const_iterator p = pg_map.osd_stat.begin();
+ for (ceph::unordered_map<int32_t,osd_stat_t>::const_iterator p = pg_map.osd_stat.begin();
p != pg_map.osd_stat.end();
++p) {
if (_warn_slow_request_histogram(p->second.op_queue_age_hist,
}
}
if (!pg_map.pg_stat.empty()) {
- for (hash_map<int,pool_stat_t>::const_iterator p = pg_map.pg_pool_sum.begin();
+ for (ceph::unordered_map<int,pool_stat_t>::const_iterator p = pg_map.pg_pool_sum.begin();
p != pg_map.pg_pool_sum.end();
++p) {
const pg_pool_t *pi = mon->osdmon()->osdmap.get_pg_pool(p->first);
void Pipe::unregister_pipe()
{
assert(msgr->lock.is_locked());
- hash_map<entity_addr_t,Pipe*>::iterator p = msgr->rank_pipe.find(peer_addr);
+ ceph::unordered_map<entity_addr_t,Pipe*>::iterator p = msgr->rank_pipe.find(peer_addr);
if (p != msgr->rank_pipe.end() && p->second == this) {
ldout(msgr->cct,10) << "unregister_pipe" << dendl;
msgr->rank_pipe.erase(p);
accepting_pipes.clear();
while (!rank_pipe.empty()) {
- hash_map<entity_addr_t,Pipe*>::iterator it = rank_pipe.begin();
+ ceph::unordered_map<entity_addr_t,Pipe*>::iterator it = rank_pipe.begin();
Pipe *p = it->second;
ldout(cct,5) << "mark_down_all " << it->first << " " << p << dendl;
rank_pipe.erase(it);
#include <list>
#include <map>
using namespace std;
-#include <ext/hash_map>
-#include <ext/hash_set>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
+#include "include/unordered_set.h"
#include "common/Mutex.h"
#include "include/atomic.h"
* NOTE: a Pipe* with state CLOSED may still be in the map but is considered
* invalid and can be replaced by anyone holding the msgr lock
*/
- hash_map<entity_addr_t, Pipe*> rank_pipe;
+ ceph::unordered_map<entity_addr_t, Pipe*> rank_pipe;
/**
* list of pipes that are in the process of accepting
*
friend class Pipe;
Pipe *_lookup_pipe(const entity_addr_t& k) {
- hash_map<entity_addr_t, Pipe*>::iterator p = rank_pipe.find(k);
+ ceph::unordered_map<entity_addr_t, Pipe*>::iterator p = rank_pipe.find(k);
if (p == rank_pipe.end())
return NULL;
// see lock cribbing in Pipe::fault()
#include "include/types.h"
#include "include/blobhash.h"
#include "include/encoding.h"
+#include "include/hash_namespace.h"
namespace ceph {
class Formatter;
return out << *(const entity_name_t*)&addr;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash< entity_name_t >
{
size_t operator()( const entity_name_t &m ) const
return rjhash32(m.type() ^ m.num());
}
};
-}
+CEPH_HASH_NAMESPACE_END
inline bool operator>(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) > 0; }
inline bool operator>=(const entity_addr_t& a, const entity_addr_t& b) { return memcmp(&a, &b, sizeof(a)) >= 0; }
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash< entity_addr_t >
{
size_t operator()( const entity_addr_t& x ) const
return H((const char*)&x, sizeof(x));
}
};
-}
+CEPH_HASH_NAMESPACE_END
/*
inline bool operator>(const entity_inst_t& a, const entity_inst_t& b) { return b < a; }
inline bool operator>=(const entity_inst_t& a, const entity_inst_t& b) { return b <= a; }
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash< entity_inst_t >
{
size_t operator()( const entity_inst_t& x ) const
return H(x.name) ^ I(x.addr);
}
};
-}
+CEPH_HASH_NAMESPACE_END
inline ostream& operator<<(ostream& out, const entity_inst_t &i)
#include <string>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
#include "osd/osd_types.h"
#include "include/object.h"
/// Returned path
string full_path;
/// Ref to parent Index
- std::tr1::shared_ptr<CollectionIndex> parent_ref;
+ ceph::shared_ptr<CollectionIndex> parent_ref;
/// coll_t for parent Index
coll_t parent_coll;
/// Normal Constructor
Path(
string path, ///< [in] Path to return.
- std::tr1::weak_ptr<CollectionIndex> ref) ///< [in] weak_ptr to parent.
+ ceph::weak_ptr<CollectionIndex> ref) ///< [in] weak_ptr to parent.
: full_path(path), parent_ref(ref), parent_coll(parent_ref->coll()) {}
/// Debugging Constructor
coll_t coll() const { return parent_coll; }
/// Getter for parent
- std::tr1::shared_ptr<CollectionIndex> get_index() const {
+ ceph::shared_ptr<CollectionIndex> get_index() const {
return parent_ref;
}
};
public:
/// Type of returned paths
- typedef std::tr1::shared_ptr<Path> IndexedPath;
+ typedef ceph::shared_ptr<Path> IndexedPath;
static IndexedPath get_testing_path(string path, coll_t collection) {
return IndexedPath(new Path(path, collection));
*
* @see IndexManager
*/
- virtual void set_ref(std::tr1::shared_ptr<CollectionIndex> ref) = 0;
+ virtual void set_ref(ceph::shared_ptr<CollectionIndex> ref) = 0;
/**
* Initializes the index.
virtual int split(
uint32_t match, //< [in] value to match
uint32_t bits, //< [in] bits to check
- std::tr1::shared_ptr<CollectionIndex> dest //< [in] destination index
+ ceph::shared_ptr<CollectionIndex> dest //< [in] destination index
) { assert(0); return 0; }
#include <set>
#include <map>
#include <string>
-#include <tr1/memory>
+#include "include/memory.h"
#include <vector>
#include "ObjectMap.h"
#include <string>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
#include <boost/scoped_ptr.hpp>
#include "ObjectMap.h"
coll_t *c, ghobject_t *oid);
private:
/// Implicit lock on Header->seq
- typedef std::tr1::shared_ptr<_Header> Header;
+ typedef ceph::shared_ptr<_Header> Header;
string map_header_key(const ghobject_t &oid);
string header_key(uint64_t seq);
Header header;
/// parent_iter == NULL iff no parent
- std::tr1::shared_ptr<DBObjectMapIteratorImpl> parent_iter;
+ ceph::shared_ptr<DBObjectMapIteratorImpl> parent_iter;
KeyValueDB::Iterator key_iter;
KeyValueDB::Iterator complete_iter;
/// cur_iter points to currently valid iterator
- std::tr1::shared_ptr<ObjectMapIteratorImpl> cur_iter;
+ ceph::shared_ptr<ObjectMapIteratorImpl> cur_iter;
int r;
/// init() called, key_iter, complete_iter, parent_iter filled in
int adjust();
};
- typedef std::tr1::shared_ptr<DBObjectMapIteratorImpl> DBObjectMapIterator;
+ typedef ceph::shared_ptr<DBObjectMapIteratorImpl> DBObjectMapIterator;
DBObjectMapIterator _get_iterator(Header header) {
return DBObjectMapIterator(new DBObjectMapIteratorImpl(this, header));
}
~FDCache() {
cct->_conf->remove_observer(this);
}
- typedef std::tr1::shared_ptr<FD> FDRef;
+ typedef ceph::shared_ptr<FD> FDRef;
FDRef lookup(const ghobject_t &hoid) {
return registry.lookup(hoid);
#include <fstream>
using namespace std;
-#include <ext/hash_map>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
#include "include/assert.h"
#define FILENAME_PREFIX_LEN (FILENAME_SHORT_LEN - FILENAME_HASH_LEN - (sizeof(FILENAME_COOKIE) - 1) - FILENAME_EXTRA)
-void FlatIndex::set_ref(std::tr1::shared_ptr<CollectionIndex> ref) {
+void FlatIndex::set_ref(ceph::shared_ptr<CollectionIndex> ref) {
self_ref = ref;
}
#include <map>
#include <set>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
#include "CollectionIndex.h"
* This class should only be used for converting old filestores.
*/
class FlatIndex : public CollectionIndex {
- std::tr1::weak_ptr<CollectionIndex> self_ref;
+ ceph::weak_ptr<CollectionIndex> self_ref;
string base_path;
coll_t collection;
public:
coll_t coll() const { return collection; }
/// @see CollectionIndex
- void set_ref(std::tr1::shared_ptr<CollectionIndex> ref);
+ void set_ref(ceph::shared_ptr<CollectionIndex> ref);
/// @see CollectionIndex
int cleanup();
int HashIndex::_split(
uint32_t match,
uint32_t bits,
- std::tr1::shared_ptr<CollectionIndex> dest) {
+ ceph::shared_ptr<CollectionIndex> dest) {
assert(collection_version() == dest->collection_version());
unsigned mkdirred = 0;
return col_split_level(
int _split(
uint32_t match,
uint32_t bits,
- std::tr1::shared_ptr<CollectionIndex> dest
+ ceph::shared_ptr<CollectionIndex> dest
);
protected:
*
*/
-#include <tr1/memory>
+#include "include/memory.h"
#include <map>
#if defined(__FreeBSD__)
#ifndef OS_INDEXMANAGER_H
#define OS_INDEXMANAGER_H
-#include <tr1/memory>
+#include "include/memory.h"
#include <map>
#include "common/Mutex.h"
/// Public type for Index
-typedef std::tr1::shared_ptr<CollectionIndex> Index;
+typedef ceph::shared_ptr<CollectionIndex> Index;
/**
* Encapsulates mutual exclusion for CollectionIndexes.
*
bool upgrade;
/// Currently in use CollectionIndices
- map<coll_t,std::tr1::weak_ptr<CollectionIndex> > col_indices;
+ map<coll_t,ceph::weak_ptr<CollectionIndex> > col_indices;
/// Cleans up state for c @see RemoveOnDelete
void put_index(
#include <set>
#include <map>
#include <string>
-#include <tr1/memory>
+#include "include/memory.h"
#include <boost/scoped_ptr.hpp>
#include "ObjectMap.h"
virtual ~TransactionImpl() {};
};
- typedef std::tr1::shared_ptr< TransactionImpl > Transaction;
+ typedef ceph::shared_ptr< TransactionImpl > Transaction;
virtual Transaction get_transaction() = 0;
virtual int submit_transaction(Transaction) = 0;
virtual int status() = 0;
virtual ~WholeSpaceIteratorImpl() { }
};
- typedef std::tr1::shared_ptr< WholeSpaceIteratorImpl > WholeSpaceIterator;
+ typedef ceph::shared_ptr< WholeSpaceIteratorImpl > WholeSpaceIterator;
class IteratorImpl : public ObjectMap::ObjectMapIteratorImpl {
const string prefix;
}
};
- typedef std::tr1::shared_ptr< IteratorImpl > Iterator;
+ typedef ceph::shared_ptr< IteratorImpl > Iterator;
WholeSpaceIterator get_iterator() {
return _get_iterator();
}
Iterator get_iterator(const string &prefix) {
- return std::tr1::shared_ptr<IteratorImpl>(
+ return ceph::shared_ptr<IteratorImpl>(
new IteratorImpl(prefix, get_iterator())
);
}
}
Iterator get_snapshot_iterator(const string &prefix) {
- return std::tr1::shared_ptr<IteratorImpl>(
+ return ceph::shared_ptr<IteratorImpl>(
new IteratorImpl(prefix, get_snapshot_iterator())
);
}
/* Public methods */
-void LFNIndex::set_ref(std::tr1::shared_ptr<CollectionIndex> ref)
+void LFNIndex::set_ref(ceph::shared_ptr<CollectionIndex> ref)
{
self_ref = ref;
}
#include <map>
#include <set>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
#include <exception>
#include "osd/osd_types.h"
/// Path to Index base.
const string base_path;
/// For reference counting the collection @see Path
- std::tr1::weak_ptr<CollectionIndex> self_ref;
+ ceph::weak_ptr<CollectionIndex> self_ref;
protected:
const uint32_t index_version;
virtual ~LFNIndex() {}
/// @see CollectionIndex
- void set_ref(std::tr1::shared_ptr<CollectionIndex> ref);
+ void set_ref(ceph::shared_ptr<CollectionIndex> ref);
/// @see CollectionIndex
int init();
virtual int _split(
uint32_t match, //< [in] value to match
uint32_t bits, //< [in] bits to check
- std::tr1::shared_ptr<CollectionIndex> dest //< [in] destination index
+ ceph::shared_ptr<CollectionIndex> dest //< [in] destination index
) = 0;
/// @see CollectionIndex
int split(
uint32_t match,
uint32_t bits,
- std::tr1::shared_ptr<CollectionIndex> dest
+ ceph::shared_ptr<CollectionIndex> dest
) {
WRAP_RETRY(
r = _split(match, bits, dest);
#include <set>
#include <map>
#include <string>
-#include <tr1/memory>
+#include "include/memory.h"
#include <errno.h>
using std::string;
#include "common/perf_counters.h"
#include <set>
#include <map>
#include <string>
-#include <tr1/memory>
+#include "include/memory.h"
#include <boost/scoped_ptr.hpp>
#include "leveldb/db.h"
#include "leveldb/env.h"
};
KeyValueDB::Transaction get_transaction() {
- return std::tr1::shared_ptr< LevelDBTransactionImpl >(
+ return ceph::shared_ptr< LevelDBTransactionImpl >(
new LevelDBTransactionImpl(this));
}
protected:
WholeSpaceIterator _get_iterator() {
- return std::tr1::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
+ return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
new LevelDBWholeSpaceIteratorImpl(
db->NewIterator(leveldb::ReadOptions())
)
snapshot = db->GetSnapshot();
options.snapshot = snapshot;
- return std::tr1::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
+ return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
new LevelDBSnapshotIteratorImpl(db.get(), snapshot,
db->NewIterator(options))
);
#include "include/types.h"
#include "include/stringify.h"
+#include "include/unordered_map.h"
+#include "include/memory.h"
#include "common/errno.h"
#include "MemStore.h"
Mutex::Locker l(apply_lock); // block any writer
dump_all();
set<coll_t> collections;
- for (hash_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
+ for (ceph::unordered_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
p != coll_map.end();
++p) {
dout(20) << __func__ << " coll " << p->first << " " << p->second << dendl;
void MemStore::dump(Formatter *f)
{
f->open_array_section("collections");
- for (hash_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
+ for (ceph::unordered_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
p != coll_map.end();
++p) {
f->open_object_section("collection");
MemStore::CollectionRef MemStore::get_collection(coll_t cid)
{
RWLock::RLocker l(coll_lock);
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
{
dout(10) << __func__ << dendl;
RWLock::RLocker l(coll_lock);
- for (hash_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
+ for (ceph::unordered_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
p != coll_map.end();
++p) {
ls.push_back(p->first);
{
dout(10) << __func__ << " " << cid << dendl;
RWLock::WLocker l(coll_lock);
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp != coll_map.end())
return -EEXIST;
coll_map[cid].reset(new Collection);
{
dout(10) << __func__ << " " << cid << dendl;
RWLock::WLocker l(coll_lock);
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return -ENOENT;
{
const void *value, size_t size)
{
dout(10) << __func__ << " " << cid << " " << name << dendl;
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return -ENOENT;
RWLock::WLocker l(cp->second->lock);
int MemStore::_collection_setattrs(coll_t cid, map<string,bufferptr> &aset)
{
dout(10) << __func__ << " " << cid << dendl;
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return -ENOENT;
RWLock::WLocker l(cp->second->lock);
int MemStore::_collection_rmattr(coll_t cid, const char *name)
{
dout(10) << __func__ << " " << cid << " " << name << dendl;
- hash_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
+ ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return -ENOENT;
RWLock::WLocker l(cp->second->lock);
#ifndef CEPH_MEMSTORE_H
#define CEPH_MEMSTORE_H
-#include <ext/hash_map>
-using namespace __gnu_cxx;
-
#include "include/assert.h"
+#include "include/unordered_map.h"
+#include "include/memory.h"
#include "common/Finisher.h"
#include "common/RWLock.h"
#include "ObjectStore.h"
f->close_section();
}
};
- typedef std::tr1::shared_ptr<Object> ObjectRef;
+ typedef ceph::shared_ptr<Object> ObjectRef;
struct Collection {
- hash_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
+ ceph::unordered_map<ghobject_t, ObjectRef> object_hash; ///< for lookup
map<ghobject_t, ObjectRef> object_map; ///< for iteration
map<string,bufferptr> xattr;
RWLock lock; ///< for object_{map,hash}
// level.
ObjectRef get_object(ghobject_t oid) {
- hash_map<ghobject_t,ObjectRef>::iterator o = object_hash.find(oid);
+ ceph::unordered_map<ghobject_t,ObjectRef>::iterator o = object_hash.find(oid);
if (o == object_hash.end())
return ObjectRef();
return o->second;
Collection() : lock("MemStore::Collection::lock") {}
};
- typedef std::tr1::shared_ptr<Collection> CollectionRef;
+ typedef ceph::shared_ptr<Collection> CollectionRef;
private:
class OmapIteratorImpl : public ObjectMap::ObjectMapIteratorImpl {
};
- hash_map<coll_t, CollectionRef> coll_map;
+ ceph::unordered_map<coll_t, CollectionRef> coll_map;
RWLock coll_lock; ///< rwlock to protect coll_map
Mutex apply_lock; ///< serialize all updates
#include "SequencerPosition.h"
#include <string>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
/**
* Encapsulates the FileStore key value store
virtual int status() = 0;
virtual ~ObjectMapIteratorImpl() {}
};
- typedef std::tr1::shared_ptr<ObjectMapIteratorImpl> ObjectMapIterator;
+ typedef ceph::shared_ptr<ObjectMapIteratorImpl> ObjectMapIterator;
virtual ObjectMapIterator get_iterator(const ghobject_t &oid) {
return ObjectMapIterator();
}
*/
#include <ctype.h>
#include <sstream>
-#include <tr1/memory>
+#include "include/memory.h"
#include "ObjectStore.h"
#include "common/Formatter.h"
#include "FileStore.h"
#include <map>
#include <boost/tuple/tuple.hpp>
-#include <tr1/memory>
+#include "include/memory.h"
#include "include/buffer.h"
#include "common/Formatter.h"
#include "common/hobject.h"
#include <map>
#include <set>
-#include <tr1/memory>
+#include "include/memory.h"
#include "include/buffer.h"
using namespace std;
}
};
- typedef std::tr1::shared_ptr<ErasureCodeInterface> ErasureCodeInterfaceRef;
+ typedef ceph::shared_ptr<ErasureCodeInterface> ErasureCodeInterfaceRef;
}
#include <boost/scoped_ptr.hpp>
#include "include/encoding.h"
+#include "include/unordered_set.h"
#include "common/bloom_filter.hpp"
#include "common/hobject.h"
#include "common/Formatter.h"
*/
class ExplicitHashHitSet : public HitSet::Impl {
uint64_t count;
- hash_set<uint32_t> hits;
+ ceph::unordered_set<uint32_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
void dump(Formatter *f) const {
f->dump_unsigned("insert_count", count);
f->open_array_section("hash_set");
- for (hash_set<uint32_t>::const_iterator p = hits.begin(); p != hits.end(); ++p)
+ for (ceph::unordered_set<uint32_t>::const_iterator p = hits.begin(); p != hits.end(); ++p)
f->dump_unsigned("hash", *p);
f->close_section();
}
*/
class ExplicitObjectHitSet : public HitSet::Impl {
uint64_t count;
- hash_set<hobject_t> hits;
+ ceph::unordered_set<hobject_t> hits;
public:
class Params : public HitSet::Params::Impl {
public:
void dump(Formatter *f) const {
f->dump_unsigned("insert_count", count);
f->open_array_section("set");
- for (hash_set<hobject_t>::const_iterator p = hits.begin(); p != hits.end(); ++p) {
+ for (ceph::unordered_set<hobject_t>::const_iterator p = hits.begin(); p != hits.end(); ++p) {
f->open_object_section("object");
p->dump(f);
f->close_section();
list<obj_watch_item_t> watchers;
osd_lock.Lock();
// scan pg's
- for (hash_map<pg_t,PG*>::iterator it = pg_map.begin();
+ for (ceph::unordered_map<pg_t,PG*>::iterator it = pg_map.begin();
it != pg_map.end();
++it) {
cct->_conf->apply_changes(NULL);
// Shutdown PGs
- for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
++p) {
dout(20) << " kicking pg " << p->first << dendl;
#ifdef PG_DEBUG_REFS
service.dump_live_pgids();
#endif
- for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
++p) {
dout(20) << " kicking pg " << p->first << dendl;
  // calculate union of map range
epoch_t end_epoch = superblock.oldest_map;
epoch_t cur_epoch = superblock.newest_map;
- for (hash_map<pg_t, PG*>::iterator i = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::iterator i = pg_map.begin();
i != pg_map.end();
++i) {
PG *pg = i->second;
// build heartbeat from set
if (is_active()) {
- for (hash_map<pg_t, PG*>::iterator i = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::iterator i = pg_map.begin();
i != pg_map.end();
++i) {
PG *pg = i->second;
}
std::set <pg_t> keys;
- for (hash_map<pg_t, PG*>::const_iterator pg_map_e = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::const_iterator pg_map_e = pg_map.begin();
pg_map_e != pg_map.end(); ++pg_map_e) {
keys.insert(pg_map_e->first);
}
fout << "*** osd " << whoami << ": dump_missing ***" << std::endl;
for (std::set <pg_t>::iterator p = keys.begin();
p != keys.end(); ++p) {
- hash_map<pg_t, PG*>::iterator q = pg_map.find(*p);
+ ceph::unordered_map<pg_t, PG*>::iterator q = pg_map.find(*p);
assert(q != pg_map.end());
PG *pg = q->second;
pg->lock();
}
if (m->scrub_pgs.empty()) {
- for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
+ for (ceph::unordered_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
++p) {
PG *pg = p->second;
}
// scan pg creations
- hash_map<pg_t, create_pg_info>::iterator n = creating_pgs.begin();
+ ceph::unordered_map<pg_t, create_pg_info>::iterator n = creating_pgs.begin();
while (n != creating_pgs.end()) {
- hash_map<pg_t, create_pg_info>::iterator p = n++;
+ ceph::unordered_map<pg_t, create_pg_info>::iterator p = n++;
pg_t pgid = p->first;
// am i still primary?
list<PGRef> to_remove;
// scan pg's
- for (hash_map<pg_t,PG*>::iterator it = pg_map.begin();
+ for (ceph::unordered_map<pg_t,PG*>::iterator it = pg_map.begin();
it != pg_map.end();
++it) {
PG *pg = it->second;
service.publish_map(osdmap);
// scan pg's
- for (hash_map<pg_t,PG*>::iterator it = pg_map.begin();
+ for (ceph::unordered_map<pg_t,PG*>::iterator it = pg_map.begin();
it != pg_map.end();
++it) {
PG *pg = it->second;
#include <map>
#include <memory>
-#include <tr1/memory>
+#include "include/memory.h"
using namespace std;
-#include <ext/hash_map>
-#include <ext/hash_set>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
+#include "include/unordered_set.h"
#include "Watch.h"
#include "common/shared_cache.hpp"
class TestOpsSocketHook;
struct C_CompleteSplits;
-typedef std::tr1::shared_ptr<ObjectStore::Sequencer> SequencerRef;
+typedef ceph::shared_ptr<ObjectStore::Sequencer> SequencerRef;
class DeletingState {
Mutex lock;
return status != DELETED_DIR;
} ///< @return true if we don't need to recreate the collection
};
-typedef std::tr1::shared_ptr<DeletingState> DeletingStateRef;
+typedef ceph::shared_ptr<DeletingState> DeletingStateRef;
class OSD;
class OSDService {
protected:
// -- placement groups --
- hash_map<pg_t, PG*> pg_map;
+ ceph::unordered_map<pg_t, PG*> pg_map;
map<pg_t, list<OpRequestRef> > waiting_for_pg;
map<pg_t, list<PG::CephPeeringEvtRef> > peering_wait_for_split;
PGRecoveryStats pg_recovery_stats;
set<int> prior;
pg_t parent;
};
- hash_map<pg_t, create_pg_info> creating_pgs;
+ ceph::unordered_map<pg_t, create_pg_info> creating_pgs;
double debug_drop_pg_create_probability;
int debug_drop_pg_create_duration;
int debug_drop_pg_create_left; // 0 if we just dropped the last one, -1 if we can drop more
void OSDMap::get_blacklist(list<pair<entity_addr_t,utime_t> > *bl) const
{
- for (hash_map<entity_addr_t,utime_t>::const_iterator it = blacklist.begin() ;
+ for (ceph::unordered_map<entity_addr_t,utime_t>::const_iterator it = blacklist.begin() ;
it != blacklist.end(); ++it) {
bl->push_back(*it);
}
f->close_section(); // primary_temp
f->open_array_section("blacklist");
- for (hash_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
+ for (ceph::unordered_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
p != blacklist.end();
++p) {
stringstream ss;
++p)
out << "primary_temp " << p->first << " " << p->second << "\n";
- for (hash_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
+ for (ceph::unordered_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
p != blacklist.end();
++p)
out << "blacklist " << p->first << " expires " << p->second << "\n";
#include <list>
#include <set>
#include <map>
-#include <tr1/memory>
+#include "include/memory.h"
using namespace std;
-#include <ext/hash_set>
-using __gnu_cxx::hash_set;
+#include "include/unordered_set.h"
/*
* we track up to two intervals during which the osd was alive and
vector<uint8_t> osd_state;
struct addrs_s {
- vector<std::tr1::shared_ptr<entity_addr_t> > client_addr;
- vector<std::tr1::shared_ptr<entity_addr_t> > cluster_addr;
- vector<std::tr1::shared_ptr<entity_addr_t> > hb_back_addr;
- vector<std::tr1::shared_ptr<entity_addr_t> > hb_front_addr;
+ vector<ceph::shared_ptr<entity_addr_t> > client_addr;
+ vector<ceph::shared_ptr<entity_addr_t> > cluster_addr;
+ vector<ceph::shared_ptr<entity_addr_t> > hb_back_addr;
+ vector<ceph::shared_ptr<entity_addr_t> > hb_front_addr;
entity_addr_t blank;
};
- std::tr1::shared_ptr<addrs_s> osd_addrs;
+ ceph::shared_ptr<addrs_s> osd_addrs;
vector<__u32> osd_weight; // 16.16 fixed point, 0x10000 = "in", 0 = "out"
vector<osd_info_t> osd_info;
- std::tr1::shared_ptr< map<pg_t,vector<int> > > pg_temp; // temp pg mapping (e.g. while we rebuild)
- std::tr1::shared_ptr< map<pg_t,int > > primary_temp; // temp primary mapping (e.g. while we rebuild)
+ ceph::shared_ptr< map<pg_t,vector<int> > > pg_temp; // temp pg mapping (e.g. while we rebuild)
+ ceph::shared_ptr< map<pg_t,int > > primary_temp; // temp primary mapping (e.g. while we rebuild)
map<int64_t,pg_pool_t> pools;
map<int64_t,string> pool_name;
map<string,int64_t> name_pool;
- std::tr1::shared_ptr< vector<uuid_d> > osd_uuid;
+ ceph::shared_ptr< vector<uuid_d> > osd_uuid;
vector<osd_xinfo_t> osd_xinfo;
- hash_map<entity_addr_t,utime_t> blacklist;
+ ceph::unordered_map<entity_addr_t,utime_t> blacklist;
epoch_t cluster_snapshot_epoch;
string cluster_snapshot;
bool new_blacklist_entries;
public:
- std::tr1::shared_ptr<CrushWrapper> crush; // hierarchical map
+ ceph::shared_ptr<CrushWrapper> crush; // hierarchical map
friend class OSDMonitor;
friend class PGMonitor;
WRITE_CLASS_ENCODER_FEATURES(OSDMap)
WRITE_CLASS_ENCODER_FEATURES(OSDMap::Incremental)
-typedef std::tr1::shared_ptr<const OSDMap> OSDMapRef;
+typedef ceph::shared_ptr<const OSDMap> OSDMapRef;
inline ostream& operator<<(ostream& out, const OSDMap& m) {
m.print_oneline_summary(out);
#include "common/Mutex.h"
#include "include/xlist.h"
#include "msg/Message.h"
-#include <tr1/memory>
+#include "include/memory.h"
#include "common/TrackedOp.h"
/**
void init_from_message();
- typedef std::tr1::shared_ptr<OpRequest> Ref;
+ typedef ceph::shared_ptr<OpRequest> Ref;
};
typedef OpRequest::Ref OpRequestRef;
pg->unlock();
}
};
-typedef std::tr1::shared_ptr<FlushState> FlushStateRef;
+typedef ceph::shared_ptr<FlushState> FlushStateRef;
void PG::start_flush(ObjectStore::Transaction *t,
list<Context *> *on_applied,
#include <boost/statechart/transition.hpp>
#include <boost/statechart/event_base.hpp>
#include <boost/scoped_ptr.hpp>
-#include <tr1/memory>
+#include "include/memory.h"
// re-include our assert to clobber boost's
#include "include/assert.h"
#include <string>
using namespace std;
-#include <ext/hash_map>
-#include <ext/hash_set>
-using namespace __gnu_cxx;
+#include "include/unordered_map.h"
+#include "include/unordered_set.h"
//#define DEBUG_RECOVERY_OIDS // track set of recovering oids explicitly, to find counting bugs
pg_stat_t pg_stats_publish;
// for ordering writes
- std::tr1::shared_ptr<ObjectStore::Sequencer> osr;
+ ceph::shared_ptr<ObjectStore::Sequencer> osr;
void _update_calc_stats();
void publish_stats_to_osd();
const boost::statechart::event_base &get_event() { return *evt; }
string get_desc() { return desc; }
};
- typedef std::tr1::shared_ptr<CephPeeringEvt> CephPeeringEvtRef;
+ typedef ceph::shared_ptr<CephPeeringEvt> CephPeeringEvtRef;
list<CephPeeringEvtRef> peering_queue; // op queue
list<CephPeeringEvtRef> peering_waiters;
* plus some methods to manipulate it all.
*/
struct IndexedLog : public pg_log_t {
- hash_map<hobject_t,pg_log_entry_t*> objects; // ptrs into log. be careful!
- hash_map<osd_reqid_t,pg_log_entry_t*> caller_ops;
+ ceph::unordered_map<hobject_t,pg_log_entry_t*> objects; // ptrs into log. be careful!
+ ceph::unordered_map<osd_reqid_t,pg_log_entry_t*> caller_ops;
// recovery pointers
list<pg_log_entry_t>::iterator complete_to; // not inclusive of referenced item
return caller_ops.count(r);
}
const pg_log_entry_t *get_request(const osd_reqid_t &r) const {
- hash_map<osd_reqid_t,pg_log_entry_t*>::const_iterator p = caller_ops.find(r);
+ ceph::unordered_map<osd_reqid_t,pg_log_entry_t*>::const_iterator p = caller_ops.find(r);
if (p == caller_ops.end())
return NULL;
return p->second;
#define CEPH_WATCH_H
#include <boost/intrusive_ptr.hpp>
-#include <tr1/memory>
+#include "include/memory.h"
#include <set>
#include "msg/Messenger.h"
class MWatchNotify;
class Watch;
-typedef std::tr1::shared_ptr<Watch> WatchRef;
-typedef std::tr1::weak_ptr<Watch> WWatchRef;
+typedef ceph::shared_ptr<Watch> WatchRef;
+typedef ceph::weak_ptr<Watch> WWatchRef;
class Notify;
-typedef std::tr1::shared_ptr<Notify> NotifyRef;
-typedef std::tr1::weak_ptr<Notify> WNotifyRef;
+typedef ceph::shared_ptr<Notify> NotifyRef;
+typedef ceph::weak_ptr<Notify> WNotifyRef;
struct CancelableContext;
OSDService *osd;
boost::intrusive_ptr<ReplicatedPG> pg;
- std::tr1::shared_ptr<ObjectContext> obc;
+ ceph::shared_ptr<ObjectContext> obc;
std::map<uint64_t, NotifyRef> in_progress_notifies;
Watch(
ReplicatedPG *pg, OSDService *osd,
- std::tr1::shared_ptr<ObjectContext> obc, uint32_t timeout,
+ ceph::shared_ptr<ObjectContext> obc, uint32_t timeout,
uint64_t cookie, entity_name_t entity,
entity_addr_t addr);
string gen_dbg_prefix();
static WatchRef makeWatchRef(
ReplicatedPG *pg, OSDService *osd,
- std::tr1::shared_ptr<ObjectContext> obc, uint32_t timeout, uint64_t cookie, entity_name_t entity, entity_addr_t addr);
+ ceph::shared_ptr<ObjectContext> obc, uint32_t timeout, uint64_t cookie, entity_name_t entity, entity_addr_t addr);
void set_self(WatchRef _self) {
self = _self;
}
/// Does not grant a ref count!
boost::intrusive_ptr<ReplicatedPG> get_pg() { return pg; }
- std::tr1::shared_ptr<ObjectContext> get_obc() { return obc; }
+ ceph::shared_ptr<ObjectContext> get_obc() { return obc; }
uint64_t get_cookie() const { return cookie; }
entity_name_t get_entity() const { return entity; }
#include "HitSet.h"
#include "Watch.h"
#include "OpRequest.h"
+#include "include/hash_namespace.h"
#define CEPH_OSD_ONDISK_MAGIC "ceph osd volume v026"
inline bool operator>(const osd_reqid_t& l, const osd_reqid_t& r) { return !(l <= r); }
inline bool operator>=(const osd_reqid_t& l, const osd_reqid_t& r) { return !(l < r); }
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<osd_reqid_t> {
size_t operator()(const osd_reqid_t &r) const {
static hash<uint64_t> H;
return H(r.name.num() ^ r.tid ^ r.inc);
}
};
-}
+CEPH_HASH_NAMESPACE_END
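The CEPH_HASH_NAMESPACE_START/END macros used by the hash<> specializations above open whichever namespace actually owns the hash<> template for the chosen container implementation. A hedged sketch of what "include/hash_namespace.h" might define (macro spellings assumed):

#ifndef CEPH_HASH_NAMESPACE_H
#define CEPH_HASH_NAMESPACE_H

// Expand to the namespace that owns hash<> for the selected unordered
// container implementation, so specializations land in the right place.
#ifdef HAVE_STD_UNORDERED_MAP
#define CEPH_HASH_NAMESPACE_START namespace std {
#define CEPH_HASH_NAMESPACE_END }
#define CEPH_HASH_NAMESPACE std
#else
#define CEPH_HASH_NAMESPACE_START namespace std { namespace tr1 {
#define CEPH_HASH_NAMESPACE_END }}
#define CEPH_HASH_NAMESPACE std::tr1
#endif

#endif // CEPH_HASH_NAMESPACE_H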
// -----
ostream& operator<<(ostream& out, const pg_t &pg);
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash< pg_t >
{
size_t operator()( const pg_t& x ) const
return H((x.pool() & 0xffffffff) ^ (x.pool() >> 32) ^ x.ps() ^ x.preferred());
}
};
-}
+CEPH_HASH_NAMESPACE_END
// ----------------------
return out;
}
-namespace __gnu_cxx {
+CEPH_HASH_NAMESPACE_START
template<> struct hash<coll_t> {
size_t operator()(const coll_t &c) const {
size_t h = 0;
return h;
}
};
-}
+CEPH_HASH_NAMESPACE_END
inline ostream& operator<<(ostream& out, const ceph_object_layout &ol)
{
const vector<int> &new_up, ///< [in] up as of osdmap
epoch_t same_interval_since, ///< [in] as of osdmap
epoch_t last_epoch_clean, ///< [in] current
- std::tr1::shared_ptr<const OSDMap> osdmap, ///< [in] current map
- std::tr1::shared_ptr<const OSDMap> lastmap, ///< [in] last map
+ ceph::shared_ptr<const OSDMap> osdmap, ///< [in] current map
+ ceph::shared_ptr<const OSDMap> lastmap, ///< [in] last map
int64_t poolid, ///< [in] pool for pg
pg_t pgid, ///< [in] pgid for pg
map<epoch_t, pg_interval_t> *past_intervals,///< [out] intervals
struct ObjectContext;
-typedef std::tr1::shared_ptr<ObjectContext> ObjectContextRef;
+typedef ceph::shared_ptr<ObjectContext> ObjectContextRef;
struct ObjectContext {
ObjectState obs;
finisher.stop();
perf_stop();
// we should be empty.
- for (vector<hash_map<sobject_t, Object *> >::iterator i = objects.begin();
+ for (vector<ceph::unordered_map<sobject_t, Object *> >::iterator i = objects.begin();
i != objects.end();
++i)
assert(i->empty());
ldout(cct, 10) << "release_all" << dendl;
uint64_t unclean = 0;
- vector<hash_map<sobject_t, Object*> >::iterator i = objects.begin();
+ vector<ceph::unordered_map<sobject_t, Object*> >::iterator i = objects.begin();
while (i != objects.end()) {
- hash_map<sobject_t, Object*>::iterator p = i->begin();
+ ceph::unordered_map<sobject_t, Object*>::iterator p = i->begin();
while (p != i->end()) {
- hash_map<sobject_t, Object*>::iterator n = p;
+ ceph::unordered_map<sobject_t, Object*>::iterator n = p;
++n;
Object *ob = p->second;
ldout(cct, 10) << "verify_stats" << dendl;
loff_t clean = 0, zero = 0, dirty = 0, rx = 0, tx = 0, missing = 0, error = 0;
- for (vector<hash_map<sobject_t, Object*> >::const_iterator i = objects.begin();
+ for (vector<ceph::unordered_map<sobject_t, Object*> >::const_iterator i = objects.begin();
i != objects.end();
++i) {
- for (hash_map<sobject_t, Object*>::const_iterator p = i->begin();
+ for (ceph::unordered_map<sobject_t, Object*>::const_iterator p = i->begin();
p != i->end();
++p) {
Object *ob = p->second;
flush_set_callback_t flush_set_callback;
void *flush_set_callback_arg;
- vector<hash_map<sobject_t, Object*> > objects; // indexed by pool_id
+ vector<ceph::unordered_map<sobject_t, Object*> > objects; // indexed by pool_id
tid_t last_read_tid;
#include <memory>
#include <sstream>
using namespace std;
-using namespace __gnu_cxx;
class Context;
class Messenger;
#include <stdlib.h>
#include <sys/types.h>
#include <time.h>
-#include <tr1/memory>
+#include "include/memory.h"
#include <sys/ioctl.h>
#include "include/rbd_types.h"
TextTable tbl;
const char *devices_path = "/sys/bus/rbd/devices";
- std::tr1::shared_ptr<DIR> device_dir(opendir(devices_path), do_closedir);
+ ceph::shared_ptr<DIR> device_dir(opendir(devices_path), do_closedir);
if (!device_dir.get()) {
r = -errno;
cerr << "rbd: could not open " << devices_path << ": "
}
};
- typedef std::tr1::shared_ptr<ChangeStatus> ChangeStatusPtr;
+ typedef ceph::shared_ptr<ChangeStatus> ChangeStatusPtr;
lru_map<string, ChangeStatusPtr> changes;
#include "KeyValueDBMemory.h"
#include <map>
#include <set>
-#include <tr1/memory>
+#include "include/memory.h"
#include <iostream>
using namespace std;
}
KeyValueDB::WholeSpaceIterator KeyValueDBMemory::_get_iterator() {
- return std::tr1::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
+ return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
new WholeSpaceMemIterator(this)
);
}
KeyValueDB::WholeSpaceIterator KeyValueDBMemory::_get_snapshot_iterator() {
KeyValueDBMemory *snap_db = new KeyValueDBMemory(this);
- return std::tr1::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
+ return ceph::shared_ptr<KeyValueDB::WholeSpaceIteratorImpl>(
new WholeSpaceSnapshotMemIterator(snap_db)
);
}
#include <map>
#include <set>
#include <string>
-#include <tr1/memory>
+#include "include/memory.h"
#include "os/KeyValueDB.h"
#include "include/buffer.h"
#include <dirent.h>
#include <string>
#include <vector>
-#include <tr1/memory>
+#include "include/memory.h"
#include <boost/scoped_ptr.hpp>
#include <sstream>
#include "stdlib.h"
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
-#include <tr1/memory>
+#include "include/memory.h"
#include <map>
#include <set>
#include <deque>
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-#include <tr1/memory>
+#include "include/memory.h"
#include <map>
#include <set>
#include <boost/scoped_ptr.hpp>
#include "bencher.h"
#include "include/utime.h"
#include <unistd.h>
-#include <tr1/memory>
+#include "include/memory.h"
#include "common/Mutex.h"
#include "common/Cond.h"
struct OnWriteApplied : public Context {
Bencher *bench;
uint64_t seq;
- std::tr1::shared_ptr<OnDelete> on_delete;
+ ceph::shared_ptr<OnDelete> on_delete;
OnWriteApplied(
Bencher *bench, uint64_t seq,
- std::tr1::shared_ptr<OnDelete> on_delete
+ ceph::shared_ptr<OnDelete> on_delete
) : bench(bench), seq(seq), on_delete(on_delete) {}
void finish(int r) {
bench->stat_collector->write_applied(seq);
struct OnWriteCommit : public Context {
Bencher *bench;
uint64_t seq;
- std::tr1::shared_ptr<OnDelete> on_delete;
+ ceph::shared_ptr<OnDelete> on_delete;
OnWriteCommit(
Bencher *bench, uint64_t seq,
- std::tr1::shared_ptr<OnDelete> on_delete
+ ceph::shared_ptr<OnDelete> on_delete
) : bench(bench), seq(seq), on_delete(on_delete) {}
void finish(int r) {
bench->stat_collector->write_committed(seq);
Cond cond;
bool done = 0;
{
- std::tr1::shared_ptr<OnFinish> on_finish(
+ ceph::shared_ptr<OnFinish> on_finish(
new OnFinish(&done, &lock, &cond));
uint64_t num = 0;
for (set<std::string>::const_iterator i = objects.begin();
*i,
0,
bl,
- new C_Holder<std::tr1::shared_ptr<OnFinish> >(on_finish),
- new C_Holder<std::tr1::shared_ptr<OnFinish> >(on_finish)
+ new C_Holder<ceph::shared_ptr<OnFinish> >(on_finish),
+ new C_Holder<ceph::shared_ptr<OnFinish> >(on_finish)
);
}
}
OpType op_type = next.get<3>();
switch (op_type) {
case WRITE: {
- std::tr1::shared_ptr<OnDelete> on_delete(
+ ceph::shared_ptr<OnDelete> on_delete(
new OnDelete(new Cleanup(this)));
stat_collector->start_write(seq, length);
while (bl.length() < length) {
private:
boost::scoped_ptr<
Distribution<boost::tuple<std::string,uint64_t,uint64_t, OpType> > > op_dist;
- std::tr1::shared_ptr<StatCollector> stat_collector;
+ ceph::shared_ptr<StatCollector> stat_collector;
boost::scoped_ptr<Backend> backend;
const uint64_t max_in_flight;
const uint64_t max_duration;
public:
Bencher(
Distribution<boost::tuple<std::string, uint64_t, uint64_t, OpType> > *op_gen,
- std::tr1::shared_ptr<StatCollector> stat_collector,
+ ceph::shared_ptr<StatCollector> stat_collector,
Backend *backend,
uint64_t max_in_flight,
uint64_t max_duration,
Context *on_commit)
{
bufferlist &bl_non_const = const_cast<bufferlist&>(bl);
- std::tr1::shared_ptr<librbd::Image> image = (*m_images)[oid];
+ ceph::shared_ptr<librbd::Image> image = (*m_images)[oid];
void *arg = static_cast<void *>(new arg_type(on_commit, on_write_applied));
librbd::RBD::AioCompletion *completion =
new librbd::RBD::AioCompletion(arg, on_complete);
bufferlist *bl,
Context *on_read_complete)
{
- std::tr1::shared_ptr<librbd::Image> image = (*m_images)[oid];
+ ceph::shared_ptr<librbd::Image> image = (*m_images)[oid];
void *arg = static_cast<void *>(new arg_type(on_read_complete, NULL));
librbd::RBD::AioCompletion *completion =
new librbd::RBD::AioCompletion(arg, on_complete);
#include "include/rbd/librbd.hpp"
class RBDBackend : public Backend {
- map<string, std::tr1::shared_ptr<librbd::Image> > *m_images;
+ map<string, ceph::shared_ptr<librbd::Image> > *m_images;
public:
- RBDBackend(map<string, std::tr1::shared_ptr<librbd::Image> > *images)
+ RBDBackend(map<string, ceph::shared_ptr<librbd::Image> > *images)
: m_images(images) {}
void write(
const string &oid,
detailed_ops = &cerr;
}
- std::tr1::shared_ptr<StatCollector> col(
+ ceph::shared_ptr<StatCollector> col(
new DetailedStatCollector(
1, new JSONFormatter, detailed_ops, &cout,
new MorePrinting(g_ceph_context)));
fs.apply_transaction(t);
}
- vector<std::tr1::shared_ptr<Bencher> > benchers(
+ vector<ceph::shared_ptr<Bencher> > benchers(
vm["num-writers"].as<unsigned>());
- for (vector<std::tr1::shared_ptr<Bencher> >::iterator i = benchers.begin();
+ for (vector<ceph::shared_ptr<Bencher> >::iterator i = benchers.begin();
i != benchers.end();
++i) {
set<string> objects;
(*i).reset(bencher);
}
- for (vector<std::tr1::shared_ptr<Bencher> >::iterator i = benchers.begin();
+ for (vector<ceph::shared_ptr<Bencher> >::iterator i = benchers.begin();
i != benchers.end();
++i) {
(*i)->create();
}
- for (vector<std::tr1::shared_ptr<Bencher> >::iterator i = benchers.begin();
+ for (vector<ceph::shared_ptr<Bencher> >::iterator i = benchers.begin();
i != benchers.end();
++i) {
(*i)->join();
librbd::RBD rbd;
{
- map<string, std::tr1::shared_ptr<librbd::Image> > images;
+ map<string, ceph::shared_ptr<librbd::Image> > images;
int order = vm["order"].as<unsigned>();
uint64_t image_size = ((uint64_t)vm["image-size"].as<unsigned>()) << 20;
for (set<string>::const_iterator i = image_names.begin();
cerr << "error creating image " << *i << " r=" << r << std::endl;
return -r;
}
- std::tr1::shared_ptr<librbd::Image> image(new librbd::Image());
+ ceph::shared_ptr<librbd::Image> image(new librbd::Image());
r = rbd.open(ioctx, *image, i->c_str());
if (r < 0) {
cerr << "error opening image " << *i << " r=" << r << std::endl;
*
*/
-#include <tr1/memory>
+#include "include/memory.h"
#include <limits.h>
#include <errno.h>
#include <sys/uio.h>
TEST(BufferList, TestCopyAll) {
const static size_t BIG_SZ = 10737414;
- std::tr1::shared_ptr <unsigned char> big(
+ ceph::shared_ptr <unsigned char> big(
(unsigned char*)malloc(BIG_SZ), free);
unsigned char c = 0;
for (size_t i = 0; i < BIG_SZ; ++i) {
bufferlist bl2;
i.copy_all(bl2);
ASSERT_EQ(bl2.length(), BIG_SZ);
- std::tr1::shared_ptr <unsigned char> big2(
+ ceph::shared_ptr <unsigned char> big2(
(unsigned char*)malloc(BIG_SZ), free);
bl2.copy(0, BIG_SZ, (char*)big2.get());
ASSERT_EQ(memcmp(big.get(), big2.get(), BIG_SZ), 0);
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <tr1/memory>
+#include "include/memory.h"
using ceph::bufferlist;
using std::cerr;
<< get_temp_dir() << "'. " << cpp_strerror(err) << std::endl;
return err;
}
- std::tr1::shared_ptr<FILE> fpp(fp, fclose);
+ ceph::shared_ptr<FILE> fpp(fp, fclose);
if (unlink_idx >= MAX_FILES_TO_DELETE)
return -ENOBUFS;
if (unlink_idx == 0) {
#include <boost/random/binomial_distribution.hpp>
#include <gtest/gtest.h>
-#include <ext/hash_map>
-using __gnu_cxx::hash_map;
+#include "include/unordered_map.h"
typedef boost::mt11213b gen_type;
class StoreTest : public ::testing::Test {
const std::string base_path("PATH");
EXPECT_EQ(0, ::system("rm -fr PATH"));
EXPECT_EQ(0, ::mkdir("PATH", 0700));
- std::tr1::shared_ptr<CollectionIndex> index(new FlatIndex(collection, base_path));
+ ceph::shared_ptr<CollectionIndex> index(new FlatIndex(collection, base_path));
const std::string key("KEY");
uint64_t hash = 111;
uint64_t pool = 222;
const std::string object_name("ABC");
const std::string filename("PATH/" + object_name + "_head");
EXPECT_EQ(0, ::close(::creat(filename.c_str(), 0600)));
- std::tr1::shared_ptr<CollectionIndex> index(new FlatIndex(collection, base_path));
+ ceph::shared_ptr<CollectionIndex> index(new FlatIndex(collection, base_path));
vector<ghobject_t> ls;
index->collection_list(&ls);
EXPECT_EQ((unsigned)1, ls.size());
virtual int _split(
uint32_t match,
uint32_t bits,
- std::tr1::shared_ptr<CollectionIndex> dest
+ ceph::shared_ptr<CollectionIndex> dest
) { return 0; }
void test_generate_and_parse(const ghobject_t &hoid, const std::string &mangled_expected) {
#include "Object.h"
#include "TestOpStat.h"
#include "test/librados/test.h"
+#include "include/memory.h"
#include "common/sharedptr_registry.hpp"
#include "common/errno.h"
#include "osd/HitSet.h"
ObjectDesc old_value;
int snap;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
bufferlist result;
int retval;
bool done;
librados::ObjectWriteOperation op;
librados::AioCompletion *comp;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
RollbackOp(int n,
RadosTestContext *context,
context->update_object_version(oid, comp->get_version64());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
- in_use = std::tr1::shared_ptr<int>();
+ in_use = ceph::shared_ptr<int>();
context->kick();
}
librados::ObjectReadOperation rd_op;
librados::AioCompletion *comp;
librados::AioCompletion *comp_racing_read;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
int snap;
int done;
uint64_t version;
bool dirty;
ObjectDesc old_value;
int snap;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
IsDirtyOp(int n,
RadosTestContext *context,
bool blocking;
int snap;
bool can_fail;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
CacheFlushOp(int n,
RadosTestContext *context,
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
string oid;
- std::tr1::shared_ptr<int> in_use;
+ ceph::shared_ptr<int> in_use;
CacheEvictOp(int n,
RadosTestContext *context,
obc.start();
atomic_t outstanding_reads;
- vector<std::tr1::shared_ptr<op_data> > ops;
+ vector<ceph::shared_ptr<op_data> > ops;
ObjectCacher::ObjectSet object_set(NULL, 0, 0);
SnapContext snapc;
ceph::buffer::ptr bp(max_op_len);
uint64_t length = random() % (MAX(max_len - 1, 1)) + 1;
std::string oid = "test" + stringify(random() % num_objs);
bool is_read = random() < percent_reads * RAND_MAX;
- std::tr1::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read));
+ ceph::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read));
ops.push_back(op);
std::cout << "op " << i << " " << (is_read ? "read" : "write")
<< " " << op->extent << "\n";
//
int osd_id = 1;
epoch_t epoch = 40;
- std::tr1::shared_ptr<OSDMap> osdmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
- std::tr1::shared_ptr<OSDMap> lastmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
// pool did not exist in the old osdmap
//
{
- std::tr1::shared_ptr<OSDMap> lastmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
// PG is splitting
//
{
- std::tr1::shared_ptr<OSDMap> osdmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
// PG size has changed
//
{
- std::tr1::shared_ptr<OSDMap> osdmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
// The new osdmap is created so that it triggers the
// bug.
//
- std::tr1::shared_ptr<OSDMap> osdmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
new_acting.push_back(osd_id + 4);
new_acting.push_back(osd_id + 5);
- std::tr1::shared_ptr<OSDMap> lastmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
epoch_t last_epoch_clean = epoch - 10;
- std::tr1::shared_ptr<OSDMap> lastmap(new OSDMap());
+ ceph::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
-#include <tr1/memory>
+#include "include/memory.h"
#include <map>
#include <set>
#include <boost/scoped_ptr.hpp>
virtual void operate(map<string, bufferlist> *store) = 0;
virtual ~_Op() {}
};
- typedef std::tr1::shared_ptr<_Op> Op;
+ typedef ceph::shared_ptr<_Op> Op;
struct Remove : public _Op {
set<string> to_remove;
Remove(const set<string> &to_remove) : to_remove(to_remove) {}
class SnapMapperTest : public ::testing::Test {
protected:
boost::scoped_ptr< PausyAsyncMap > driver;
- map<pg_t, std::tr1::shared_ptr<MapperVerifier> > mappers;
+ map<pg_t, ceph::shared_ptr<MapperVerifier> > mappers;
uint32_t pgnum;
virtual void SetUp() {
#include <boost/random/binomial_distribution.hpp>
#include <gtest/gtest.h>
-#include <ext/hash_map>
+#include "include/unordered_map.h"
void usage(const string &name) {
std::cerr << "Usage: " << name << " [xattr|omap] store_path store_journal"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
-#include <ext/hash_map>
-using __gnu_cxx::hash_map;
+#include "include/unordered_map.h"
int dupstore(ObjectStore* src, ObjectStore* dst)
{
if (dst->mount() < 0) return 1;
// objects
- hash_map<ghobject_t, coll_t> did_object;
+ ceph::unordered_map<ghobject_t, coll_t> did_object;
// collections
vector<coll_t> collections;
while (1) {
cout << "pass " << ++pass << std::endl;
- hash_map<pg_t,vector<int> > m;
+ ceph::unordered_map<pg_t,vector<int> > m;
for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin();
p != osdmap.get_pools().end();
++p) {