From: Sage Weil
Date: Tue, 11 Oct 2016 18:33:14 +0000 (-0400)
Subject: include/mempool: introduce mempool infrastructure
X-Git-Tag: v11.1.0~442^2~19
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=9cbacc81ccc1a397080281e79a08930f2d7ad2e5;p=ceph.git

include/mempool: introduce mempool infrastructure

This is heavily based on Allen Samuels' prototype.

Signed-off-by: Sage Weil
---

diff --git a/src/global/CMakeLists.txt b/src/global/CMakeLists.txt
index f9a1c264ed32..100cd282d731 100644
--- a/src/global/CMakeLists.txt
+++ b/src/global/CMakeLists.txt
@@ -1,7 +1,8 @@
 set(libglobal_srcs
   global_init.cc
   pidfile.cc
-  signal_handler.cc)
+  signal_handler.cc
+  mempool.cc)
 set(global_common_files
   global_context.cc)
 add_library(global_common_objs OBJECT ${global_common_files})
diff --git a/src/global/mempool.cc b/src/global/mempool.cc
new file mode 100644
index 000000000000..ec96d7cca415
--- /dev/null
+++ b/src/global/mempool.cc
@@ -0,0 +1,123 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2016 Allen Samuels
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
+
+#include "include/mempool.h"
+#include "include/demangle.h"
+
+
+// default to debug_mode off
+static bool debug_mode = false;
+
+// --------------------------------------------------------------
+
+static mempool::pool_t *pools[mempool::num_pools] = {
+#define P(x) nullptr,
+  DEFINE_MEMORY_POOLS_HELPER(P)
+#undef P
+};
+
+mempool::pool_t& mempool::get_pool(mempool::pool_index_t ix)
+{
+  if (pools[ix]) {
+    return *pools[ix];
+  }
+
+  switch (ix) {
+#define P(x) \
+    case x: pools[ix] = new mempool::pool_t(#x, debug_mode); break;
+    DEFINE_MEMORY_POOLS_HELPER(P);
+#undef P
+  default: assert(0);
+  }
+  return *pools[ix];
+}
+
+void mempool::dump(ceph::Formatter *f, size_t skip)
+{
+  for (size_t i = skip; i < num_pools; ++i) {
+    const pool_t &pool = mempool::get_pool((pool_index_t)i);
+    f->open_object_section(pool.get_name().c_str());
+    pool.dump(f);
+    f->close_section();
+  }
+}
+
+void mempool::set_debug_mode(bool d)
+{
+  debug_mode = d;
+  for (size_t i = 0; i < mempool::num_pools; ++i) {
+    if (pools[i]) {
+      pools[i]->debug = d;
+    }
+  }
+}
+
+// --------------------------------------------------------------
+// pool_t
+
+size_t mempool::pool_t::allocated_bytes() const
+{
+  ssize_t result = 0;
+  for (size_t i = 0; i < num_shards; ++i) {
+    result += shard[i].bytes;
+  }
+  return (size_t) result;
+}
+
+size_t mempool::pool_t::allocated_items() const
+{
+  ssize_t result = 0;
+  for (size_t i = 0; i < num_shards; ++i) {
+    result += shard[i].items;
+  }
+  return (size_t) result;
+}
+
+void mempool::pool_t::get_stats(
+  stats_t *total,
+  std::map<std::string,stats_t> *by_type) const
+{
+  for (size_t i = 0; i < num_shards; ++i) {
+    total->items += shard[i].items;
+    total->bytes += shard[i].bytes;
+    if (debug) {
+      std::unique_lock<std::mutex> shard_lock(shard[i].lock);
+      for (const list_member_t *p = shard[i].containers.next;
+           p != &shard[i].containers;
+           p = p->next) {
+        const pool_allocator_base_t *c =
+          reinterpret_cast<const pool_allocator_base_t *>(p);
+        std::string n = ceph_demangle(c->type_id);
+        stats_t &s = (*by_type)[n];
+        s.bytes = c->items * c->item_size;
+        s.items = c->items;
+      }
+    }
+  }
+}
+
+void mempool::pool_t::dump(ceph::Formatter *f) const
+{
+  stats_t total;
+  std::map<std::string,stats_t> by_type;
+  get_stats(&total, &by_type);
+  f->dump_object("total", total);
+  if (!by_type.empty()) {
+    for (auto &i : by_type) {
+      f->open_object_section(i.first.c_str());
+      i.second.dump(f);
+      f->close_section();
+    }
+  }
+}
diff --git a/src/include/demangle.h b/src/include/demangle.h
new file mode 100644
index 000000000000..9e46d952f058
--- /dev/null
+++ b/src/include/demangle.h
@@ -0,0 +1,48 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2016 Allen Samuels
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
+
+#ifndef CEPH_INCLUDE_DEMANGLE
+#define CEPH_INCLUDE_DEMANGLE
+
+//// Stole this code from http://stackoverflow.com/questions/281818/unmangling-the-result-of-stdtype-infoname
+#ifdef __GNUG__
+#include <cstdlib>
+#include <memory>
+#include <cxxabi.h>
+
+static std::string ceph_demangle(const char* name)
+{
+  int status = -4; // some arbitrary value to eliminate the compiler warning
+
+  // enable c++11 by passing the flag -std=c++11 to g++
+  std::unique_ptr<char, void(*)(void*)> res {
+    abi::__cxa_demangle(name, NULL, NULL, &status),
+    std::free
+  };
+
+  return (status == 0) ? res.get() : name;
+}
+
+#else
+
+// does nothing if not g++
+static std::string ceph_demangle(const char* name)
+{
+  return name;
+}
+
+#endif
+
+
+#endif
diff --git a/src/include/mempool.h b/src/include/mempool.h
new file mode 100644
index 000000000000..7e6a8dfffa23
--- /dev/null
+++ b/src/include/mempool.h
@@ -0,0 +1,523 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2016 Allen Samuels
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation.  See file COPYING.
+ *
+ */
+
+#ifndef _CEPH_INCLUDE_MEMPOOL_H
+#define _CEPH_INCLUDE_MEMPOOL_H
+#include <assert.h>
+#include <pthread.h>
+
+#include <atomic>
+#include <functional>
+#include <list>
+#include <map>
+#include <mutex>
+#include <set>
+#include <string>
+#include <typeinfo>
+#include <unordered_map>
+#include <vector>
+
+#include <common/Formatter.h>
+
+/*
+
+Memory Pools
+============
+
+A memory pool is a method for accounting the consumption of memory of
+a set of containers.
+
+Memory pools are statically declared (see pool_index_t).
+
+Each memory pool tracks the number of bytes and items it contains.
+
+Allocators can be declared and associated with a type so that they are
+tracked independently of the pool total.  This additional accounting
+is optional and only incurs overhead if debugging is enabled at
+runtime.  This allows developers to see which types are consuming the
+pool resources.
+
+
+Declaring
+---------
+
+Using memory pools is very easy.
+
+To create a new memory pool, simply add a new name into the list of
+memory pools that's defined in "DEFINE_MEMORY_POOLS_HELPER".  That's
+it.  :)
+
+For each memory pool that's created a C++ namespace is also
+automatically created (with the same name as in
+DEFINE_MEMORY_POOLS_HELPER).  That namespace contains a set of common
+STL containers that are predefined with the appropriate allocators.
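+
+For example (an illustrative sketch; "client_data" is a hypothetical
+pool name, not one defined by this patch), adding a third pool is a
+one-line change to that helper:
+
+  #define DEFINE_MEMORY_POOLS_HELPER(f) \
+    f(unittest_1)                       \
+    f(unittest_2)                       \
+    f(client_data)
+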
+Thus for mempool "unittest_1" we have automatically available to us:
+
+  unittest_1::map<k,v>
+  unittest_1::multimap<k,v>
+  unittest_1::set<k>
+  unittest_1::list<v>
+  unittest_1::vector<v>
+  unittest_1::unordered_map<k,v>
+
+
+Putting objects in a mempool
+----------------------------
+
+In order to use a memory pool with a particular type, a few additional
+declarations are needed.
+
+For a class:
+
+  struct Foo {
+    MEMPOOL_CLASS_HELPERS();
+    ...
+  };
+
+Then, in an appropriate .cc file,
+
+  MEMPOOL_DEFINE_OBJECT_FACTORY(Foo, foo, unittest_1);
+
+The second argument can generally be identical to the first, except
+when the type contains a nested scope.  For example, for
+BlueStore::Onode, we need to do
+
+  MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
+                                bluestore_meta);
+
+(This is just because we need to name some static variables and we
+can't use :: in a variable name.)
+
+In order to use the STL containers, a few additional declarations
+are needed.  For example,
+
+  unittest_1::map<int,int> mymap;
+
+requires
+
+  MEMPOOL_DEFINE_FACTORY(int, int, unittest_1);
+  MEMPOOL_DEFINE_MAP_FACTORY(int, int, int_int, unittest_1);
+
+There are similar macros for SET, LIST, and UNORDERED_MAP.  The MAP
+macro serves both std::map and std::multimap, and std::vector doesn't
+need a second container-specific declaration (because it only
+allocates T and T* arrays; there is no internal container-specific
+wrapper type).
+
+unordered_map is trickier.  First, it has to allocate the hash
+buckets, which requires an extra mempool-wide definition that is
+shared by all types in the pool.  Second, sometimes the hash value is
+cached in the hash node and sometimes it is not.  The glibc STL makes
+its own decision if you don't explicitly define traits, so you either
+need to match your definition with its inference, or explicitly define
+traits, or simply define allocators for both the cached and uncached
+cases and leave one of them unused (at the cost of polluting your
+debug dumps).  For example,
+
+  unittest_2::unordered_map<uint64_t,std::string> one;  // not cached
+  unittest_2::unordered_map<std::string,uint64_t> two;  // cached
+
+needs
+
+  MEMPOOL_DEFINE_UNORDERED_MAP_BASE_FACTORY(unittest_2);  // used by both
+  MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(uint64_t, std::string, false, int_str,
+                                       unittest_2);
+  MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(std::string, uint64_t, true, str_int,
+                                       unittest_2);
+
+
+Introspection
+-------------
+
+The simplest way to interrogate the process is with
+
+  Formatter *f = ...
+  mempool::dump(f);
+
+This will dump information about *all* memory pools.
+
+You can also interrogate a specific pool programmatically with
+
+  size_t bytes = unittest_2::allocated_bytes();
+  size_t items = unittest_2::allocated_items();
+
+Note that you cannot easily query per-type usage, primarily because
+debug mode is optional and you should not rely on that information
+being available.
+
+*/
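+
+// A minimal end-to-end sketch of the pieces described above (assumes a
+// class Foo and the unittest_1 pool used in the examples; illustrative
+// only):
+//
+//   struct Foo {
+//     MEMPOOL_CLASS_HELPERS();
+//   };
+//   MEMPOOL_DEFINE_OBJECT_FACTORY(Foo, foo, unittest_1);  // in a .cc file
+//
+//   Foo *foo = new Foo;                         // accounted to unittest_1
+//   size_t b = unittest_1::allocated_bytes();   // now >= sizeof(Foo)
+//   delete foo;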
+}; +#undef P + +extern void set_debug_mode(bool d); + +// -------------------------------------------------------------- +struct pool_allocator_base_t; +class pool_t; + +// doubly linked list +struct list_member_t { + list_member_t *next; + list_member_t *prev; + list_member_t() : next(this), prev(this) {} + ~list_member_t() { + assert(next == this && prev == this); + } + void insert(list_member_t *i) { + i->next = next; + i->prev = this; + next = i; + } + void remove() { + prev->next = next; + next->prev = prev; + next = this; + prev = this; + } +}; + +// we shard pool stats across many shard_t's to reduce the amount +// of cacheline ping pong. +enum { num_shards = 64 }; + +struct shard_t { + std::atomic bytes = {0}; + std::atomic items = {0}; + mutable std::mutex lock; // only used for containers list + list_member_t containers; // protected by lock +}; + +struct stats_t { + ssize_t items = 0; + ssize_t bytes = 0; + void dump(ceph::Formatter *f) const { + f->dump_int("items", items); + f->dump_int("bytes", bytes); + } +}; + +// Root of all allocators, this enables the container information to +// operation easily. These fields are "always" accurate. +struct pool_allocator_base_t { + list_member_t list_member; + + pool_t *pool = nullptr; + shard_t *shard = nullptr; + const char *type_id = nullptr; + size_t item_size = 0; + + // for debug mode + std::atomic items = {0}; // signed + + // effective constructor + void attach_pool(pool_index_t index, const char *type_id); + + ~pool_allocator_base_t(); +}; + +pool_t& get_pool(pool_index_t ix); + +class pool_t { + std::string name; + shard_t shard[num_shards]; + friend class pool_allocator_base_t; +public: + bool debug; + + pool_t(const std::string& n, bool _debug) + : name(n), debug(_debug) { + } + + // + // How much this pool consumes. O() + // + size_t allocated_bytes() const; + size_t allocated_items() const; + + const std::string& get_name() const { + return name; + } + + shard_t* pick_a_shard() { + // Dirt cheap, see: + // http://fossies.org/dox/glibc-2.24/pthread__self_8c_source.html + size_t me = (size_t)pthread_self(); + size_t i = (me >> 3) % num_shards; + return &shard[i]; + } + + // get pool stats. by_type is not populated if !debug + void get_stats(stats_t *total, + std::map *by_type) const; + + void dump(ceph::Formatter *f) const; +}; + +// skip unittest_[12] by default +void dump(ceph::Formatter *f, size_t skip=2); + +inline void pool_allocator_base_t::attach_pool( + pool_index_t index, + const char *_type_id) +{ + assert(pool == nullptr); + pool = &get_pool(index); + shard = pool->pick_a_shard(); + type_id = _type_id; + + // unconditionally register type, even if debug is currently off + std::unique_lock lock(shard->lock); + shard->containers.insert(&list_member); +} + +inline pool_allocator_base_t::~pool_allocator_base_t() +{ + if (pool) { + std::unique_lock lock(shard->lock); + list_member.remove(); + } +} + + +// Stateless STL allocator for use with containers. All actual state +// is stored in the static pool_allocator_base_t, which saves us from +// passing the allocator to container constructors. 
+
+inline void pool_allocator_base_t::attach_pool(
+  pool_index_t index,
+  const char *_type_id)
+{
+  assert(pool == nullptr);
+  pool = &get_pool(index);
+  shard = pool->pick_a_shard();
+  type_id = _type_id;
+
+  // unconditionally register type, even if debug is currently off
+  std::unique_lock<std::mutex> lock(shard->lock);
+  shard->containers.insert(&list_member);
+}
+
+inline pool_allocator_base_t::~pool_allocator_base_t()
+{
+  if (pool) {
+    std::unique_lock<std::mutex> lock(shard->lock);
+    list_member.remove();
+  }
+}
+
+
+// Stateless STL allocator for use with containers.  All actual state
+// is stored in the static pool_allocator_base_t, which saves us from
+// passing the allocator to container constructors.
+
+template<pool_index_t pool_ix, typename T>
+class pool_allocator {
+  static pool_allocator_base_t base;
+
+public:
+  typedef pool_allocator<pool_ix,T> allocator_type;
+  typedef T value_type;
+  typedef value_type *pointer;
+  typedef const value_type * const_pointer;
+  typedef value_type& reference;
+  typedef const value_type& const_reference;
+  typedef std::size_t size_type;
+  typedef std::ptrdiff_t difference_type;
+
+  template<typename U> struct rebind {
+    typedef pool_allocator<pool_ix,U> other;
+  };
+
+  pool_allocator() {
+    // initialize fields in the static member.  this should only happen
+    // once, but it's also harmless if we do it multiple times.
+    base.type_id = typeid(T).name();
+    base.item_size = sizeof(T);
+  }
+  template<typename U>
+  pool_allocator(const pool_allocator<pool_ix,U>&) {}
+  void operator=(const allocator_type&) {}
+
+  void attach_pool(pool_index_t index, const char *type_id) {
+    base.attach_pool(index, type_id);
+  }
+
+  pointer allocate(size_t n, void *p = nullptr) {
+    size_t total = sizeof(T) * n;
+    base.shard->bytes += total;
+    base.shard->items += n;
+    if (base.pool->debug) {
+      base.items += n;
+    }
+    pointer r = reinterpret_cast<pointer>(new char[total]);
+    return r;
+  }
+
+  void deallocate(pointer p, size_type n) {
+    size_t total = sizeof(T) * n;
+    base.shard->bytes -= total;
+    base.shard->items -= n;
+    if (base.pool->debug) {
+      base.items -= n;
+    }
+    delete[] reinterpret_cast<char*>(p);
+  }
+
+  void destroy(pointer p) {
+    p->~T();
+  }
+
+  template<class U>
+  void destroy(U *p) {
+    p->~U();
+  }
+
+  void construct(pointer p, const_reference val) {
+    ::new ((void *)p) T(val);
+  }
+
+  template<class U, class... Args> void construct(U* p, Args&&... args) {
+    ::new((void *)p) U(std::forward<Args>(args)...);
+  }
+
+  bool operator==(const pool_allocator&) { return true; }
+  bool operator!=(const pool_allocator&) { return false; }
+};
+
+
+// There is one factory associated with every type that lives in a
+// mempool.
+
+template<pool_index_t pool_ix, typename o>
+class factory {
+public:
+  typedef pool_allocator<pool_ix,o> allocator_type;
+  static allocator_type alloc;
+
+  factory() {
+    alloc.attach_pool(pool_ix, typeid(o).name());
+  }
+  static void *allocate() {
+    return (void *)alloc.allocate(1);
+  }
+  static void free(void *p) {
+    alloc.deallocate((o *)p, 1);
+  }
+};
+
+};
+
+
+// Namespace mempool
+
+#define P(x)                                                              \
+  namespace x {                                                           \
+    static const mempool::pool_index_t pool_ix = mempool::x;              \
+    template<typename v>                                                  \
+    using pool_allocator = mempool::pool_allocator<pool_ix,v>;            \
+    template<typename k, typename v, typename cmp = std::less<k> >        \
+    using map = std::map<k,v,cmp,                                         \
+                         pool_allocator<std::pair<const k,v>>>;           \
+    template<typename k, typename v, typename cmp = std::less<k> >        \
+    using multimap = std::multimap<k,v,cmp,                               \
+                                   pool_allocator<std::pair<const k,v>>>; \
+    template<typename k, typename cmp = std::less<k> >                    \
+    using set = std::set<k,cmp,pool_allocator<k>>;                        \
+    template<typename v>                                                  \
+    using list = std::list<v,pool_allocator<v>>;                          \
+    template<typename v>                                                  \
+    using vector = std::vector<v,pool_allocator<v>>;                      \
+    template<typename k, typename v,                                      \
+             typename h = std::hash<k>,                                   \
+             typename eq = std::equal_to<k>>                              \
+    using unordered_map =                                                 \
+      std::unordered_map<k,v,h,eq,pool_allocator<std::pair<const k,v>>>;  \
+    template<typename o>                                                  \
+    using factory = mempool::factory<pool_ix,o>;                          \
+    inline size_t allocated_bytes() {                                     \
+      return mempool::get_pool(mempool::x).allocated_bytes();             \
+    }                                                                     \
+    inline size_t allocated_items() {                                     \
+      return mempool::get_pool(mempool::x).allocated_items();             \
+    }                                                                     \
+  };
+
+DEFINE_MEMORY_POOLS_HELPER(P)
+
+#undef P
+
+// Use this for any type that is contained by a container (unless it
+// is a class you defined; see below).
+#define MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool)                    \
+  template<>                                                              \
+  mempool::pool_allocator_base_t                                          \
+  mempool::pool_allocator<mempool::pool,obj>::base = {};                  \
+  template<>                                                              \
+  typename pool::factory<obj>::allocator_type                             \
+  pool::factory<obj>::alloc = {};                                         \
+  static pool::factory<obj> _factory_##factoryname;
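+
+// For example (illustrative; this mirrors the unit tests), declaring
+//
+//   MEMPOOL_DEFINE_FACTORY(int, int, unittest_1);
+//
+// in one .cc file is enough to let unittest_1::vector<int> be used, with
+// its allocations charged to the unittest_1 pool.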
+
+// Use this for each class that belongs to a mempool.  For example,
+//
+//   class T {
+//     MEMPOOL_CLASS_HELPERS();
+//     ...
+//   };
+//
+#define MEMPOOL_CLASS_HELPERS()                                           \
+  void *operator new(size_t size);                                        \
+  void *operator new[](size_t size) { assert(0 == "no array new"); }      \
+  void  operator delete(void *);                                          \
+  void  operator delete[](void *) { assert(0 == "no array delete"); }
+
+// Use this in some particular .cc file to match each class with a
+// MEMPOOL_CLASS_HELPERS().
+#define MEMPOOL_DEFINE_OBJECT_FACTORY(obj,factoryname,pool)               \
+  MEMPOOL_DEFINE_FACTORY(obj, factoryname, pool)                          \
+  void *obj::operator new(size_t size) {                                  \
+    assert(size == sizeof(obj));                                          \
+    return pool::factory<obj>::allocate();                                \
+  }                                                                       \
+  void obj::operator delete(void *p) {                                    \
+    pool::factory<obj>::free(p);                                          \
+  }
+
+// for std::set
+#define MEMPOOL_DEFINE_SET_FACTORY(t, factoryname, pool)                  \
+  MEMPOOL_DEFINE_FACTORY(std::_Rb_tree_node<t>,                           \
+                         factoryname##_rbtree_node, pool);
+
+// for std::list
+#define MEMPOOL_DEFINE_LIST_FACTORY(t, factoryname, pool)                 \
+  MEMPOOL_DEFINE_FACTORY(std::_List_node<t>,                              \
+                         factoryname##_list_node, pool);
+
+// for std::map
+#define MEMPOOL_DEFINE_MAP_FACTORY(k, v, factoryname, pool)               \
+  typedef std::pair<const k,v> _factory_type_##factoryname##pair_t;       \
+  MEMPOOL_DEFINE_FACTORY(                                                 \
+    _factory_type_##factoryname##pair_t,                                  \
+    factoryname##_pair, pool);                                            \
+  MEMPOOL_DEFINE_FACTORY(                                                 \
+    std::_Rb_tree_node<_factory_type_##factoryname##pair_t>,              \
+    factoryname##_rbtree_node, pool);
+
+// for std::unordered_map
+#define MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(k, v, cached, factoryname, pool) \
+  typedef std::pair<const k,v> _factory_type_##factoryname##pair_t;       \
+  typedef std::__detail::_Hash_node<_factory_type_##factoryname##pair_t,  \
+                                    cached>                               \
+    _factory_type_##factoryname##type;                                    \
+  MEMPOOL_DEFINE_FACTORY(                                                 \
+    _factory_type_##factoryname##type,                                    \
+    factoryname##_unordered_hash_node, pool);
+
+#define MEMPOOL_DEFINE_UNORDERED_MAP_BASE_FACTORY(pool)                   \
+  MEMPOOL_DEFINE_FACTORY(std::__detail::_Hash_node_base*,                 \
+                         pool##_unordered_hash_node_ptr, pool);
+
+
+#endif
diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
index d80c03785171..e2324b10b1cb 100644
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -705,6 +705,13 @@ add_executable(unittest_denc
 add_ceph_unittest(unittest_denc ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/unittest_denc)
 target_link_libraries(unittest_denc os global)
 
+# unittest_mempool
+add_executable(unittest_mempool
+  test_mempool.cc
+  )
+add_ceph_unittest(unittest_mempool ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/unittest_mempool)
+target_link_libraries(unittest_mempool global)
+
 # unittest_crypto
 add_executable(unittest_crypto
   crypto.cc
diff --git a/src/test/test_mempool.cc b/src/test/test_mempool.cc
new file mode 100644
index 000000000000..9f23e3cd7718
--- /dev/null
+++ b/src/test/test_mempool.cc
@@ -0,0 +1,304 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+ * Ceph distributed storage system
+ *
+ * Copyright (C) 2016 Western Digital Corporation
+ *
+ * Author: Allen Samuels
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <iostream>
+
+#include "global/global_init.h"
+#include "common/ceph_argparse.h"
+#include "global/global_context.h"
+#include "gtest/gtest.h"
+#include "include/mempool.h"
+
+void check_usage(mempool::pool_index_t ix)
+{
+  mempool::pool_t *pool = &mempool::get_pool(ix);
+  mempool::stats_t total;
+  map<std::string,mempool::stats_t> m;
+  pool->get_stats(&total, &m);
+  size_t usage = pool->allocated_bytes();
+  size_t sum = 0;
+  for (auto& p : m) {
+    sum += p.second.bytes;
+  }
+  if (sum != usage) {
+    ceph::TableFormatter jf;
+    pool->dump(&jf);
+    jf.flush(std::cout);
+  }
+  EXPECT_EQ(sum, usage);
+}
+
+template<typename A, typename B>
+void eq_elements(const A& a, const B& b)
+{
+  auto lhs = a.begin();
+  auto rhs = b.begin();
+  while (lhs != a.end()) {
+    EXPECT_EQ(*lhs,*rhs);
+    lhs++;
+    rhs++;
+  }
+  EXPECT_EQ(rhs,b.end());
+}
+
+template<typename A, typename B>
+void eq_pairs(const A& a, const B& b)
+{
+  auto lhs = a.begin();
+  auto rhs = b.begin();
+  while (lhs != a.end()) {
+    EXPECT_EQ(lhs->first,rhs->first);
+    EXPECT_EQ(lhs->second,rhs->second);
+    lhs++;
+    rhs++;
+  }
+  EXPECT_EQ(rhs,b.end());
+}
+
+#define MAKE_INSERTER(inserter)                         \
+  template<typename A, typename B>                      \
+void do_##inserter(A& a, B& b, int count, int base) {   \
+  for (int i = 0; i < count; ++i) {                     \
+    a.inserter(base + i);                               \
+    b.inserter(base + i);                               \
+  }                                                     \
+}
+
+MAKE_INSERTER(push_back);
+MAKE_INSERTER(insert);
+
+template<typename A, typename B>
+void do_insert_key(A& a, B& b, int count, int base)
+{
+  for (int i = 0; i < count; ++i) {
+    a.insert(make_pair(base+i,base+i));
+    b.insert(make_pair(base+i,base+i));
+    check_usage(mempool::unittest_1);
+  }
+}
+
+TEST(mempool, vector_context)
+{
+  check_usage(mempool::unittest_1);
+  EXPECT_EQ(unittest_1::allocated_bytes(), 0u);
+  EXPECT_EQ(unittest_1::allocated_items(), 0u);
+  for (unsigned i = 0; i < 10; ++i) {
+    vector<int> a;
+    unittest_1::vector<int> b,c;
+    eq_elements(a,b);
+    do_push_back(a,b,i,i);
+    eq_elements(a,b);
+    check_usage(mempool::unittest_1);
+
+    mempool::stats_t total;
+    map<std::string,mempool::stats_t> by_type;
+    mempool::get_pool(mempool::unittest_1).get_stats(&total, &by_type);
+    EXPECT_GE(unittest_1::allocated_bytes(), i * 4u);
+    EXPECT_GE(unittest_1::allocated_items(), i);
+
+    c.swap(b);
+    eq_elements(a,c);
+    check_usage(mempool::unittest_1);
+    a.clear();
+    b.clear();
+    c.clear();
+  }
+}
+
+TEST(mempool, list_context)
+{
+  for (unsigned i = 1; i < 10; ++i) {
+    list<int> a;
+    unittest_1::list<int> b,c;
+    eq_elements(a,b);
+    do_push_back(a,b,i,i);
+    eq_elements(a,b);
+    c.swap(b);
+    eq_elements(a,c);
+    a.erase(a.begin());
+    c.erase(c.begin());
+    eq_elements(a,c);
+    a.clear();
+    b.clear();
+    c.clear();
+    do_push_back(a,b,i,i);
+    c.splice(c.begin(),b,b.begin(),b.end());
+
+    mempool::stats_t total;
+    map<std::string,mempool::stats_t> by_type;
+    mempool::get_pool(mempool::unittest_1).get_stats(&total, &by_type);
+    EXPECT_GE(unittest_1::allocated_bytes(), i * 4u);
+    EXPECT_EQ(unittest_1::allocated_items(), i);
+
+    eq_elements(a,c);
+    check_usage(mempool::unittest_1);
+  }
+}
+
+TEST(mempool, set_context)
+{
+  for (int i = 0; i < 10; ++i) {
+    set<int> a;
+    unittest_1::set<int> b;
+    do_insert(a,b,i,i);
+    eq_elements(a,b);
+    check_usage(mempool::unittest_1);
+  }
+
+  for (int i = 1; i < 10; ++i) {
+    set<int> a;
+    unittest_1::set<int> b;
+    do_insert(a,b,i,0);
+    EXPECT_NE(a.find(i/2),a.end());
+    EXPECT_NE(b.find(i/2),b.end());
+    a.erase(a.find(i/2));
+    b.erase(b.find(i/2));
+    eq_elements(a,b);
+    check_usage(mempool::unittest_1);
+  }
+}
+
+struct obj {
+  MEMPOOL_CLASS_HELPERS();
+  int a;
+  int b;
+  obj() : a(1), b(1) {}
+  obj(int _a) : a(_a), b(2) {}
+  obj(int _a,int _b) : a(_a), b(_b) {}
+  friend inline bool operator<(const obj& l, const obj& r) {
+    return l.a < r.a;
+  }
+};
+MEMPOOL_DEFINE_OBJECT_FACTORY(obj, obj, unittest_2);
+
+TEST(mempool, test_factory)
+{
+  obj *o1 = new obj();
+  obj *o2 = new obj(10);
+  obj *o3 = new obj(20,30);
+  check_usage(mempool::unittest_2);
+  EXPECT_NE(o1,nullptr);
+  EXPECT_EQ(o1->a,1);
+  EXPECT_EQ(o1->b,1);
+  EXPECT_EQ(o2->a,10);
+  EXPECT_EQ(o2->b,2);
+  EXPECT_EQ(o3->a,20);
+  EXPECT_EQ(o3->b,30);
+
+  delete o1;
+  delete o2;
+  delete o3;
+  check_usage(mempool::unittest_2);
+}
+
+MEMPOOL_DEFINE_FACTORY(int, int, unittest_1);
+
+TEST(mempool, vector)
+{
+  {
+    unittest_1::vector<int> v;
+    v.push_back(1);
+    v.push_back(2);
+  }
+  {
+    unittest_2::vector<obj> v;
+    v.push_back(obj());
+    v.push_back(obj(1));
+  }
+}
+
+MEMPOOL_DEFINE_SET_FACTORY(int, int, unittest_1);
+MEMPOOL_DEFINE_SET_FACTORY(obj, obj, unittest_2);
+
+TEST(mempool, set)
+{
+  unittest_1::set<int> set_int;
+  set_int.insert(1);
+  set_int.insert(2);
+  unittest_2::set<obj> set_obj;
+  set_obj.insert(obj());
+  set_obj.insert(obj(1));
+  set_obj.insert(obj(1, 2));
+}
+
+MEMPOOL_DEFINE_MAP_FACTORY(int, int, int_int, unittest_1);
+MEMPOOL_DEFINE_MAP_FACTORY(int, obj, int_obj, unittest_2);
+
+TEST(mempool, map)
+{
+  {
+    unittest_1::map<int,int> v;
+    v[1] = 2;
+    v[3] = 4;
+  }
+  {
+    unittest_2::map<int,obj> v;
+    v[1] = obj();
+    v[2] = obj(2);
+    v[3] = obj(2, 3);
+  }
+}
+
+MEMPOOL_DEFINE_LIST_FACTORY(int, int, unittest_1);
+MEMPOOL_DEFINE_LIST_FACTORY(obj, obj, unittest_2);
+
+TEST(mempool, list)
+{
+  {
+    unittest_1::list<int> v;
+    v.push_back(1);
+    v.push_back(2);
+  }
+  {
+    unittest_2::list<obj> v;
+    v.push_back(obj());
+    v.push_back(obj(1));
+  }
+}
+
+MEMPOOL_DEFINE_UNORDERED_MAP_BASE_FACTORY(unittest_2);
+MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(int, obj, false, int_obj, unittest_2);
+
+TEST(mempool, unordered_map)
+{
+  unittest_2::unordered_map<int,obj> h;
+  h[1] = obj();
+  h[2] = obj(1);
+}
+
+int main(int argc, char **argv)
+{
+  vector<const char*> args;
+  argv_to_vec(argc, (const char **)argv, args);
+
+  global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
+  common_init_finish(g_ceph_context);
+
+  // enable debug mode for the tests
+  mempool::set_debug_mode(true);
+
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+
+
+/*
+ * Local Variables:
+ * compile-command: "cd ../../build ; make -j4 &&
+ *   make unittest_mempool &&
+ *   valgrind --tool=memcheck ./unittest_mempool --gtest_filter=*.*"
+ * End:
+ */