os/filestore: Destroy g_ceph_context and g_conf
author     Adam C. Emerson <aemerson@redhat.com>
           Fri, 9 Dec 2016 00:23:52 +0000 (19:23 -0500)
committer  Adam C. Emerson <aemerson@redhat.com>
           Thu, 22 Dec 2016 19:24:39 +0000 (14:24 -0500)
Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
25 files changed:
src/os/ObjectStore.cc
src/os/filestore/BtrfsFileStoreBackend.cc
src/os/filestore/CollectionIndex.h
src/os/filestore/DBObjectMap.cc
src/os/filestore/DBObjectMap.h
src/os/filestore/FileJournal.cc
src/os/filestore/FileJournal.h
src/os/filestore/FileStore.cc
src/os/filestore/FileStore.h
src/os/filestore/GenericFileStoreBackend.cc
src/os/filestore/HashIndex.cc
src/os/filestore/HashIndex.h
src/os/filestore/IndexManager.cc
src/os/filestore/IndexManager.h
src/os/filestore/Journal.h
src/os/filestore/JournalingObjectStore.cc
src/os/filestore/JournalingObjectStore.h
src/os/filestore/LFNIndex.cc
src/os/filestore/LFNIndex.h
src/os/filestore/WBThrottle.cc
src/os/filestore/XfsFileStoreBackend.cc
src/os/filestore/ZFSFileStoreBackend.cc
src/test/os/TestLFNIndex.cc
src/test/test_filejournal.cc
src/tools/ceph_objectstore_tool.cc
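
Nearly every hunk below applies the same mechanical change: code that used to log through the
global g_ceph_context and read options through g_conf now goes through a CephContext pointer
(cct) that each object stores and receives through its constructor, with dout_context pointed
at it and configuration read via cct->_conf. A minimal illustrative sketch of that pattern
follows; ExampleBackend and its choice of option are hypothetical and not part of this commit.

// Illustrative sketch only -- ExampleBackend is not a class in this commit.
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/debug.h"

#define dout_context cct                  // was: #define dout_context g_ceph_context
#define dout_subsys ceph_subsys_filestore

class ExampleBackend {
  CephContext* cct;                       // injected context replaces the global
  int op_threads;
public:
  explicit ExampleBackend(CephContext* cct)
    : cct(cct),
      op_threads(cct->_conf->filestore_op_threads)  // was: g_conf->filestore_op_threads
  {
    dout(10) << "created with " << op_threads << " op threads" << dendl;
  }
};

Carrying the context per object is what lets g_ceph_context and g_conf eventually be torn
down, as the subject line says: after this change nothing under os/filestore reaches for the
globals, so an ObjectStore instance depends only on the CephContext it was handed.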

diff --git a/src/os/ObjectStore.cc b/src/os/ObjectStore.cc
index 7c86949fe463df1ca37a73554faaea1ecaa55459..ee05f6b1f8e84a31175e79de9bc2af48a5063aac 100644
@@ -104,7 +104,7 @@ int ObjectStore::probe_block_device_fsid(
 #endif
 
   // okay, try FileStore (journal).
-  r = FileStore::get_block_device_fsid(path, fsid);
+  r = FileStore::get_block_device_fsid(cct, path, fsid);
   if (r == 0) {
     lgeneric_dout(cct, 0) << __func__ << " " << path << " is filestore, "
                          << *fsid << dendl;
diff --git a/src/os/filestore/BtrfsFileStoreBackend.cc b/src/os/filestore/BtrfsFileStoreBackend.cc
index 461d5c182749462019eb429ce4875c85ee8b8133..8718432ec671647206254d37744e69b9d9509723 100644
@@ -43,7 +43,7 @@
 
 #if defined(__linux__)
 
-#define dout_context g_ceph_context
+#define dout_context cct()
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "btrfsfilestorebackend(" << get_basedir_path() << ") "
@@ -56,8 +56,8 @@ BtrfsFileStoreBackend::BtrfsFileStoreBackend(FileStore *fs):
     GenericFileStoreBackend(fs), has_clone_range(false),
     has_snap_create(false), has_snap_destroy(false),
     has_snap_create_v2(false), has_wait_sync(false), stable_commits(false),
-    m_filestore_btrfs_clone_range(g_conf->filestore_btrfs_clone_range),
-    m_filestore_btrfs_snap (g_conf->filestore_btrfs_snap) { }
+    m_filestore_btrfs_clone_range(cct()->_conf->filestore_btrfs_clone_range),
+    m_filestore_btrfs_snap (cct()->_conf->filestore_btrfs_snap) { }
 
 int BtrfsFileStoreBackend::detect_features()
 {
diff --git a/src/os/filestore/CollectionIndex.h b/src/os/filestore/CollectionIndex.h
index c56ffeb4f40f91cad351ca05f4d2d01335a7dd2a..51431ac12efe77b4199cb72a7f092239e7545d2c 100644
 #include "common/RWLock.h"
 
 /**
- * CollectionIndex provides an interface for manipulating indexed collections
+  CollectionIndex provides an interface for manipulating indexed collections
  */
 class CollectionIndex {
+public:
+  CephContext* cct;
 protected:
   /**
    * Object encapsulating a returned path.
@@ -175,8 +177,8 @@ protected:
   /// Call prior to removing directory
   virtual int prep_delete() { return 0; }
 
-  explicit CollectionIndex(const coll_t& collection):
-    access_lock("CollectionIndex::access_lock", true, false) {}
+  CollectionIndex(CephContext* cct, const coll_t& collection)
+    : cct(cct), access_lock("CollectionIndex::access_lock", true, false) {}
 
   /*
    * Pre-hash the collection, this collection should map to a PG folder.
diff --git a/src/os/filestore/DBObjectMap.cc b/src/os/filestore/DBObjectMap.cc
index ed42875e68cf846bc7c61f3c5b254918864df6f7..c17395c5a25ac7f4ed5e32832a0575e201f52437 100644
@@ -19,7 +19,7 @@
 #include "common/config.h"
 #include "include/assert.h"
 
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "filestore "
@@ -149,7 +149,8 @@ string DBObjectMap::ghobject_key(const ghobject_t &oid)
 //   bad: plana8923501-10...4c.3.ffffffffffffffff.2
 // fixed: plana8923501-10...4c.3.CB767F2D.ffffffffffffffff.2
 // returns 0 for false, 1 for true, negative for error
-int DBObjectMap::is_buggy_ghobject_key_v1(const string &in)
+int DBObjectMap::is_buggy_ghobject_key_v1(CephContext* cct,
+                                         const string &in)
 {
   int dots = 5;  // skip 5 .'s
   const char *s = in.c_str();
@@ -951,7 +952,7 @@ int DBObjectMap::upgrade_to_v2()
         iter->valid() && count < 300;
         iter->next()) {
       dout(20) << __func__ << " key is " << iter->key() << dendl;
-      int r = is_buggy_ghobject_key_v1(iter->key());
+      int r = is_buggy_ghobject_key_v1(cct, iter->key());
       if (r < 0) {
        derr << __func__ << " bad key '" << iter->key() << "'" << dendl;
        return r;
diff --git a/src/os/filestore/DBObjectMap.h b/src/os/filestore/DBObjectMap.h
index 1f9cc0edeb60435befb99ce3807ae1d95bea4bd9..8be514377c442921f3e1b6cfa115450c23e3d67b 100644
@@ -325,7 +325,8 @@ public:
   /// String munging (public for testing)
   static string ghobject_key(const ghobject_t &oid);
   static string ghobject_key_v0(coll_t c, const ghobject_t &oid);
-  static int is_buggy_ghobject_key_v1(const string &in);
+  static int is_buggy_ghobject_key_v1(CephContext* cct,
+                                     const string &in);
 private:
   /// Implicit lock on Header->seq
   typedef ceph::shared_ptr<_Header> Header;
diff --git a/src/os/filestore/FileJournal.cc b/src/os/filestore/FileJournal.cc
index 55c9b11212a6370b8c08a29fa4e4ddb1f2105938..0781e2c6955f994cf91462aa2a7895abc0237950 100644
@@ -41,7 +41,7 @@
 #define O_DSYNC O_SYNC
 #endif
 
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_journal
 #undef dout_prefix
 #define dout_prefix *_dout << "journal "
@@ -164,9 +164,9 @@ int FileJournal::_open_block_device()
           << dendl;
   max_size = bdev_sz;
 
-  block_size = g_conf->journal_block_size;
+  block_size = cct->_conf->journal_block_size;
 
-  if (g_conf->journal_discard) {
+  if (cct->_conf->journal_discard) {
     discard = block_device_support_discard(fn.c_str());
     dout(10) << fn << " support discard: " << (int)discard << dendl;
   }
@@ -178,10 +178,10 @@ int FileJournal::_open_file(int64_t oldsize, blksize_t blksize,
                            bool create)
 {
   int ret;
-  int64_t conf_journal_sz(g_conf->osd_journal_size);
+  int64_t conf_journal_sz(cct->_conf->osd_journal_size);
   conf_journal_sz <<= 20;
 
-  if ((g_conf->osd_journal_size == 0) && (oldsize < ONE_MEG)) {
+  if ((cct->_conf->osd_journal_size == 0) && (oldsize < ONE_MEG)) {
     derr << "I'm sorry, I don't know how large of a journal to create."
         << "Please specify a block device to use as the journal OR "
         << "set osd_journal_size in your ceph.conf" << dendl;
@@ -189,7 +189,7 @@ int FileJournal::_open_file(int64_t oldsize, blksize_t blksize,
   }
 
   if (create && (oldsize < conf_journal_sz)) {
-    uint64_t newsize(g_conf->osd_journal_size);
+    uint64_t newsize(cct->_conf->osd_journal_size);
     newsize <<= 20;
     dout(10) << "_open extending to " << newsize << " bytes" << dendl;
     ret = ::ftruncate(fd, newsize);
@@ -229,9 +229,9 @@ int FileJournal::_open_file(int64_t oldsize, blksize_t blksize,
   else {
     max_size = oldsize;
   }
-  block_size = g_conf->journal_block_size;
+  block_size = cct->_conf->journal_block_size;
 
-  if (create && g_conf->journal_zero_on_create) {
+  if (create && cct->_conf->journal_zero_on_create) {
     derr << "FileJournal::_open_file : zeroing journal" << dendl;
     uint64_t write_size = 1 << 20;
     char *buf;
@@ -313,7 +313,7 @@ int FileJournal::create()
   header.fsid = fsid;
   header.max_size = max_size;
   header.block_size = block_size;
-  if (g_conf->journal_block_align || directio)
+  if (cct->_conf->journal_block_align || directio)
     header.alignment = block_size;
   else
     header.alignment = 16;  // at least stay word aligned on 64bit machines...
@@ -352,7 +352,7 @@ int FileJournal::create()
     goto free_buf;
   }
 
-  needed_space = ((int64_t)g_conf->osd_max_write_size) << 20;
+  needed_space = ((int64_t)cct->_conf->osd_max_write_size) << 20;
   needed_space += (2 * sizeof(entry_header_t)) + get_top();
   if (header.max_size - header.start < needed_space) {
     derr << "FileJournal::create: OSD journal is not large enough to hold "
@@ -813,8 +813,8 @@ int FileJournal::prepare_multi_write(bufferlist& bl, uint64_t& orig_ops, uint64_
   // gather queued writes
   off64_t queue_pos = write_pos;
 
-  int eleft = g_conf->journal_max_write_entries;
-  unsigned bmax = g_conf->journal_max_write_bytes;
+  int eleft = cct->_conf->journal_max_write_entries;
+  unsigned bmax = cct->_conf->journal_max_write_bytes;
 
   if (full_state != FULL_NOTFULL)
     return -ENOSPC;
@@ -865,14 +865,16 @@ int FileJournal::prepare_multi_write(bufferlist& bl, uint64_t& orig_ops, uint64_
       }
       if (eleft) {
         if (--eleft == 0) {
-          dout(20) << "prepare_multi_write hit max events per write " << g_conf->journal_max_write_entries << dendl;
+          dout(20) << "prepare_multi_write hit max events per write "
+                  << cct->_conf->journal_max_write_entries << dendl;
           batch_unpop_write(items);
           goto out;
         }
       }
       if (bmax) {
         if (bl.length() >= bmax) {
-          dout(20) << "prepare_multi_write hit max write size " << g_conf->journal_max_write_bytes << dendl;
+          dout(20) << "prepare_multi_write hit max write size "
+                  << cct->_conf->journal_max_write_bytes << dendl;
           batch_unpop_write(items);
           goto out;
         }
@@ -1025,9 +1027,9 @@ void FileJournal::do_write(bufferlist& bl)
     return;
 
   buffer::ptr hbp;
-  if (g_conf->journal_write_header_frequency &&
+  if (cct->_conf->journal_write_header_frequency &&
       (((++journaled_since_start) %
-       g_conf->journal_write_header_frequency) == 0)) {
+       cct->_conf->journal_write_header_frequency) == 0)) {
     must_write_header = true;
   }
 
@@ -1138,7 +1140,7 @@ void FileJournal::do_write(bufferlist& bl)
       ceph_abort();
     }
 #ifdef HAVE_POSIX_FADVISE
-    if (g_conf->filestore_fadvise)
+    if (cct->_conf->filestore_fadvise)
       posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 #endif
   }
@@ -1283,9 +1285,9 @@ void FileJournal::write_thread_entry()
 void FileJournal::do_aio_write(bufferlist& bl)
 {
 
-  if (g_conf->journal_write_header_frequency &&
+  if (cct->_conf->journal_write_header_frequency &&
       (((++journaled_since_start) %
-       g_conf->journal_write_header_frequency) == 0)) {
+       cct->_conf->journal_write_header_frequency) == 0)) {
     must_write_header = true;
   }
 
@@ -1535,7 +1537,7 @@ void FileJournal::check_aio_completion()
 
 int FileJournal::prepare_entry(vector<ObjectStore::Transaction>& tls, bufferlist* tbl) {
   dout(10) << "prepare_entry " << tls << dendl;
-  int data_len = g_conf->journal_align_min_size - 1;
+  int data_len = cct->_conf->journal_align_min_size - 1;
   int data_align = -1; // -1 indicates that we don't care about the alignment
   bufferlist bl;
   for (vector<ObjectStore::Transaction>::iterator p = tls.begin();
@@ -1839,11 +1841,11 @@ int FileJournal::set_throttle_params()
 {
   stringstream ss;
   bool valid = throttle.set_params(
-    g_conf->journal_throttle_low_threshhold,
-    g_conf->journal_throttle_high_threshhold,
-    g_conf->filestore_expected_throughput_bytes,
-    g_conf->journal_throttle_high_multiple,
-    g_conf->journal_throttle_max_multiple,
+    cct->_conf->journal_throttle_low_threshhold,
+    cct->_conf->journal_throttle_high_threshhold,
+    cct->_conf->filestore_expected_throughput_bytes,
+    cct->_conf->journal_throttle_high_multiple,
+    cct->_conf->journal_throttle_max_multiple,
     header.max_size - get_top(),
     &ss);
 
@@ -1954,7 +1956,7 @@ bool FileJournal::read_entry(
     derr << "Unable to read past sequence " << seq
         << " but header indicates the journal has committed up through "
         << header.committed_up_to << ", journal is corrupt" << dendl;
-    if (g_conf->journal_ignore_corruption) {
+    if (cct->_conf->journal_ignore_corruption) {
       if (corrupt)
        *corrupt = true;
       return false;
diff --git a/src/os/filestore/FileJournal.h b/src/os/filestore/FileJournal.h
index 281b03775642569b16727ba284140545727d8ef4..c804f354d58fa468b6a69174f850fce877bfd1f2 100644
@@ -31,9 +31,6 @@ using std::deque;
 # include <libaio.h>
 #endif
 
-#define dout_context g_ceph_context
-
-
 /**
  * Implements journaling on top of block device or file.
  *
@@ -391,14 +388,15 @@ private:
   }
 
  public:
-  FileJournal(uuid_d fsid, Finisher *fin, Cond *sync_cond, const char *f, bool dio=false, bool ai=true, bool faio=false) :
-    Journal(fsid, fin, sync_cond),
-    finisher_lock("FileJournal::finisher_lock", false, true, false, g_ceph_context),
+  FileJournal(CephContext* cct, uuid_d fsid, Finisher *fin, Cond *sync_cond,
+             const char *f, bool dio=false, bool ai=true, bool faio=false) :
+    Journal(cct, fsid, fin, sync_cond),
+    finisher_lock("FileJournal::finisher_lock", false, true, false, cct),
     journaled_seq(0),
     plug_journal_completions(false),
-    writeq_lock("FileJournal::writeq_lock", false, true, false, g_ceph_context),
+    writeq_lock("FileJournal::writeq_lock", false, true, false, cct),
     completions_lock(
-      "FileJournal::completions_lock", false, true, false, g_ceph_context),
+      "FileJournal::completions_lock", false, true, false, cct),
     fn(f),
     zero_buf(NULL),
     max_size(0), block_size(0),
@@ -418,30 +416,30 @@ private:
     full_state(FULL_NOTFULL),
     fd(-1),
     writing_seq(0),
-    throttle(g_conf->filestore_caller_concurrency),
-    write_lock("FileJournal::write_lock", false, true, false, g_ceph_context),
+    throttle(cct->_conf->filestore_caller_concurrency),
+    write_lock("FileJournal::write_lock", false, true, false, cct),
     write_stop(true),
     aio_stop(true),
     write_thread(this),
     write_finish_thread(this) {
 
       if (aio && !directio) {
-        derr << "FileJournal::_open_any: aio not supported without directio; disabling aio" << dendl;
+       lderr(cct) << "FileJournal::_open_any: aio not supported without directio; disabling aio" << dendl;
         aio = false;
       }
 #ifndef HAVE_LIBAIO
       if (aio) {
-        derr << "FileJournal::_open_any: libaio not compiled in; disabling aio" << dendl;
+       lderr(cct) << "FileJournal::_open_any: libaio not compiled in; disabling aio" << dendl;
         aio = false;
       }
 #endif
 
-      g_conf->add_observer(this);
+      cct->_conf->add_observer(this);
   }
   ~FileJournal() {
     assert(fd == -1);
     delete[] zero_buf;
-    g_conf->remove_observer(this);
+    cct->_conf->remove_observer(this);
   }
 
   int check();
@@ -539,5 +537,4 @@ private:
 
 WRITE_CLASS_ENCODER(FileJournal::header_t)
 
-#undef dout_context
 #endif
diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc
index 1af0dcc37cc8d4603b49b12878a5ece03a97c3fd..b8fc82d118d0fbbd88aedd926da4945998247133 100644
@@ -90,7 +90,7 @@ using ceph::crypto::SHA1;
 #define tracepoint(...)
 #endif
 
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "filestore(" << basedir << ") "
@@ -131,12 +131,13 @@ int FileStore::validate_hobject_key(const hobject_t &obj) const
   return len > m_filestore_max_xattr_value_size ? -ENAMETOOLONG : 0;
 }
 
-int FileStore::get_block_device_fsid(const string& path, uuid_d *fsid)
+int FileStore::get_block_device_fsid(CephContext* cct, const string& path,
+                                    uuid_d *fsid)
 {
   // make sure we don't try to use aio or direct_io (and get annoying
   // error messages from failing to do so); performance implications
   // should be irrelevant for this use
-  FileJournal j(*fsid, 0, 0, path.c_str(), false, false);
+  FileJournal j(cct, *fsid, 0, 0, path.c_str(), false, false);
   return j.peek_fsid(*fsid);
 }
 
@@ -247,7 +248,7 @@ int FileStore::lfn_open(const coll_t& cid,
 
   if (create)
     flags |= O_CREAT;
-  if (g_conf->filestore_odsync_write) {
+  if (cct->_conf->filestore_odsync_write) {
     flags |= O_DSYNC;
   }
 
@@ -487,7 +488,7 @@ int FileStore::lfn_unlink(const coll_t& cid, const ghobject_t& o,
        assert(!m_filestore_fail_eio || r != -EIO);
        return r;
       }
-      if (g_conf->filestore_debug_inject_read_err) {
+      if (cct->_conf->filestore_debug_inject_read_err) {
        debug_obj_on_delete(o);
       }
       if (!m_disable_wbthrottle) {
@@ -527,62 +528,62 @@ FileStore::FileStore(CephContext* cct, const std::string &base,
   fsid_fd(-1), op_fd(-1),
   basedir_fd(-1), current_fd(-1),
   backend(NULL),
-  index_manager(do_update),
+  index_manager(cct, do_update),
   lock("FileStore::lock"),
   force_sync(false),
   sync_entry_timeo_lock("FileStore::sync_entry_timeo_lock"),
-  timer(g_ceph_context, sync_entry_timeo_lock),
+  timer(cct, sync_entry_timeo_lock),
   stop(false), sync_thread(this),
-  fdcache(g_ceph_context),
-  wbthrottle(g_ceph_context),
+  fdcache(cct),
+  wbthrottle(cct),
   next_osr_id(0),
-  m_disable_wbthrottle(g_conf->filestore_odsync_write || 
-                      !g_conf->filestore_wbthrottle_enable),
-  throttle_ops(g_conf->filestore_caller_concurrency),
-  throttle_bytes(g_conf->filestore_caller_concurrency),
-  m_ondisk_finisher_num(g_conf->filestore_ondisk_finisher_threads),
-  m_apply_finisher_num(g_conf->filestore_apply_finisher_threads),
-  op_tp(g_ceph_context, "FileStore::op_tp", "tp_fstore_op", g_conf->filestore_op_threads, "filestore_op_threads"),
-  op_wq(this, g_conf->filestore_op_thread_timeout,
-       g_conf->filestore_op_thread_suicide_timeout, &op_tp),
+  m_disable_wbthrottle(cct->_conf->filestore_odsync_write ||
+                      !cct->_conf->filestore_wbthrottle_enable),
+  throttle_ops(cct->_conf->filestore_caller_concurrency),
+  throttle_bytes(cct->_conf->filestore_caller_concurrency),
+  m_ondisk_finisher_num(cct->_conf->filestore_ondisk_finisher_threads),
+  m_apply_finisher_num(cct->_conf->filestore_apply_finisher_threads),
+  op_tp(cct, "FileStore::op_tp", "tp_fstore_op", cct->_conf->filestore_op_threads, "filestore_op_threads"),
+  op_wq(this, cct->_conf->filestore_op_thread_timeout,
+       cct->_conf->filestore_op_thread_suicide_timeout, &op_tp),
   logger(NULL),
   read_error_lock("FileStore::read_error_lock"),
-  m_filestore_commit_timeout(g_conf->filestore_commit_timeout),
-  m_filestore_journal_parallel(g_conf->filestore_journal_parallel ),
-  m_filestore_journal_trailing(g_conf->filestore_journal_trailing),
-  m_filestore_journal_writeahead(g_conf->filestore_journal_writeahead),
-  m_filestore_fiemap_threshold(g_conf->filestore_fiemap_threshold),
-  m_filestore_max_sync_interval(g_conf->filestore_max_sync_interval),
-  m_filestore_min_sync_interval(g_conf->filestore_min_sync_interval),
-  m_filestore_fail_eio(g_conf->filestore_fail_eio),
-  m_filestore_fadvise(g_conf->filestore_fadvise),
+  m_filestore_commit_timeout(cct->_conf->filestore_commit_timeout),
+  m_filestore_journal_parallel(cct->_conf->filestore_journal_parallel ),
+  m_filestore_journal_trailing(cct->_conf->filestore_journal_trailing),
+  m_filestore_journal_writeahead(cct->_conf->filestore_journal_writeahead),
+  m_filestore_fiemap_threshold(cct->_conf->filestore_fiemap_threshold),
+  m_filestore_max_sync_interval(cct->_conf->filestore_max_sync_interval),
+  m_filestore_min_sync_interval(cct->_conf->filestore_min_sync_interval),
+  m_filestore_fail_eio(cct->_conf->filestore_fail_eio),
+  m_filestore_fadvise(cct->_conf->filestore_fadvise),
   do_update(do_update),
-  m_journal_dio(g_conf->journal_dio),
-  m_journal_aio(g_conf->journal_aio),
-  m_journal_force_aio(g_conf->journal_force_aio),
-  m_osd_rollback_to_cluster_snap(g_conf->osd_rollback_to_cluster_snap),
-  m_osd_use_stale_snap(g_conf->osd_use_stale_snap),
+  m_journal_dio(cct->_conf->journal_dio),
+  m_journal_aio(cct->_conf->journal_aio),
+  m_journal_force_aio(cct->_conf->journal_force_aio),
+  m_osd_rollback_to_cluster_snap(cct->_conf->osd_rollback_to_cluster_snap),
+  m_osd_use_stale_snap(cct->_conf->osd_use_stale_snap),
   m_filestore_do_dump(false),
   m_filestore_dump_fmt(true),
-  m_filestore_sloppy_crc(g_conf->filestore_sloppy_crc),
-  m_filestore_sloppy_crc_block_size(g_conf->filestore_sloppy_crc_block_size),
-  m_filestore_max_alloc_hint_size(g_conf->filestore_max_alloc_hint_size),
+  m_filestore_sloppy_crc(cct->_conf->filestore_sloppy_crc),
+  m_filestore_sloppy_crc_block_size(cct->_conf->filestore_sloppy_crc_block_size),
+  m_filestore_max_alloc_hint_size(cct->_conf->filestore_max_alloc_hint_size),
   m_fs_type(0),
   m_filestore_max_inline_xattr_size(0),
   m_filestore_max_inline_xattrs(0),
   m_filestore_max_xattr_value_size(0)
 {
-  m_filestore_kill_at.set(g_conf->filestore_kill_at);
+  m_filestore_kill_at.set(cct->_conf->filestore_kill_at);
   for (int i = 0; i < m_ondisk_finisher_num; ++i) {
     ostringstream oss;
     oss << "filestore-ondisk-" << i;
-    Finisher *f = new Finisher(g_ceph_context, oss.str(), "fn_odsk_fstore");
+    Finisher *f = new Finisher(cct, oss.str(), "fn_odsk_fstore");
     ondisk_finishers.push_back(f);
   }
   for (int i = 0; i < m_apply_finisher_num; ++i) {
     ostringstream oss;
     oss << "filestore-apply-" << i;
-    Finisher *f = new Finisher(g_ceph_context, oss.str(), "fn_appl_fstore");
+    Finisher *f = new Finisher(cct, oss.str(), "fn_appl_fstore");
     apply_finishers.push_back(f);
   }
 
@@ -595,15 +596,15 @@ FileStore::FileStore(CephContext* cct, const std::string &base,
   current_op_seq_fn = sss.str();
 
   ostringstream omss;
-  if (g_conf->filestore_omap_backend_path != "") {
-      omap_dir = g_conf->filestore_omap_backend_path;
+  if (cct->_conf->filestore_omap_backend_path != "") {
+      omap_dir = cct->_conf->filestore_omap_backend_path;
   } else {
       omss << basedir << "/current/omap";
       omap_dir = omss.str();
   }
 
   // initialize logger
-  PerfCountersBuilder plb(g_ceph_context, internal_name, l_filestore_first, l_filestore_last);
+  PerfCountersBuilder plb(cct, internal_name, l_filestore_first, l_filestore_last);
 
   plb.add_u64(l_filestore_journal_queue_ops, "journal_queue_ops", "Operations in journal queue");
   plb.add_u64(l_filestore_journal_ops, "journal_ops", "Active journal entries to be applied");
@@ -629,8 +630,8 @@ FileStore::FileStore(CephContext* cct, const std::string &base,
 
   logger = plb.create_perf_counters();
 
-  g_ceph_context->get_perfcounters_collection()->add(logger);
-  g_ceph_context->_conf->add_observer(this);
+  cct->get_perfcounters_collection()->add(logger);
+  cct->_conf->add_observer(this);
 
   superblock.compat_features = get_fs_initial_compat_set();
 }
@@ -645,8 +646,8 @@ FileStore::~FileStore()
     delete *it;
     *it = NULL;
   }
-  g_ceph_context->_conf->remove_observer(this);
-  g_ceph_context->get_perfcounters_collection()->remove(logger);
+  cct->_conf->remove_observer(this);
+  cct->get_perfcounters_collection()->remove(logger);
 
   if (journal)
     journal->logger = NULL;
@@ -682,7 +683,7 @@ void FileStore::collect_metadata(map<string,string> *pm)
   ss << "0x" << std::hex << m_fs_type << std::dec;
   (*pm)["filestore_f_type"] = ss.str();
 
-  if (g_conf->filestore_collect_device_partition_information) {
+  if (cct->_conf->filestore_collect_device_partition_information) {
     rc = get_device_by_uuid(get_fsid(), "PARTUUID", partition_path,
           dev_node);
   } else {
@@ -725,8 +726,10 @@ void FileStore::new_journal()
 {
   if (journalpath.length()) {
     dout(10) << "open_journal at " << journalpath << dendl;
-    journal = new FileJournal(fsid, &finisher, &sync_cond, journalpath.c_str(),
-                             m_journal_dio, m_journal_aio, m_journal_force_aio);
+    journal = new FileJournal(cct, fsid, &finisher, &sync_cond,
+                             journalpath.c_str(),
+                             m_journal_dio, m_journal_aio,
+                             m_journal_force_aio);
     if (journal)
       journal->logger = logger;
   }
@@ -740,7 +743,7 @@ int FileStore::dump_journal(ostream& out)
   if (!journalpath.length())
     return -EINVAL;
 
-  FileJournal *journal = new FileJournal(fsid, &finisher, &sync_cond, journalpath.c_str(), m_journal_dio);
+  FileJournal *journal = new FileJournal(cct, fsid, &finisher, &sync_cond, journalpath.c_str(), m_journal_dio);
   r = journal->dump(out);
   delete journal;
   return r;
@@ -873,7 +876,7 @@ int FileStore::mkfs()
   }
 
   // superblock
-  superblock.omap_backend = g_conf->filestore_omap_backend;
+  superblock.omap_backend = cct->_conf->filestore_omap_backend;
   ret = write_superblock();
   if (ret < 0) {
     derr << "mkfs: write_superblock() failed: "
@@ -936,7 +939,7 @@ int FileStore::mkfs()
   }
   ret = KeyValueDB::test_init(superblock.omap_backend, omap_dir);
   if (ret < 0) {
-    derr << "mkfs failed to create " << g_conf->filestore_omap_backend << dendl;
+    derr << "mkfs failed to create " << cct->_conf->filestore_omap_backend << dendl;
     goto close_fsid_fd;
   }
   // create fsid under omap
@@ -988,7 +991,7 @@ int FileStore::mkfs()
     dout(1) << "FileStore::mkfs: omap fsid is already set to " << fsid << dendl;
   }
 
-  dout(1) << g_conf->filestore_omap_backend << " db exists/created" << dendl;
+  dout(1) << cct->_conf->filestore_omap_backend << " db exists/created" << dendl;
 
   // journal?
   ret = mkjournal();
@@ -1394,7 +1397,7 @@ int FileStore::mount()
         << cpp_strerror(ret) << dendl;
     goto close_fsid_fd;
   } else if (ret == 0) {
-    if (do_update || (int)version_stamp < g_conf->filestore_update_to) {
+    if (do_update || (int)version_stamp < cct->_conf->filestore_update_to) {
       derr << "FileStore::mount: stale version stamp detected: "
           << version_stamp
           << ". Proceeding, do_update "
@@ -1406,7 +1409,7 @@ int FileStore::mount()
       derr << "FileStore::mount: stale version stamp " << version_stamp
           << ". Please run the FileStore update script before starting the "
           << "OSD, or set filestore_update_to to " << target_version
-          << " (currently " << g_conf->filestore_update_to << ")"
+          << " (currently " << cct->_conf->filestore_update_to << ")"
           << dendl;
       goto close_fsid_fd;
     }
@@ -1617,7 +1620,7 @@ int FileStore::mount()
 
   dout(0) << "start omap initiation" << dendl;
   if (!(generic_flags & SKIP_MOUNT_OMAP)) {
-    KeyValueDB * omap_store = KeyValueDB::create(g_ceph_context,
+    KeyValueDB * omap_store = KeyValueDB::create(cct,
                                                 superblock.omap_backend,
                                                 omap_dir);
     if (omap_store == NULL)
@@ -1628,7 +1631,7 @@ int FileStore::mount()
     }
 
     if (superblock.omap_backend == "rocksdb")
-      ret = omap_store->init(g_conf->filestore_rocksdb_options);
+      ret = omap_store->init(cct->_conf->filestore_rocksdb_options);
     else
       ret = omap_store->init();
 
@@ -1655,7 +1658,7 @@ int FileStore::mount()
     }
     stringstream err2;
 
-    if (g_conf->filestore_debug_omap_check && !dbomap->check(err2)) {
+    if (cct->_conf->filestore_debug_omap_check && !dbomap->check(err2)) {
       derr << err2.str() << dendl;
       delete dbomap;
       ret = -EINVAL;
@@ -1728,7 +1731,7 @@ int FileStore::mount()
     wbthrottle.start();
   } else {
     dout(0) << "mount INFO: WbThrottle is disabled" << dendl;
-    if (g_conf->filestore_odsync_write) {
+    if (cct->_conf->filestore_odsync_write) {
       dout(0) << "mount INFO: O_DSYNC write is enabled" << dendl;
     }
   }
@@ -1749,7 +1752,7 @@ int FileStore::mount()
 
   {
     stringstream err2;
-    if (g_conf->filestore_debug_omap_check && !object_map->check(err2)) {
+    if (cct->_conf->filestore_debug_omap_check && !object_map->check(err2)) {
       derr << err2.str() << dendl;
       ret = -EINVAL;
       goto stop_sync;
@@ -1771,7 +1774,7 @@ int FileStore::mount()
   timer.init();
 
   // upgrade?
-  if (g_conf->filestore_update_to >= (int)get_target_version()) {
+  if (cct->_conf->filestore_update_to >= (int)get_target_version()) {
     int err = upgrade();
     if (err < 0) {
       derr << "error converting store" << dendl;
@@ -1984,12 +1987,12 @@ void FileStore::_do_op(OpSequencer *osr, ThreadPool::TPHandle &handle)
     wbthrottle.throttle();
   }
   // inject a stall?
-  if (g_conf->filestore_inject_stall) {
-    int orig = g_conf->filestore_inject_stall;
+  if (cct->_conf->filestore_inject_stall) {
+    int orig = cct->_conf->filestore_inject_stall;
     dout(5) << "_do_op filestore_inject_stall " << orig << ", sleeping" << dendl;
-    for (int n = 0; n < g_conf->filestore_inject_stall; n++)
+    for (int n = 0; n < cct->_conf->filestore_inject_stall; n++)
       sleep(1);
-    g_conf->set_val("filestore_inject_stall", "0");
+    cct->_conf->set_val("filestore_inject_stall", "0");
     dout(5) << "_do_op done stalling" << dendl;
   }
 
@@ -2058,7 +2061,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector<Transaction>& tls,
   ObjectStore::Transaction::collect_contexts(
     tls, &onreadable, &ondisk, &onreadable_sync);
 
-  if (g_conf->objectstore_blackhole) {
+  if (cct->_conf->objectstore_blackhole) {
     dout(0) << __func__ << " objectstore_blackhole = TRUE, dropping transaction"
            << dendl;
     delete ondisk;
@@ -2076,7 +2079,7 @@ int FileStore::queue_transactions(Sequencer *posr, vector<Transaction>& tls,
     dout(5) << "queue_transactions existing " << osr << " " << *osr << dendl;
   } else {
     osr = new OpSequencer(cct, next_osr_id.inc());
-    osr->set_cct(g_ceph_context);
+    osr->set_cct(cct);
     osr->parent = posr;
     posr->p = osr;
     dout(5) << "queue_transactions new " << osr << " " << *osr << dendl;
@@ -3019,7 +3022,7 @@ void FileStore::_do_transaction(
        *_dout << dendl;
 
        if (r == -EMFILE) {
-         dump_open_fds(g_ceph_context);
+         dump_open_fds(cct);
        }
 
        assert(0 == "unexpected error");
@@ -3064,7 +3067,7 @@ int FileStore::stat(
             << " = " << r
             << " (size " << st->st_size << ")" << dendl;
   }
-  if (g_conf->filestore_debug_inject_read_err &&
+  if (cct->_conf->filestore_debug_inject_read_err &&
       debug_mdata_eio(oid)) {
     return -EIO;
   } else {
@@ -3154,7 +3157,7 @@ int FileStore::read(
 
   dout(10) << "FileStore::read " << cid << "/" << oid << " " << offset << "~"
           << got << "/" << len << dendl;
-  if (g_conf->filestore_debug_inject_read_err &&
+  if (cct->_conf->filestore_debug_inject_read_err &&
       debug_data_eio(oid)) {
     return -EIO;
   } else {
@@ -3407,7 +3410,7 @@ int FileStore::_zero(const coll_t& cid, const ghobject_t& oid, uint64_t offset,
   dout(15) << "zero " << cid << "/" << oid << " " << offset << "~" << len << dendl;
   int ret = 0;
 
-  if (g_conf->filestore_punch_hole) {
+  if (cct->_conf->filestore_punch_hole) {
 #ifdef CEPH_HAVE_FALLOCATE
 # if !defined(DARWIN) && !defined(__FreeBSD__)
 #    ifdef FALLOC_FL_KEEP_SIZE
@@ -3792,8 +3795,9 @@ int FileStore::_clone_range(const coll_t& oldcid, const ghobject_t& oldoid, cons
 
 class SyncEntryTimeout : public Context {
 public:
-  explicit SyncEntryTimeout(int commit_timeo)
-    : m_commit_timeo(commit_timeo)
+  CephContext* cct;
+  explicit SyncEntryTimeout(CephContext* cct, int commit_timeo)
+    : cct(cct), m_commit_timeo(commit_timeo)
   {
   }
 
@@ -3859,7 +3863,7 @@ void FileStore::sync_entry()
 
       sync_entry_timeo_lock.Lock();
       SyncEntryTimeout *sync_entry_timeo =
-       new SyncEntryTimeout(m_filestore_commit_timeout);
+       new SyncEntryTimeout(cct, m_filestore_commit_timeout);
       timer.add_event_after(m_filestore_commit_timeout, sync_entry_timeo);
       sync_entry_timeo_lock.Unlock();
 
@@ -3867,7 +3871,7 @@ void FileStore::sync_entry()
 
       dout(15) << "sync_entry committing " << cp << dendl;
       stringstream errstream;
-      if (g_conf->filestore_debug_omap_check && !object_map->check(errstream)) {
+      if (cct->_conf->filestore_debug_omap_check && !object_map->check(errstream)) {
        derr << errstream.str() << dendl;
        ceph_abort();
       }
@@ -3972,7 +3976,7 @@ void FileStore::sync_entry()
     }
 
     lock.Lock();
-    finish_contexts(g_ceph_context, fin, 0);
+    finish_contexts(cct, fin, 0);
     fin.clear();
     if (!sync_waiters.empty()) {
       dout(10) << "sync_entry more waiters, committing again" << dendl;
@@ -4049,7 +4053,7 @@ void FileStore::flush()
 {
   dout(10) << "flush" << dendl;
 
-  if (g_conf->filestore_blackhole) {
+  if (cct->_conf->filestore_blackhole) {
     // wait forever
     Mutex lock("FileStore::flush::lock");
     Cond cond;
@@ -4293,7 +4297,7 @@ int FileStore::getattr(const coll_t& _cid, const ghobject_t& oid, const char *na
  out:
   dout(10) << "getattr " << cid << "/" << oid << " '" << name << "' = " << r << dendl;
   assert(!m_filestore_fail_eio || r != -EIO);
-  if (g_conf->filestore_debug_inject_read_err &&
+  if (cct->_conf->filestore_debug_inject_read_err &&
       debug_mdata_eio(oid)) {
     return -EIO;
   } else {
@@ -4367,7 +4371,7 @@ int FileStore::getattrs(const coll_t& _cid, const ghobject_t& oid, map<string,bu
   dout(10) << "getattrs " << cid << "/" << oid << " = " << r << dendl;
   assert(!m_filestore_fail_eio || r != -EIO);
 
-  if (g_conf->filestore_debug_inject_read_err &&
+  if (cct->_conf->filestore_debug_inject_read_err &&
       debug_mdata_eio(oid)) {
     return -EIO;
   } else {
@@ -5271,7 +5275,7 @@ void FileStore::_inject_failure()
     dout(5) << "_inject_failure " << (final+1) << " -> " << final << dendl;
     if (final == 0) {
       derr << "_inject_failure KILLING" << dendl;
-      g_ceph_context->_log->flush();
+      cct->_log->flush();
       _exit(1);
     }
   }
@@ -5442,7 +5446,7 @@ int FileStore::_split_collection(const coll_t& cid,
     _close_replay_guard(cid, spos);
     _close_replay_guard(dest, spos);
   }
-  if (!r && g_conf->filestore_debug_verify_split) {
+  if (!r && cct->_conf->filestore_debug_verify_split) {
     vector<ghobject_t> objects;
     ghobject_t next;
     while (1) {
@@ -5626,21 +5630,21 @@ int FileStore::set_throttle_params()
 {
   stringstream ss;
   bool valid = throttle_bytes.set_params(
-    g_conf->filestore_queue_low_threshhold,
-    g_conf->filestore_queue_high_threshhold,
-    g_conf->filestore_expected_throughput_bytes,
-    g_conf->filestore_queue_high_delay_multiple,
-    g_conf->filestore_queue_max_delay_multiple,
-    g_conf->filestore_queue_max_bytes,
+    cct->_conf->filestore_queue_low_threshhold,
+    cct->_conf->filestore_queue_high_threshhold,
+    cct->_conf->filestore_expected_throughput_bytes,
+    cct->_conf->filestore_queue_high_delay_multiple,
+    cct->_conf->filestore_queue_max_delay_multiple,
+    cct->_conf->filestore_queue_max_bytes,
     &ss);
 
   valid &= throttle_ops.set_params(
-    g_conf->filestore_queue_low_threshhold,
-    g_conf->filestore_queue_high_threshhold,
-    g_conf->filestore_expected_throughput_ops,
-    g_conf->filestore_queue_high_delay_multiple,
-    g_conf->filestore_queue_max_delay_multiple,
-    g_conf->filestore_queue_max_ops,
+    cct->_conf->filestore_queue_low_threshhold,
+    cct->_conf->filestore_queue_high_threshhold,
+    cct->_conf->filestore_expected_throughput_ops,
+    cct->_conf->filestore_queue_high_delay_multiple,
+    cct->_conf->filestore_queue_max_delay_multiple,
+    cct->_conf->filestore_queue_max_ops,
     &ss);
 
   logger->set(l_filestore_op_queue_max_ops, throttle_ops.get_max());
@@ -5704,46 +5708,46 @@ void FileStore::set_xattr_limits_via_conf()
   switch (m_fs_type) {
 #if defined(__linux__)
   case XFS_SUPER_MAGIC:
-    fs_xattr_size = g_conf->filestore_max_inline_xattr_size_xfs;
-    fs_xattrs = g_conf->filestore_max_inline_xattrs_xfs;
-    fs_xattr_max_value_size = g_conf->filestore_max_xattr_value_size_xfs;
+    fs_xattr_size = cct->_conf->filestore_max_inline_xattr_size_xfs;
+    fs_xattrs = cct->_conf->filestore_max_inline_xattrs_xfs;
+    fs_xattr_max_value_size = cct->_conf->filestore_max_xattr_value_size_xfs;
     break;
   case BTRFS_SUPER_MAGIC:
-    fs_xattr_size = g_conf->filestore_max_inline_xattr_size_btrfs;
-    fs_xattrs = g_conf->filestore_max_inline_xattrs_btrfs;
-    fs_xattr_max_value_size = g_conf->filestore_max_xattr_value_size_btrfs;
+    fs_xattr_size = cct->_conf->filestore_max_inline_xattr_size_btrfs;
+    fs_xattrs = cct->_conf->filestore_max_inline_xattrs_btrfs;
+    fs_xattr_max_value_size = cct->_conf->filestore_max_xattr_value_size_btrfs;
     break;
 #endif
   default:
-    fs_xattr_size = g_conf->filestore_max_inline_xattr_size_other;
-    fs_xattrs = g_conf->filestore_max_inline_xattrs_other;
-    fs_xattr_max_value_size = g_conf->filestore_max_xattr_value_size_other;
+    fs_xattr_size = cct->_conf->filestore_max_inline_xattr_size_other;
+    fs_xattrs = cct->_conf->filestore_max_inline_xattrs_other;
+    fs_xattr_max_value_size = cct->_conf->filestore_max_xattr_value_size_other;
     break;
   }
 
   // Use override value if set
-  if (g_conf->filestore_max_inline_xattr_size)
-    m_filestore_max_inline_xattr_size = g_conf->filestore_max_inline_xattr_size;
+  if (cct->_conf->filestore_max_inline_xattr_size)
+    m_filestore_max_inline_xattr_size = cct->_conf->filestore_max_inline_xattr_size;
   else
     m_filestore_max_inline_xattr_size = fs_xattr_size;
 
   // Use override value if set
-  if (g_conf->filestore_max_inline_xattrs)
-    m_filestore_max_inline_xattrs = g_conf->filestore_max_inline_xattrs;
+  if (cct->_conf->filestore_max_inline_xattrs)
+    m_filestore_max_inline_xattrs = cct->_conf->filestore_max_inline_xattrs;
   else
     m_filestore_max_inline_xattrs = fs_xattrs;
 
   // Use override value if set
-  if (g_conf->filestore_max_xattr_value_size)
-    m_filestore_max_xattr_value_size = g_conf->filestore_max_xattr_value_size;
+  if (cct->_conf->filestore_max_xattr_value_size)
+    m_filestore_max_xattr_value_size = cct->_conf->filestore_max_xattr_value_size;
   else
     m_filestore_max_xattr_value_size = fs_xattr_max_value_size;
 
-  if (m_filestore_max_xattr_value_size < g_conf->osd_max_object_name_len) {
+  if (m_filestore_max_xattr_value_size < cct->_conf->osd_max_object_name_len) {
     derr << "WARNING: max attr value size ("
         << m_filestore_max_xattr_value_size
         << ") is smaller than osd_max_object_name_len ("
-        << g_conf->osd_max_object_name_len
+        << cct->_conf->osd_max_object_name_len
         << ").  Your backend filesystem appears to not support attrs large "
         << "enough to handle the configured max rados name size.  You may get "
         << "unexpected ENAMETOOLONG errors on rados operations or buggy "
diff --git a/src/os/filestore/FileStore.h b/src/os/filestore/FileStore.h
index 2288965805cebbb466ca7488b6943ff910fbd4d9..24fe14a580bc3957ae75b90ca172ac664457ad22 100644
@@ -121,8 +121,8 @@ public:
     return target_version;
   }
 
-  static int get_block_device_fsid(const string& path, uuid_d *fsid);
-
+  static int get_block_device_fsid(CephContext* cct, const string& path,
+                                  uuid_d *fsid);
   struct FSPerfTracker {
     PerfCounters::avg_tracker<uint64_t> os_commit_latency;
     PerfCounters::avg_tracker<uint64_t> os_apply_latency;
@@ -314,7 +314,7 @@ private:
     void flush() {
       Mutex::Locker l(qlock);
 
-      while (g_conf->filestore_blackhole)
+      while (cct->_conf->filestore_blackhole)
        cond.Wait(qlock);  // wait forever
 
 
@@ -831,6 +831,10 @@ public:
   explicit FileStoreBackend(FileStore *fs) : filestore(fs) {}
   virtual ~FileStoreBackend() {}
 
+  CephContext* cct() const {
+    return filestore->cct;
+  }
+
   static FileStoreBackend *create(long f_type, FileStore *fs);
 
   virtual const char *get_name() = 0;
diff --git a/src/os/filestore/GenericFileStoreBackend.cc b/src/os/filestore/GenericFileStoreBackend.cc
index 7206557169b8e45057608338528f607dbfe65cb2..4821621f456613ee166c92204722d91c7d73e088 100644
@@ -46,7 +46,7 @@
 #define SLOPPY_CRC_XATTR "user.cephos.scrc"
 
 
-#define dout_context g_ceph_context
+#define dout_context cct()
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "genericfilestorebackend(" << get_basedir_path() << ") "
@@ -60,10 +60,10 @@ GenericFileStoreBackend::GenericFileStoreBackend(FileStore *fs):
   ioctl_fiemap(false),
   seek_data_hole(false),
   use_splice(false),
-  m_filestore_fiemap(g_conf->filestore_fiemap),
-  m_filestore_seek_data_hole(g_conf->filestore_seek_data_hole),
-  m_filestore_fsync_flushes_journal_data(g_conf->filestore_fsync_flushes_journal_data),
-  m_filestore_splice(g_conf->filestore_splice) {}
+  m_filestore_fiemap(cct()->_conf->filestore_fiemap),
+  m_filestore_seek_data_hole(cct()->_conf->filestore_seek_data_hole),
+  m_filestore_fsync_flushes_journal_data(cct()->_conf->filestore_fsync_flushes_journal_data),
+  m_filestore_splice(cct()->_conf->filestore_splice) {}
 
 int GenericFileStoreBackend::detect_features()
 {
diff --git a/src/os/filestore/HashIndex.cc b/src/os/filestore/HashIndex.cc
index b88fb2a86c63647431af5e98d8935552a35c07b7..39efacd1d2b79bbac3c0dd231a903040074200e3 100644
@@ -21,7 +21,7 @@
 
 #include "common/errno.h"
 #include "common/debug.h"
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_filestore
 
 const string HashIndex::SUBDIR_ATTR = "contents";
diff --git a/src/os/filestore/HashIndex.h b/src/os/filestore/HashIndex.h
index 73150c7cba63af6349c5d70d0020533af33d3c79..e3fa3d9ab32e01e51a9acf0b89d979b3ead331ae 100644
@@ -134,13 +134,14 @@ private:
 public:
   /// Constructor.
   HashIndex(
+    CephContext* cct,
     coll_t collection,     ///< [in] Collection
     const char *base_path, ///< [in] Path to the index root.
     int merge_at,          ///< [in] Merge threshhold.
     int split_multiple,           ///< [in] Split threshhold.
     uint32_t index_version,///< [in] Index version
     double retry_probability=0) ///< [in] retry probability
-    : LFNIndex(collection, base_path, index_version, retry_probability),
+    : LFNIndex(cct, collection, base_path, index_version, retry_probability),
       merge_threshold(merge_at),
       split_multiplier(split_multiple) {}
 
diff --git a/src/os/filestore/IndexManager.cc b/src/os/filestore/IndexManager.cc
index b5874088741a39f6bbd95a939241ea4b0d9a7dce..c148afc46c80f679d7dcd868c56e6e14ce0a2ffe 100644
@@ -78,10 +78,10 @@ int IndexManager::init_index(coll_t c, const char *path, uint32_t version) {
   int r = set_version(path, version);
   if (r < 0)
     return r;
-  HashIndex index(c, path, g_conf->filestore_merge_threshold,
-                 g_conf->filestore_split_multiple,
+  HashIndex index(cct, c, path, cct->_conf->filestore_merge_threshold,
+                 cct->_conf->filestore_split_multiple,
                  version,
-                 g_conf->filestore_index_retry_probability);
+                 cct->_conf->filestore_index_retry_probability);
   return index.init();
 }
 
@@ -100,8 +100,9 @@ int IndexManager::build_index(coll_t c, const char *path, CollectionIndex **inde
     case CollectionIndex::HASH_INDEX_TAG_2: // fall through
     case CollectionIndex::HOBJECT_WITH_POOL: {
       // Must be a HashIndex
-      *index = new HashIndex(c, path, g_conf->filestore_merge_threshold,
-                                  g_conf->filestore_split_multiple, version);
+      *index = new HashIndex(cct, c, path,
+                            cct->_conf->filestore_merge_threshold,
+                            cct->_conf->filestore_split_multiple, version);
       return 0;
     }
     default: ceph_abort();
@@ -109,10 +110,10 @@ int IndexManager::build_index(coll_t c, const char *path, CollectionIndex **inde
 
   } else {
     // No need to check
-    *index = new HashIndex(c, path, g_conf->filestore_merge_threshold,
-                                g_conf->filestore_split_multiple,
-                                CollectionIndex::HOBJECT_WITH_POOL,
-                                g_conf->filestore_index_retry_probability);
+    *index = new HashIndex(cct, c, path, cct->_conf->filestore_merge_threshold,
+                          cct->_conf->filestore_split_multiple,
+                          CollectionIndex::HOBJECT_WITH_POOL,
+                          cct->_conf->filestore_index_retry_probability);
     return 0;
   }
 }
diff --git a/src/os/filestore/IndexManager.h b/src/os/filestore/IndexManager.h
index c5748a5f60b7be687b4b077eb2e9158e588e0008..4162ffa5f347ff671fbc4f4d778ad2bdd0b06cec 100644
@@ -49,6 +49,7 @@ struct Index {
  * This is enforced by using CollectionIndex::access_lock
  */
 class IndexManager {
+  CephContext* cct;
   RWLock lock; ///< Lock for Index Manager
   bool upgrade;
   ceph::unordered_map<coll_t, CollectionIndex* > col_indices;
@@ -68,8 +69,10 @@ class IndexManager {
   bool get_index_optimistic(coll_t c, Index *index);
 public:
   /// Constructor
-  explicit IndexManager(bool upgrade) : lock("IndexManager lock"),
-                                       upgrade(upgrade) {}
+  explicit IndexManager(CephContext* cct,
+                       bool upgrade) : cct(cct),
+                                       lock("IndexManager lock"),
+                                       upgrade(upgrade) {}
 
   ~IndexManager();
 
diff --git a/src/os/filestore/Journal.h b/src/os/filestore/Journal.h
index ca30da4794feb0748b14f1c9d9ff133e3a4c2a9e..eadb2987fa26a4d1d3b688e622e37236d15587d5 100644
@@ -31,14 +31,15 @@ protected:
   uuid_d fsid;
   Finisher *finisher;
 public:
+  CephContext* cct;
   PerfCounters *logger;
 protected:
   Cond *do_sync_cond;
   bool wait_on_full;
 
 public:
-  Journal(uuid_d f, Finisher *fin, Cond *c=0) :
-    fsid(f), finisher(fin), logger(NULL),
+  Journal(CephContext* cct, uuid_d f, Finisher *fin, Cond *c=0) :
+    fsid(f), finisher(fin), cct(cct), logger(NULL),
     do_sync_cond(c),
     wait_on_full(false) { }
   virtual ~Journal() { }
diff --git a/src/os/filestore/JournalingObjectStore.cc b/src/os/filestore/JournalingObjectStore.cc
index 5163065e336bddbcdf9f36249eb5cdf3b1620f12..9d2e7f29f561bd831d3a572d37ddc1db49168354 100644
@@ -5,7 +5,7 @@
 #include "common/errno.h"
 #include "common/debug.h"
 
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_journal
 #undef dout_prefix
 #define dout_prefix *_dout << "journal "
@@ -40,11 +40,12 @@ int JournalingObjectStore::journal_replay(uint64_t fs_op_seq)
 {
   dout(10) << "journal_replay fs op_seq " << fs_op_seq << dendl;
 
-  if (g_conf->journal_replay_from) {
-    dout(0) << "journal_replay forcing replay from " << g_conf->journal_replay_from
+  if (cct->_conf->journal_replay_from) {
+    dout(0) << "journal_replay forcing replay from "
+           << cct->_conf->journal_replay_from
            << " instead of " << fs_op_seq << dendl;
     // the previous op is the last one committed
-    fs_op_seq = g_conf->journal_replay_from - 1;
+    fs_op_seq = cct->_conf->journal_replay_from - 1;
   }
 
   uint64_t op_seq = fs_op_seq;
@@ -124,7 +125,8 @@ uint64_t JournalingObjectStore::ApplyManager::op_apply_start(uint64_t op)
     dout(10) << "op_apply_start blocked, waiting" << dendl;
     blocked_cond.Wait(apply_lock);
   }
-  dout(10) << "op_apply_start " << op << " open_ops " << open_ops << " -> " << (open_ops+1) << dendl;
+  dout(10) << "op_apply_start " << op << " open_ops " << open_ops << " -> "
+          << (open_ops+1) << dendl;
   assert(!blocked);
   assert(op > committed_seq);
   open_ops++;
@@ -134,10 +136,9 @@ uint64_t JournalingObjectStore::ApplyManager::op_apply_start(uint64_t op)
 void JournalingObjectStore::ApplyManager::op_apply_finish(uint64_t op)
 {
   Mutex::Locker l(apply_lock);
-  dout(10) << "op_apply_finish " << op << " open_ops " << open_ops
-          << " -> " << (open_ops-1)
-          << ", max_applied_seq " << max_applied_seq << " -> " << MAX(op, max_applied_seq)
-          << dendl;
+  dout(10) << "op_apply_finish " << op << " open_ops " << open_ops << " -> "
+          << (open_ops-1) << ", max_applied_seq " << max_applied_seq << " -> "
+          << MAX(op, max_applied_seq) << dendl;
   --open_ops;
   assert(open_ops >= 0);
 
@@ -191,11 +192,11 @@ bool JournalingObjectStore::ApplyManager::commit_start()
   {
     Mutex::Locker l(apply_lock);
     dout(10) << "commit_start max_applied_seq " << max_applied_seq
-            << ", open_ops " << open_ops
-            << dendl;
+            << ", open_ops " << open_ops << dendl;
     blocked = true;
     while (open_ops > 0) {
-      dout(10) << "commit_start waiting for " << open_ops << " open ops to drain" << dendl;
+      dout(10) << "commit_start waiting for " << open_ops
+              << " open ops to drain" << dendl;
       blocked_cond.Wait(apply_lock);
     }
     assert(open_ops == 0);
@@ -227,7 +228,8 @@ void JournalingObjectStore::ApplyManager::commit_started()
 {
   Mutex::Locker l(apply_lock);
   // allow new ops. (underlying fs should now be committing all prior ops)
-  dout(10) << "commit_started committing " << committing_seq << ", unblocking" << dendl;
+  dout(10) << "commit_started committing " << committing_seq << ", unblocking"
+          << dendl;
   blocked = false;
   blocked_cond.Signal();
 }
diff --git a/src/os/filestore/JournalingObjectStore.h b/src/os/filestore/JournalingObjectStore.h
index 5e07065ec61335312f902fbd526ac8e00847fbaa..8b0d3eb193cb282a6c7f1f0ee6703df18d930721 100644
@@ -27,12 +27,13 @@ protected:
 
 
   class SubmitManager {
+    CephContext* cct;
     Mutex lock;
     uint64_t op_seq;
     uint64_t op_submitted;
   public:
-    SubmitManager() :
-      lock("JOS::SubmitManager::lock", false, true, false, g_ceph_context),
+    SubmitManager(CephContext* cct) :
+      cct(cct), lock("JOS::SubmitManager::lock", false, true, false, cct),
       op_seq(0), op_submitted(0)
     {}
     uint64_t op_submit_start();
@@ -47,6 +48,7 @@ protected:
   } submit_manager;
 
   class ApplyManager {
+    CephContext* cct;
     Journal *&journal;
     Finisher &finisher;
 
@@ -61,13 +63,13 @@ protected:
     uint64_t committing_seq, committed_seq;
 
   public:
-    ApplyManager(Journal *&j, Finisher &f) :
-      journal(j), finisher(f),
-      apply_lock("JOS::ApplyManager::apply_lock", false, true, false, g_ceph_context),
+    ApplyManager(CephContext* cct, Journal *&j, Finisher &f) :
+      cct(cct), journal(j), finisher(f),
+      apply_lock("JOS::ApplyManager::apply_lock", false, true, false, cct),
       blocked(false),
       open_ops(0),
       max_applied_seq(0),
-      com_lock("JOS::ApplyManager::com_lock", false, true, false, g_ceph_context),
+      com_lock("JOS::ApplyManager::com_lock", false, true, false, cct),
       committing_seq(0), committed_seq(0) {}
     void reset() {
       assert(open_ops == 0);
@@ -133,7 +135,8 @@ public:
     : ObjectStore(cct, path),
       journal(NULL),
       finisher(cct, "JournalObjectStore", "fn_jrn_objstore"),
-      apply_manager(journal, finisher),
+      submit_manager(cct),
+      apply_manager(cct, journal, finisher),
       replaying(false) {}
 
   ~JournalingObjectStore() {
diff --git a/src/os/filestore/LFNIndex.cc b/src/os/filestore/LFNIndex.cc
index 23557ad141a413dce09cc75627e2d5a65f069640..c498aa2612ad209913affb7dbcc03281ae8c125a 100644
@@ -35,7 +35,7 @@
 #include "LFNIndex.h"
 using ceph::crypto::SHA1;
 
-#define dout_context g_ceph_context
+#define dout_context cct
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "LFNIndex(" << get_base_path() << ") "
diff --git a/src/os/filestore/LFNIndex.h b/src/os/filestore/LFNIndex.h
index 2de092ef1e28672460415a4145a3426c1499356c..c23b915206b357ae5ce0d71fadbaa5540706bbcc 100644
@@ -127,11 +127,12 @@ private:
 public:
   /// Constructor
   LFNIndex(
+    CephContext* cct,
     coll_t collection,
     const char *base_path, ///< [in] path to Index root
     uint32_t index_version,
     double _error_injection_probability=0)
-    : CollectionIndex(collection),
+    : CollectionIndex(cct, collection),
       base_path(base_path),
       index_version(index_version),
       error_injection_enabled(false),
diff --git a/src/os/filestore/WBThrottle.cc b/src/os/filestore/WBThrottle.cc
index fb98c97536ce8f7f497e45d20595ec6bbd5c26a3..71884445c76a12cee0679e2eaeb607fba94631fe 100644
@@ -171,7 +171,7 @@ void *WBThrottle::entry()
     ::fsync(**wb.get<1>());
 #endif
 #ifdef HAVE_POSIX_FADVISE
-    if (g_conf->filestore_fadvise && wb.get<2>().nocache) {
+    if (cct->_conf->filestore_fadvise && wb.get<2>().nocache) {
       int fa_r = posix_fadvise(**wb.get<1>(), 0, 0, POSIX_FADV_DONTNEED);
       assert(fa_r == 0);
     }
@@ -221,7 +221,7 @@ void WBThrottle::clear()
        i != pending_wbs.end();
        ++i) {
 #ifdef HAVE_POSIX_FADVISE
-    if (g_conf->filestore_fadvise && i->second.first.nocache) {
+    if (cct->_conf->filestore_fadvise && i->second.first.nocache) {
       int fa_r = posix_fadvise(**i->second.second, 0, 0, POSIX_FADV_DONTNEED);
       assert(fa_r == 0);
     }
diff --git a/src/os/filestore/XfsFileStoreBackend.cc b/src/os/filestore/XfsFileStoreBackend.cc
index df9f6e3e3d51504d394dee52c5f4d11680a92d1d..b6d4d3e7401ec5dafaf3317f30b1bd9c8e352500 100644
@@ -28,7 +28,7 @@
 #include "include/assert.h"
 #include "include/compat.h"
 
-#define dout_context g_ceph_context
+#define dout_context cct()
 #define dout_subsys ceph_subsys_filestore
 #undef dout_prefix
 #define dout_prefix *_dout << "xfsfilestorebackend(" << get_basedir_path() << ") "
@@ -106,7 +106,7 @@ int XfsFileStoreBackend::detect_features()
     goto out_close;
   }
 
-  if (g_conf->filestore_xfs_extsize) {
+  if (cct()->_conf->filestore_xfs_extsize) {
     ret = set_extsize(fd, 1U << 15); // a few pages
     if (ret) {
       ret = 0;
diff --git a/src/os/filestore/ZFSFileStoreBackend.cc b/src/os/filestore/ZFSFileStoreBackend.cc
index aa52b8d293392319da19da4a50723d4f25ce6016..30cc6f9c36e0c6fe45de61ccd548c0356e432135 100644
@@ -36,7 +36,7 @@
 
 ZFSFileStoreBackend::ZFSFileStoreBackend(FileStore *fs) :
   GenericFileStoreBackend(fs), base_zh(NULL), current_zh(NULL),
-  m_filestore_zfs_snap(g_conf->filestore_zfs_snap)
+  m_filestore_zfs_snap(cct->_conf->filestore_zfs_snap)
 {
   int ret = zfs.init();
   if (ret < 0) {
diff --git a/src/test/os/TestLFNIndex.cc b/src/test/os/TestLFNIndex.cc
index c377fe482a6d1e8e73445dc5fb47a44905be5f16..367bba2dd945c4d781c2660ad77a03361bc95dc8 100644
 
 class TestWrapLFNIndex : public LFNIndex {
 public:
-  TestWrapLFNIndex(coll_t collection,
+  TestWrapLFNIndex(CephContext* cct,
+                  coll_t collection,
                   const char *base_path,
-                  uint32_t index_version) : LFNIndex(collection, base_path, index_version) {}
+                  uint32_t index_version)
+    : LFNIndex(cct, collection, base_path, index_version) {}
 
   virtual uint32_t collection_version() {
     return index_version;
@@ -92,7 +94,9 @@ protected:
 
 class TestHASH_INDEX_TAG : public TestWrapLFNIndex, public ::testing::Test {
 public:
-  TestHASH_INDEX_TAG() : TestWrapLFNIndex(coll_t(), "PATH_1", CollectionIndex::HASH_INDEX_TAG) {
+  TestHASH_INDEX_TAG()
+    : TestWrapLFNIndex(g_ceph_context, coll_t(), "PATH_1",
+                      CollectionIndex::HASH_INDEX_TAG) {
   }
 };
 
@@ -110,7 +114,9 @@ TEST_F(TestHASH_INDEX_TAG, generate_and_parse_name) {
 
 class TestHASH_INDEX_TAG_2 : public TestWrapLFNIndex, public ::testing::Test {
 public:
-  TestHASH_INDEX_TAG_2() : TestWrapLFNIndex(coll_t(), "PATH_1", CollectionIndex::HASH_INDEX_TAG_2) {
+  TestHASH_INDEX_TAG_2()
+    : TestWrapLFNIndex(g_ceph_context,
+                      coll_t(), "PATH_1", CollectionIndex::HASH_INDEX_TAG_2) {
   }
 };
 
@@ -133,7 +139,9 @@ TEST_F(TestHASH_INDEX_TAG_2, generate_and_parse_name) {
 
 class TestHOBJECT_WITH_POOL : public TestWrapLFNIndex, public ::testing::Test {
 public:
-  TestHOBJECT_WITH_POOL() : TestWrapLFNIndex(coll_t(), "PATH_1", CollectionIndex::HOBJECT_WITH_POOL) {
+  TestHOBJECT_WITH_POOL()
+    : TestWrapLFNIndex(g_ceph_context, coll_t(),
+                      "PATH_1", CollectionIndex::HOBJECT_WITH_POOL) {
   }
 };
 
@@ -177,7 +185,9 @@ TEST_F(TestHOBJECT_WITH_POOL, generate_and_parse_name) {
 
 class TestLFNIndex : public TestWrapLFNIndex, public ::testing::Test {
 public:
-  TestLFNIndex() : TestWrapLFNIndex(coll_t(), "PATH_1", CollectionIndex::HOBJECT_WITH_POOL) {
+  TestLFNIndex()
+    : TestWrapLFNIndex(g_ceph_context, coll_t(), "PATH_1",
+                      CollectionIndex::HOBJECT_WITH_POOL) {
   }
 
   virtual void SetUp() {
diff --git a/src/test/test_filejournal.cc b/src/test/test_filejournal.cc
index 0d3c818fbb4c03add7e29d4a2eae49636675098b..8a1ef5d426c6d91ef78520b833301c04ecb52277 100644
@@ -120,8 +120,8 @@ TEST(TestFileJournal, Create) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
   }
 }
@@ -134,8 +134,8 @@ TEST(TestFileJournal, WriteSmall) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -159,8 +159,8 @@ TEST(TestFileJournal, WriteBig) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -187,8 +187,8 @@ TEST(TestFileJournal, WriteMany) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -220,8 +220,8 @@ TEST(TestFileJournal, WriteManyVecs) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -272,8 +272,8 @@ TEST(TestFileJournal, ReplaySmall) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -332,8 +332,8 @@ TEST(TestFileJournal, ReplayCorrupt) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -417,8 +417,8 @@ TEST(TestFileJournal, WriteTrim) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -469,8 +469,8 @@ TEST(TestFileJournal, WriteTrimSmall) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -519,8 +519,8 @@ TEST(TestFileJournal, ReplayDetectCorruptFooterMagic) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -578,8 +578,8 @@ TEST(TestFileJournal, ReplayDetectCorruptPayload) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
@@ -637,8 +637,8 @@ TEST(TestFileJournal, ReplayDetectCorruptHeader) {
   for (unsigned i = 0 ; i < 3; ++i) {
     SCOPED_TRACE(subtests[i].description);
     fsid.generate_random();
-    FileJournal j(fsid, finisher, &sync_cond, path, subtests[i].directio,
-                 subtests[i].aio, subtests[i].faio);
+    FileJournal j(g_ceph_context, fsid, finisher, &sync_cond, path,
+                 subtests[i].directio, subtests[i].aio, subtests[i].faio);
     ASSERT_EQ(0, j.create());
     j.make_writeable();
 
diff --git a/src/tools/ceph_objectstore_tool.cc b/src/tools/ceph_objectstore_tool.cc
index 1806e586950905252bc6b23952c30b7a40c67fa2..becf1ed2356e6632dbb304b4ca60300ef35f03de 100644
@@ -2228,7 +2228,8 @@ int mydump_journal(Formatter *f, string journalpath, bool m_journal_dio)
   if (!journalpath.length())
     return -EINVAL;
 
-  FileJournal *journal = new FileJournal(uuid_d(), NULL, NULL, journalpath.c_str(), m_journal_dio);
+  FileJournal *journal = new FileJournal(g_ceph_context, uuid_d(), NULL, NULL,
+                                        journalpath.c_str(), m_journal_dio);
   r = journal->_fdump(*f, false);
   delete journal;
   return r;
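
For code outside the OSD, the pattern is visible in this last hunk: standalone tools keep
working by handing in the process-wide g_ceph_context explicitly. Below is a minimal
illustrative sketch of such a caller against the new FileJournal constructor; the
dump_journal_at helper is hypothetical and not in the tree.

#include <cerrno>
#include <iostream>
#include <string>

#include "global/global_context.h"      // declares g_ceph_context
#include "os/filestore/FileJournal.h"

// Hypothetical helper, for illustration only: open a journal and dump its
// contents through the explicit-context constructor added in this commit.
int dump_journal_at(const std::string& journalpath, bool journal_dio)
{
  if (journalpath.empty())
    return -EINVAL;
  // A Finisher and Cond are not needed just to dump, so pass nullptr for both,
  // as mydump_journal() above does; the remaining aio flags keep their defaults.
  FileJournal journal(g_ceph_context, uuid_d(), nullptr, nullptr,
                      journalpath.c_str(), journal_dio);
  return journal.dump(std::cout);
}

Daemons do the same thing one level up: whatever constructs the FileStore passes in its
CephContext, and the journal, indexes, and backends underneath inherit that context rather
than reaching for a global.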