librbd: add AIO version of invalidate_cache
author Jason Dillaman <dillaman@redhat.com>
Thu, 26 Feb 2015 22:52:39 +0000 (17:52 -0500)
committer Jason Dillaman <dillaman@redhat.com>
Fri, 27 Feb 2015 18:24:21 +0000 (13:24 -0500)
Break the existing synchronous invalidate_cache into pre- and post-flush
routines, reusing the existing logic.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
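
The synchronous invalidate_cache() keeps its external behavior by waiting on
the new asynchronous overload.  A minimal caller-side sketch of that pattern,
using ceph's existing C_SaferCond helper (ictx stands for any ImageCtx pointer):

    // Wait synchronously on the asynchronous overload.  ctx.wait() returns
    // 0 once every object has been released from the cache, or -EBUSY if
    // dirty objects remain.
    C_SaferCond ctx;
    ictx->invalidate_cache(&ctx);
    int r = ctx.wait();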
src/librbd/ImageCtx.cc
src/librbd/ImageCtx.h
src/librbd/internal.cc

diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc
index e2e90d2745a67f4d795b5a5a61e0bf1f8e6372de..69e007dd60bfc13f850f5c1801d5027f73a5ee5a 100644
--- a/src/librbd/ImageCtx.cc
+++ b/src/librbd/ImageCtx.cc
@@ -14,6 +14,8 @@
 #include "librbd/ImageWatcher.h"
 #include "librbd/ObjectMap.h"
 
+#include <boost/bind.hpp>
+
 #define dout_subsys ceph_subsys_rbd
 #undef dout_prefix
 #define dout_prefix *_dout << "librbd::ImageCtx: "
@@ -628,34 +630,49 @@ namespace librbd {
   }
 
   void ImageCtx::shutdown_cache() {
+    flush_async_operations();
     invalidate_cache();
     object_cacher->stop();
   }
 
   int ImageCtx::invalidate_cache() {
-    if (!object_cacher)
-      return 0;
-    flush_async_operations();
+    C_SaferCond ctx;
+    invalidate_cache(&ctx);
+    return ctx.wait();
+  }
+
+  void ImageCtx::invalidate_cache(Context *on_finish) {
+    if (object_cacher == NULL) {
+      on_finish->complete(0);
+      return;
+    }
+
     cache_lock.Lock();
     object_cacher->release_set(object_set);
     cache_lock.Unlock();
-    int r = flush_cache();
+
+    flush_cache_aio(new FunctionContext(boost::bind(
+      &ImageCtx::invalidate_cache_completion, this, _1, on_finish)));
+  }
+
+  void ImageCtx::invalidate_cache_completion(int r, Context *on_finish) {
+    assert(cache_lock.is_locked());
     if (r == -EBLACKLISTED) {
-      Mutex::Locker l(cache_lock);
       lderr(cct) << "Blacklisted during flush!  Purging cache..." << dendl;
       object_cacher->purge_set(object_set);
-    } else if (r) {
+    } else if (r != 0) {
       lderr(cct) << "flush_cache returned " << r << dendl;
     }
-    cache_lock.Lock();
+
     loff_t unclean = object_cacher->release_set(object_set);
-    cache_lock.Unlock();
-    if (unclean) {
+    if (unclean == 0) {
+      r = 0;
+    } else {
       lderr(cct) << "could not release all objects from cache: "
                  << unclean << " bytes remain" << dendl;
-      return -EBUSY;
+      r = -EBUSY;
     }
-    return r;
+    on_finish->complete(r);
   }
 
   void ImageCtx::clear_nonexistence_cache() {
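
The pre-flush half above hands its continuation to flush_cache_aio() as a bound
member function.  A standalone sketch of that callback pattern, assuming only
ceph's Context and FunctionContext from include/Context.h (the Example type
here is hypothetical):

    #include <boost/bind.hpp>
    #include "include/Context.h"  // Context, FunctionContext

    struct Example {
      void start(Context *on_finish) {
        // boost::bind adapts the two-argument member function to the
        // single-int signature FunctionContext expects, carrying
        // on_finish along with it.
        Context *cb = new FunctionContext(boost::bind(
          &Example::handle_done, this, _1, on_finish));
        cb->complete(0);  // stands in for the real asynchronous flush
      }

      void handle_done(int r, Context *on_finish) {
        on_finish->complete(r);  // propagate the result to the caller
      }
    };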
diff --git a/src/librbd/ImageCtx.h b/src/librbd/ImageCtx.h
index c8efc0305fce220b1e078cfe5387270fe67af9d4..a35e40b54b4b3a2bd6ab74d11653ce02199d21ae 100644
--- a/src/librbd/ImageCtx.h
+++ b/src/librbd/ImageCtx.h
@@ -190,6 +190,8 @@ namespace librbd {
     int flush_cache();
     void shutdown_cache();
     int invalidate_cache();
+    void invalidate_cache(Context *on_finish);
+    void invalidate_cache_completion(int r, Context *on_finish);
     void clear_nonexistence_cache();
     int register_watch();
     void unregister_watch();
diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
index cdf3f9bc5124ab51393258725372667aec65769e..be071ee65ea34c596027e73d3314e5455ab5e6dd 100644
--- a/src/librbd/internal.cc
+++ b/src/librbd/internal.cc
@@ -1682,6 +1682,7 @@ reprotect_and_return_err:
       original_size = ictx->size;
       ictx->snap_lock.put_read();
       if (size < original_size) {
+        ictx->flush_async_operations();
        if (ictx->object_cacher) {
          // need to invalidate since we're deleting objects, and
          // ObjectCacher doesn't track non-existent objects
@@ -2108,6 +2109,7 @@ reprotect_and_return_err:
       // need to flush any pending writes before resizing and rolling back -
       // writes might create new snapshots. Rolling back will replace
       // the current version, so we have to invalidate that too.
+      ictx->flush_async_operations();
       r = ictx->invalidate_cache();
       if (r < 0) {
        return r;
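
Because the flush of in-flight AIO moved out of invalidate_cache() itself,
callers now drain pending operations explicitly before invalidating, as the
shutdown_cache, resize, and rollback hunks above show.  In sketch form, the
ordering a caller is expected to follow (ictx being any ImageCtx pointer):

    ictx->flush_async_operations();    // drain in-flight AIO first
    int r = ictx->invalidate_cache();  // then flush and release the cache
    if (r < 0) {
      // e.g. -EBUSY: some dirty objects could not be released
    }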