]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mds: dynamically allocate data structures for file locks
authorYan, Zheng <zyan@redhat.com>
Mon, 9 Feb 2015 04:56:44 +0000 (12:56 +0800)
committerYan, Zheng <zyan@redhat.com>
Wed, 25 Feb 2015 12:51:19 +0000 (20:51 +0800)
Size of ceph_lock_state_t is about 200 bytes, CInode contains two
ceph_lock_state_t. Dynamically allocating them can save about 400
bytes.

Signed-off-by: Yan, Zheng <zyan@redhat.com>
src/mds/CInode.cc
src/mds/CInode.h
src/mds/Locker.cc
src/mds/MDS.h
src/mds/Server.cc
src/mds/flock.h

index 839ad6a4871ed1f99cadeafdbb2926c4c045e3f9..4569234116f260ed3e54cc836f9e9bce7fe72c42 100644 (file)
@@ -1452,8 +1452,7 @@ void CInode::encode_lock_state(int type, bufferlist& bl)
 
   case CEPH_LOCK_IFLOCK:
     ::encode(inode.version, bl);
-    ::encode(fcntl_locks, bl);
-    ::encode(flock_locks, bl);
+    _encode_file_locks(bl);
     break;
 
   case CEPH_LOCK_IPOLICY:
@@ -1707,8 +1706,7 @@ void CInode::decode_lock_state(int type, bufferlist& bl)
 
   case CEPH_LOCK_IFLOCK:
     ::decode(inode.version, p);
-    ::decode(fcntl_locks, p);
-    ::decode(flock_locks, p);
+    _decode_file_locks(p);
     break;
 
   case CEPH_LOCK_IPOLICY:
@@ -2714,8 +2712,8 @@ void CInode::remove_client_cap(client_t client)
   mdcache->num_caps--;
 
   //clean up advisory locks
-  bool fcntl_removed = fcntl_locks.remove_all_from(client);
-  bool flock_removed = flock_locks.remove_all_from(client);
+  bool fcntl_removed = fcntl_locks ? fcntl_locks->remove_all_from(client) : false;
+  bool flock_removed = flock_locks ? flock_locks->remove_all_from(client) : false; 
   if (fcntl_removed || flock_removed) {
     list<MDSInternalContextBase*> waiters;
     take_waiting(CInode::WAIT_FLOCK, waiters);
@@ -3429,8 +3427,8 @@ void CInode::encode_export(bufferlist& bl)
 
   _encode_locks_full(bl);
 
-  ::encode(fcntl_locks, bl);
-  ::encode(flock_locks, bl);
+  _encode_file_locks(bl);
+
   ENCODE_FINISH(bl);
 
   get(PIN_TEMPEXPORTING);
@@ -3521,10 +3519,7 @@ void CInode::decode_import(bufferlist::iterator& p,
 
   _decode_locks_full(p);
 
-  if (struct_v >= 5) {
-    ::decode(fcntl_locks, p);
-    ::decode(flock_locks, p);
-  }
+  _decode_file_locks(p);
 
   DECODE_FINISH(p);
 }
index 35c4f9236703b01eb645d40109e2a7ef7b7c0510..1dfa26f3cedee5493f623504221f18270f477caf 100644 (file)
@@ -451,12 +451,54 @@ public:
 
 protected:
 
-  ceph_lock_state_t fcntl_locks;
-  ceph_lock_state_t flock_locks;
+  ceph_lock_state_t *fcntl_locks;
+  ceph_lock_state_t *flock_locks;
 
+  ceph_lock_state_t *get_fcntl_lock_state() {
+    if (!fcntl_locks)
+      fcntl_locks = new ceph_lock_state_t(g_ceph_context);
+    return fcntl_locks;
+  }
+  void clear_fcntl_lock_state() {
+    delete fcntl_locks;
+    fcntl_locks = NULL;
+  }
+  ceph_lock_state_t *get_flock_lock_state() {
+    if (!flock_locks)
+      flock_locks = new ceph_lock_state_t(g_ceph_context);
+    return flock_locks;
+  }
+  void clear_flock_lock_state() {
+    delete flock_locks;
+    flock_locks = NULL;
+  }
   void clear_file_locks() {
-    fcntl_locks.clear();
-    flock_locks.clear();
+    clear_fcntl_lock_state();
+    clear_flock_lock_state();
+  }
+  void _encode_file_locks(bufferlist& bl) const {
+    bool has_fcntl_locks = fcntl_locks && !fcntl_locks->empty();
+    ::encode(has_fcntl_locks, bl);
+    if (has_fcntl_locks)
+      ::encode(*fcntl_locks, bl);
+    bool has_flock_locks = flock_locks && !flock_locks->empty();
+    ::encode(has_flock_locks, bl);
+    if (has_flock_locks)
+      ::encode(*flock_locks, bl);
+  }
+  void _decode_file_locks(bufferlist::iterator& p) {
+    bool has_fcntl_locks;
+    ::decode(has_fcntl_locks, p);
+    if (has_fcntl_locks)
+      ::decode(*get_fcntl_lock_state(), p);
+    else
+      clear_fcntl_lock_state();
+    bool has_flock_locks;
+    ::decode(has_flock_locks, p);
+    if (has_flock_locks)
+      ::decode(*get_flock_lock_state(), p);
+    else
+      clear_flock_lock_state();
   }
 
   // LogSegment lists i (may) belong to
@@ -500,7 +542,7 @@ public:
     parent(0),
     inode_auth(CDIR_AUTH_DEFAULT),
     replica_caps_wanted(0),
-    fcntl_locks(g_ceph_context), flock_locks(g_ceph_context),
+    fcntl_locks(0), flock_locks(0),
     item_dirty(this), item_caps(this), item_open_file(this), item_dirty_parent(this),
     item_dirty_dirfrag_dir(this), 
     item_dirty_dirfrag_nest(this), 
@@ -530,6 +572,7 @@ public:
     g_num_inos++;
     close_dirfrags();
     close_snaprealm();
+    clear_file_locks();
   }
   
 
index 063322142f1d0b480e3ed0b3d4eef88dd8e6bbae..7a350cfbf6de5cf85b9ce34528140f1d95e7a8f3 100644 (file)
@@ -3054,17 +3054,17 @@ bool Locker::_do_cap_update(CInode *in, Capability *cap,
     for ( int i=0; i < num_locks; ++i) {
       ceph_filelock decoded_lock;
       ::decode(decoded_lock, bli);
-      in->fcntl_locks.held_locks.
+      in->get_fcntl_lock_state()->held_locks.
        insert(pair<uint64_t, ceph_filelock>(decoded_lock.start, decoded_lock));
-      ++in->fcntl_locks.client_held_lock_counts[(client_t)(decoded_lock.client)];
+      ++in->get_fcntl_lock_state()->client_held_lock_counts[(client_t)(decoded_lock.client)];
     }
     ::decode(num_locks, bli);
     for ( int i=0; i < num_locks; ++i) {
       ceph_filelock decoded_lock;
       ::decode(decoded_lock, bli);
-      in->flock_locks.held_locks.
+      in->get_flock_lock_state()->held_locks.
        insert(pair<uint64_t, ceph_filelock>(decoded_lock.start, decoded_lock));
-      ++in->flock_locks.client_held_lock_counts[(client_t)(decoded_lock.client)];
+      ++in->get_flock_lock_state()->client_held_lock_counts[(client_t)(decoded_lock.client)];
     }
   }
 
index 5da3a8b4697f5e430fb3befb2bf5d5dae3072914..dca10c70249418785b4abd679d9350b0e554aaed 100644 (file)
@@ -39,7 +39,7 @@
 #include "Beacon.h"
 
 
-#define CEPH_MDS_PROTOCOL    24 /* cluster internal */
+#define CEPH_MDS_PROTOCOL    25 /* cluster internal */
 
 enum {
   l_mds_first = 2000,
index 39b7bd7a22be9fd8ccadbc6f2701f9dc32a01423..a484393e23638690d118ee11e4b07b2b1ad7634a 100644 (file)
@@ -813,17 +813,15 @@ void Server::recover_filelocks(CInode *in, bufferlist locks, int64_t client)
   for (int i = 0; i < numlocks; ++i) {
     ::decode(lock, p);
     lock.client = client;
-    in->fcntl_locks.held_locks.insert(pair<uint64_t, ceph_filelock>
-                                     (lock.start, lock));
-    ++in->fcntl_locks.client_held_lock_counts[client];
+    in->get_fcntl_lock_state()->held_locks.insert(pair<uint64_t, ceph_filelock>(lock.start, lock));
+    ++in->get_fcntl_lock_state()->client_held_lock_counts[client];
   }
   ::decode(numlocks, p);
   for (int i = 0; i < numlocks; ++i) {
     ::decode(lock, p);
     lock.client = client;
-    in->flock_locks.held_locks.insert(pair<uint64_t, ceph_filelock>
-                                     (lock.start, lock));
-    ++in->flock_locks.client_held_lock_counts[client];
+    in->get_flock_lock_state()->held_locks.insert(pair<uint64_t, ceph_filelock> (lock.start, lock));
+    ++in->get_flock_lock_state()->client_held_lock_counts[client];
   }
 }
 
@@ -3340,14 +3338,14 @@ void Server::handle_client_file_setlock(MDRequestRef& mdr)
     interrupt = true;
     // fall-thru
   case CEPH_LOCK_FLOCK:
-    lock_state = &cur->flock_locks;
+    lock_state = cur->get_flock_lock_state();
     break;
 
   case CEPH_LOCK_FCNTL_INTR:
     interrupt = true;
     // fall-thru
   case CEPH_LOCK_FCNTL:
-    lock_state = &cur->fcntl_locks;
+    lock_state = cur->get_fcntl_lock_state();
     break;
 
   default:
@@ -3430,11 +3428,11 @@ void Server::handle_client_file_readlock(MDRequestRef& mdr)
   ceph_lock_state_t *lock_state = NULL;
   switch (req->head.args.filelock_change.rule) {
   case CEPH_LOCK_FLOCK:
-    lock_state = &cur->flock_locks;
+    lock_state = cur->get_flock_lock_state();
     break;
 
   case CEPH_LOCK_FCNTL:
-    lock_state = &cur->fcntl_locks;
+    lock_state = cur->get_fcntl_lock_state();
     break;
 
   default:
index bf3980d094b3cc8a8e9182bdc969565ade9a1f19..37149eb7c4355a21956377ce826d432e3b095d74 100644 (file)
@@ -215,6 +215,11 @@ public:
     client_held_lock_counts.clear();
     client_waiting_lock_counts.clear();
   }
+  bool empty() const {
+    return held_locks.empty() && waiting_locks.empty() &&
+          client_held_lock_counts.empty() &&
+          client_waiting_lock_counts.empty();
+  }
 };
 WRITE_CLASS_ENCODER(ceph_lock_state_t)