git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mds: locking: remove zero-valued client held/waiting lock counts from the map.
authorGreg Farnum <gregf@hq.newdream.net>
Tue, 11 May 2010 17:22:14 +0000 (10:22 -0700)
committerGreg Farnum <gregf@hq.newdream.net>
Mon, 2 Aug 2010 17:39:56 +0000 (10:39 -0700)
In the unlikely event that you have a very large cluster in which many
clients hold or wait on locks across many different files, this keeps
memory use down by erasing map entries once their counts drop to zero.

src/mds/mdstypes.h

index dc06d2e3b67da40d1cf586bf406e436b0dfaf81a..4ab9532fb0b11466d48e9b6f4fb4216739384331 100644 (file)
@@ -476,6 +476,9 @@ struct ceph_lock_state_t {
          --client_held_lock_counts[old_lock->client];
        }
       }
+      if (!client_held_lock_counts.count(old_lock->client)) {
+       client_held_lock_counts.erase(old_lock->client);
+      }
     }
 
     /* okay, we've removed the locks, but removing them might allow some
@@ -496,6 +499,9 @@ struct ceph_lock_state_t {
        ceph_filelock cur_lock = *(*iter);
        waiting_locks.erase(find_specific_elem(*iter, waiting_locks));
        --client_waiting_lock_counts[cur_lock.client];
+       if (!client_waiting_lock_counts.count(cur_lock.client)) {
+         client_waiting_lock_counts.erase(cur_lock.client);
+       }
        if(add_lock(cur_lock, true)) activated_locks.push_back(cur_lock);
       }
     }
@@ -505,12 +511,12 @@ struct ceph_lock_state_t {
     bool cleared_any = false;
     if (client_held_lock_counts.count(client)) {
       remove_all_from(client, held_locks);
-      client_held_lock_counts[client] = 0;
+      client_held_lock_counts.erase(client);
       cleared_any = true;
     }
     if (client_waiting_lock_counts.count(client)) {
       remove_all_from(client, waiting_locks);
-      client_waiting_lock_counts[client] = 0;
+      client_waiting_lock_counts.erase(client);
     }
     return cleared_any;
   }
@@ -607,6 +613,9 @@ private:
          }
        }
       }
+      if (!client_held_lock_counts.count(old_lock->client)) {
+       client_held_lock_counts.erase(old_lock->client);
+      }
     }
   }