))
p.wait()
- def open_background(self, basename="background_file"):
+ def open_background(self, basename="background_file", write=True):
"""
- Open a file for writing, then block such that the client
- will hold a capability.
+ Open a file for reading or writing, then block such that the client
+ will hold a capability.
path = os.path.join(self.mountpoint, basename)
- pyscript = dedent("""
- import time
+ if write:
+ pyscript = dedent("""
+ import time
- f = open("{path}", 'w')
- f.write('content')
- f.flush()
- f.write('content2')
- while True:
- time.sleep(1)
- """).format(path=path)
+ f = open("{path}", 'w')
+ f.write('content')
+ f.flush()
+ f.write('content2')
+ while True:
+ time.sleep(1)
+ """).format(path=path)
+ else:
+ pyscript = dedent("""
+ import time
+
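+ # keep the file open for read so the client holds a read capability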
+ f = open("{path}", 'r')
+ while True:
+ time.sleep(1)
+ """).format(path=path)
rproc = self._run_python(pyscript)
self.background_procs.append(rproc)
"""
session_timeout = self.fs.get_var("session_timeout")
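+ # Disable deferred stale marking so a timed-out session goes stale immediately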
+ self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false'])
# We only need one client
self.mount_b.umount_wait()
self.mount_a.wait_until_mounted()
self.mount_a.create_destroy()
- def test_stale_caps(self):
+ def _test_stale_caps(self, write):
session_timeout = self.fs.get_var("session_timeout")
# Capability release from stale session
# =====================================
- cap_holder = self.mount_a.open_background()
+ if write:
+ cap_holder = self.mount_a.open_background()
+ else:
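+ # Create the file, remount to drop the caps acquired by the touch, then reopen it read-only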
+ self.mount_a.run_shell(["touch", "background_file"])
+ self.mount_a.umount_wait()
+ self.mount_a.mount()
+ self.mount_a.wait_until_mounted()
+ cap_holder = self.mount_a.open_background(write=False)
+
+ self.assert_session_count(2)
+ mount_a_gid = self.mount_a.get_global_id()
# Wait for the file to be visible from another client, indicating
# that mount_a has completed its network ops
# Should have succeeded
self.assertEqual(cap_waiter.exitstatus, 0)
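+ # In the write case the stale holder's session should have been removed; with only read caps the session survives but is marked stale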
+ if write:
+ self.assert_session_count(1)
+ else:
+ self.assert_session_state(mount_a_gid, "stale")
+
cap_waited = b - a
log.info("cap_waiter waited {0}s".format(cap_waited))
self.assertTrue(session_timeout / 2.0 <= cap_waited <= session_timeout * 2.0,
self.mount_a.mount()
self.mount_a.wait_until_mounted()
+ def test_stale_read_caps(self):
+ self._test_stale_caps(False)
+
+ def test_stale_write_caps(self):
+ self._test_stale_caps(True)
+
def test_evicted_caps(self):
# Eviction while holding a capability
# ===================================
self.assert_session_state(mount_b_gid, "open")
time.sleep(session_timeout * 1.5) # Long enough for MDS to consider session stale
- self.assert_session_state(mount_b_gid, "stale")
self.mount_a.run_shell(["touch", "testdir/file2"])
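+ # Creating another file forces the MDS to revoke mount_b's caps; only then is the unresponsive session marked stale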
+ self.assert_session_state(mount_b_gid, "stale")
# resume ceph-fuse process of mount_b
self.mount_b.client_remote.run(args=["sudo", "kill", "-CONT", mount_b_pid])
// (caps go stale, leases die)
double queue_max_age = mds->get_dispatch_queue_max_age(ceph_clock_now());
double cutoff = queue_max_age + mds->mdsmap->get_session_timeout();
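+ // With mds_defer_session_stale set, don't mark a timed-out session stale unless someone is waiting for it to flush or the MDS is revoking caps from it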
+ bool defer_session_stale = g_conf->get_val<bool>("mds_defer_session_stale");
std::vector<Session*> to_evict;
continue;
}
- if (!session->is_any_flush_waiter() &&
+ if (defer_session_stale &&
+ !session->is_any_flush_waiter() &&
!mds->locker->is_revoking_any_caps_from(session->get_client())) {
dout(20) << "deferring marking session " << session->info.inst << " stale "
"since it holds no caps" << dendl;