async_dentry_invalidator(m->cct),
interrupt_finisher(m->cct),
remount_finisher(m->cct),
+ async_ino_releasor(m->cct),
objecter_finisher(m->cct),
m_command_hook(this),
fscid(0)
remount_finisher.stop();
}
+ if (ino_release_cb) {
+ ldout(cct, 10) << "shutdown stopping inode release finisher" << dendl;
+ async_ino_releasor.wait_for_empty();
+ async_ino_releasor.stop();
+ }
+
objectcacher->stop(); // outside of client_lock! this does a join.
client_lock.Lock();
}
}
+// Completion context queued on async_ino_releasor. It snapshots the inode's
+// vinodeno at construction time (using the faked ino when the client exposes
+// faked inos), so the finisher thread never needs to touch the Inode itself.
+class C_Client_CacheRelease : public Context {
+private:
+ Client *client;
+ vinodeno_t ino;   // captured at queue time; the ino space the app sees
+public:
+ C_Client_CacheRelease(Client *c, Inode *in) :
+ client(c) {
+ // Report the same ino namespace the application uses: faked inos when
+ // use_faked_inos() is enabled, otherwise the real vino.
+ if (client->use_faked_inos())
+ ino = vinodeno_t(in->faked_ino, CEPH_NOSNAP);
+ else
+ ino = in->vino();
+ }
+ void finish(int r) override {
+ // Runs on the finisher thread: client_lock must NOT be held here, since
+ // the application's release callback may call back into the client.
+ ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock));
+ client->_async_inode_release(ino);
+ }
+};
+
+// Finisher-thread body for C_Client_CacheRelease: invoke the registered
+// ino_release_cb so the application can drop its references to this inode.
+// Called without client_lock held (asserted by the caller's finish()).
+void Client::_async_inode_release(vinodeno_t ino)
+{
+ // Skip the upcall while unmounting; shutdown() drains async_ino_releasor
+ // with wait_for_empty() before stopping it.
+ // NOTE(review): 'unmounting' is read here without client_lock — presumably
+ // a benign race, but confirm against the other async_* callbacks.
+ if (unmounting)
+ return;
+ ldout(cct, 10) << __func__ << " " << ino << dendl;
+ ino_release_cb(callback_handle, ino);
+}
+
+// If the application registered an ino_release_cb, queue an asynchronous
+// request asking it to release its references to this inode (used e.g. by
+// trim_caps()). No-op when no callback is registered.
+void Client::_schedule_ino_release_callback(Inode *in) {
+
+ if (ino_release_cb)
+ // Queue the release request; the finisher thread invokes the callback
+ // via C_Client_CacheRelease::finish() and then deletes the context.
+ // (The context captures only the vinodeno, not an Inode reference.)
+ async_ino_releasor.queue(new C_Client_CacheRelease(this, in));
+}
+
void Client::trim_caps(MetaSession *s, uint64_t max)
{
mds_rank_t mds = s->mds_num;
if (all && in->ino != MDS_INO_ROOT) {
ldout(cct, 20) << __func__ << " counting as trimmed: " << *in << dendl;
trimmed++;
+ _schedule_ino_release_callback(in.get());
}
}
}
remount_cb = args->remount_cb;
remount_finisher.start();
}
+ if (args->ino_release_cb) {
+ ino_release_cb = args->ino_release_cb;
+ async_ino_releasor.start();
+ }
if (args->umask_cb)
umask_cb = args->umask_cb;
}
friend class C_Client_Remount;
friend class C_Client_RequestInterrupt;
friend class C_Deleg_Timeout; // Asserts on client_lock, called when a delegation is unreturned
+ friend class C_Client_CacheRelease; // Asserts on client_lock
friend class SyntheticClient;
friend void intrusive_ptr_release(Inode *in);
void _invalidate_inode_cache(Inode *in);
void _invalidate_inode_cache(Inode *in, int64_t off, int64_t len);
void _async_invalidate(vinodeno_t ino, int64_t off, int64_t len);
+
+ void _schedule_ino_release_callback(Inode *in);
+ void _async_inode_release(vinodeno_t ino);
+
bool _release(Inode *in);
/**
client_ino_callback_t ino_invalidate_cb = nullptr;
client_dentry_callback_t dentry_invalidate_cb = nullptr;
client_umask_callback_t umask_cb = nullptr;
+ client_ino_release_t ino_release_cb = nullptr;
void *callback_handle = nullptr;
bool can_invalidate_dentries = false;
Finisher async_dentry_invalidator;
Finisher interrupt_finisher;
Finisher remount_finisher;
+ Finisher async_ino_releasor;
Finisher objecter_finisher;
Context *tick_event = nullptr;
/* fetch umask of actor */
typedef mode_t (*client_umask_callback_t)(void *handle);
+/* request that application release Inode references */
+typedef void (*client_ino_release_t)(void *handle, vinodeno_t ino);
+
/*
* The handle is an opaque value that gets passed to some callbacks. Any fields
* set to NULL will be left alone. There is no way to unregister callbacks.
client_switch_interrupt_callback_t switch_intr_cb;
client_remount_callback_t remount_cb;
client_umask_callback_t umask_cb;
+ client_ino_release_t ino_release_cb;
};
#ifdef __cplusplus