git.apps.os.sepia.ceph.com Git - ceph.git/commit
kclient: drop s_mutex in __ceph_mdsc_send_cap
authorSage Weil <sage@newdream.net>
Wed, 30 Apr 2008 23:44:51 +0000 (16:44 -0700)
committerSage Weil <sage@newdream.net>
Wed, 30 Apr 2008 23:44:51 +0000 (16:44 -0700)
commitc630feb44f61ad258979701937829b9c1b6fa6db
treebd1e174a0513e5acf3a3d92797022fc0a9b3709c
parent6c9fece36463527b7783840b0d61e0ce77b7e0e0
kclient: drop s_mutex in __ceph_mdsc_send_cap

we want to avoid ever touching the delayed work while holding s_mutex:

[10124.715227] =======================================================
[10124.718705] [ INFO: possible circular locking dependency detected ]
[10124.718705] 2.6.25 #18
[10124.718705] -------------------------------------------------------
[10124.718705] events/1/10 is trying to acquire lock:
[10124.718705]  (&s->s_mutex){--..}, at: [<ffffffff880034a7>] ceph_check_caps+0x458/0x4f8 [ceph]
[10124.718705]
[10124.718705] but task is already holding lock:
[10124.718705]  (&(&ci->i_cap_dwork)->work){--..}, at: [<ffffffff80242fa7>] run_workqueue+0x9f/0x1f6
[10124.718705]
[10124.718705] which lock already depends on the new lock.
[10124.718705]
[10124.718705]
[10124.718705] the existing dependency chain (in reverse order) is:
[10124.718705]
[10124.718705] -> #1 (&(&ci->i_cap_dwork)->work){--..}:
[10124.718706]        [<ffffffff80251573>] __lock_acquire+0xa8b/0xc8a
[10124.718706]        [<ffffffff80251800>] lock_acquire+0x8e/0xb2
[10124.718706]        [<ffffffff802437af>] __cancel_work_timer+0xe8/0x1ea
[10124.718706]        [<ffffffff802438be>] cancel_delayed_work_sync+0xd/0xf
[10124.718706]        [<ffffffff8800ee26>] __ceph_mdsc_send_cap+0x247/0x264 [ceph]
[10124.718706]        [<ffffffff880034cd>] ceph_check_caps+0x47e/0x4f8 [ceph]
[10124.718706]        [<ffffffff88003610>] ceph_put_wrbuffer_cap_refs+0xc9/0xd2 [ceph]
[10124.718706]        [<ffffffff880090cc>] ceph_writepages+0x782/0x97a [ceph]
[10124.718706]        [<ffffffff8026d3f5>] do_writepages+0x2b/0x3a
[10124.718706]        [<ffffffff802aa8aa>] __writeback_single_inode+0x151/0x282
[10124.718706]        [<ffffffff802aade4>] sync_sb_inodes+0x1ab/0x26f
[10124.718706]        [<ffffffff802ab0cf>] writeback_inodes+0x85/0xe9
[10124.718706]        [<ffffffff8026dc9e>] wb_kupdate+0x9f/0x10d
[10124.718706]        [<ffffffff8026e1b9>] pdflush+0x134/0x1df
[10124.718706]        [<ffffffff8024663c>] kthread+0x49/0x79
[10124.718706]        [<ffffffff8020cd38>] child_rip+0xa/0x12
[10124.718706]        [<ffffffffffffffff>] 0xffffffffffffffff
[10124.718706]
[10124.718706] -> #0 (&s->s_mutex){--..}:
[10124.718706]        [<ffffffff80251475>] __lock_acquire+0x98d/0xc8a
[10124.718706]        [<ffffffff80251800>] lock_acquire+0x8e/0xb2
[10124.718706]        [<ffffffff8054f9aa>] mutex_lock_nested+0xed/0x273
[10124.718706]        [<ffffffff880034a7>] ceph_check_caps+0x458/0x4f8 [ceph]
[10124.718706]        [<ffffffff8800393e>] ceph_cap_delayed_work+0x101/0x14a [ceph]
[10124.718706]        [<ffffffff80242ff6>] run_workqueue+0xee/0x1f6
[10124.718706]        [<ffffffff80243b72>] worker_thread+0xdb/0xe8
[10124.718706]        [<ffffffff8024663c>] kthread+0x49/0x79
[10124.718706]        [<ffffffff8020cd38>] child_rip+0xa/0x12
[10124.718706]        [<ffffffffffffffff>] 0xffffffffffffffff
[10124.718706]
[10124.718706] other info that might help us debug this:
[10124.718706]
[10124.718706] 2 locks held by events/1/10:
[10124.718706]  #0:  (events){--..}, at: [<ffffffff80242fa7>] run_workqueue+0x9f/0x1f6
[10124.718706]  #1:  (&(&ci->i_cap_dwork)->work){--..}, at: [<ffffffff80242fa7>] run_workqueue+0x9f/0x1f6
[10124.718706]
[10124.718706] stack backtrace:
[10124.718706] Pid: 10, comm: events/1 Not tainted 2.6.25 #18
[10124.718706]
[10124.718706] Call Trace:
[10124.718706]  [<ffffffff8024f3ea>] print_circular_bug_tail+0x70/0x7b
[10124.718706]  [<ffffffff80251475>] __lock_acquire+0x98d/0xc8a
[10124.718706]  [<ffffffff80550a0f>] ? trace_hardirqs_on_thunk+0x35/0x3a
[10124.718706]  [<ffffffff8021287c>] ? native_sched_clock+0x4a/0x66
[10124.718706]  [<ffffffff880034a7>] ? :ceph:ceph_check_caps+0x458/0x4f8
[10124.718706]  [<ffffffff80251800>] lock_acquire+0x8e/0xb2
[10124.718706]  [<ffffffff880034a7>] ? :ceph:ceph_check_caps+0x458/0x4f8
[10124.718706]  [<ffffffff8054f9aa>] mutex_lock_nested+0xed/0x273
[10124.718706]  [<ffffffff880034a7>] ? :ceph:ceph_check_caps+0x458/0x4f8
[10124.718706]  [<ffffffff880034a7>] :ceph:ceph_check_caps+0x458/0x4f8
[10124.718706]  [<ffffffff8800383d>] ? :ceph:ceph_cap_delayed_work+0x0/0x14a
[10124.718706]  [<ffffffff8800393e>] :ceph:ceph_cap_delayed_work+0x101/0x14a
[10124.718706]  [<ffffffff80242ff6>] run_workqueue+0xee/0x1f6
[10124.718706]  [<ffffffff80243b72>] worker_thread+0xdb/0xe8
[10124.718706]  [<ffffffff8024675a>] ? autoremove_wake_function+0x0/0x38
[10124.718706]  [<ffffffff80243a97>] ? worker_thread+0x0/0xe8
[10124.718706]  [<ffffffff8024663c>] kthread+0x49/0x79
[10124.718706]  [<ffffffff8020cd38>] child_rip+0xa/0x12
[10124.718706]  [<ffffffff8020c44f>] ? restore_args+0x0/0x30
[10124.718706]  [<ffffffff802465f3>] ? kthread+0x0/0x79
[10124.718706]  [<ffffffff8020cd2e>] ? child_rip+0x0/0x12
[10124.718706]
[10175.822587] ceph_super: kill_sb ffff81010da60000
src/kernel/inode.c
src/kernel/mds_client.c