]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git/commitdiff
perf/core: Fix refcount bug and potential UAF in perf_mmap
author: Haocheng Yu <yuhaocheng035@gmail.com>
Mon, 2 Feb 2026 16:20:56 +0000 (00:20 +0800)
committer: Peter Zijlstra <peterz@infradead.org>
Mon, 23 Feb 2026 10:19:25 +0000 (11:19 +0100)
Syzkaller reported a refcount_t: addition on 0; use-after-free warning
in perf_mmap.

The issue is caused by a race condition between a failing mmap() setup
and a concurrent mmap() on a dependent event (e.g., using output
redirection).

In perf_mmap(), the ring_buffer (rb) is allocated and assigned to
event->rb with the mmap_mutex held. The mutex is then released to
perform map_range().

If map_range() fails, perf_mmap_close() is called to clean up.
However, since the mutex was dropped, another thread attaching to
this event (via inherited events or output redirection) can acquire
the mutex, observe the valid event->rb pointer, and attempt to
increment its reference count. If the cleanup path has already
dropped the reference count to zero, this results in a
use-after-free or refcount saturation warning.

Fix this by extending the scope of mmap_mutex to cover the
map_range() call. This ensures that the ring buffer initialization
and mapping (or cleanup on failure) happen effectively atomically,
preventing other threads from accessing a half-initialized or
dying ring buffer.

Closes: https://lore.kernel.org/oe-kbuild-all/202602020208.m7KIjdzW-lkp@intel.com/
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Haocheng Yu <yuhaocheng035@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260202162057.7237-1-yuhaocheng035@gmail.com
kernel/events/core.c

index 4f86d22f281ab7b087435c01289715ec6b25f34d..22a0f405585b50413fa809dca6741a1688a2ed0f 100644 (file)
@@ -7465,28 +7465,28 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                        ret = perf_mmap_aux(vma, event, nr_pages);
                if (ret)
                        return ret;
-       }
 
-       /*
-        * Since pinned accounting is per vm we cannot allow fork() to copy our
-        * vma.
-        */
-       vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
-       vma->vm_ops = &perf_mmap_vmops;
+               /*
+                * Since pinned accounting is per vm we cannot allow fork() to copy our
+                * vma.
+                */
+               vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
+               vma->vm_ops = &perf_mmap_vmops;
 
-       mapped = get_mapped(event, event_mapped);
-       if (mapped)
-               mapped(event, vma->vm_mm);
+               mapped = get_mapped(event, event_mapped);
+               if (mapped)
+                       mapped(event, vma->vm_mm);
 
-       /*
-        * Try to map it into the page table. On fail, invoke
-        * perf_mmap_close() to undo the above, as the callsite expects
-        * full cleanup in this case and therefore does not invoke
-        * vmops::close().
-        */
-       ret = map_range(event->rb, vma);
-       if (ret)
-               perf_mmap_close(vma);
+               /*
+                * Try to map it into the page table. On fail, invoke
+                * perf_mmap_close() to undo the above, as the callsite expects
+                * full cleanup in this case and therefore does not invoke
+                * vmops::close().
+                */
+               ret = map_range(event->rb, vma);
+               if (ret)
+                       perf_mmap_close(vma);
+       }
 
        return ret;
 }