__mmap_lock_trace_released(mm, false);
 }
 
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
-       if (mmap_read_trylock(mm)) {
-               rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
-               return true;
-       }
-       return false;
-}
-
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
        up_read_non_owner(&mm->mmap_lock);
 
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
-           !mmap_read_trylock_non_owner(current->mm)) {
+           !mmap_read_trylock(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
        }
 
        if (!work) {
-               mmap_read_unlock_non_owner(current->mm);
+               mmap_read_unlock(current->mm);
        } else {
                work->mm = current->mm;
+
+               /* The lock will be released once we're out of interrupt
+                * context. Tell lockdep that we've released it now so
+                * it doesn't complain that we forgot to release it.
+                */
+               rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
                irq_work_queue(&work->irq_work);
        }
 }
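
For reference, a minimal sketch of the deferred-unlock side this hunk hands off to: an irq_work callback that drops the mmap lock with the non-owner primitive once execution is back out of interrupt context. The struct and callback names (stack_map_irq_work, do_up_read) are assumptions for illustration and are not shown in this hunk; only work->mm, work->irq_work and mmap_read_unlock_non_owner() appear above.

    #include <linux/irq_work.h>
    #include <linux/mmap_lock.h>

    /* Hypothetical container for the deferred unlock; the real patch
     * only shows that 'work' carries ->mm and ->irq_work.
     */
    struct stack_map_irq_work {
            struct irq_work irq_work;
            struct mm_struct *mm;
    };

    /* Runs from a sleepable context after irq_work_queue() above.
     * We already told lockdep the lock was released (rwsem_release()),
     * so the unlock must use the _non_owner variant: the context doing
     * the unlock is not the one lockdep saw acquire the lock.
     */
    static void do_up_read(struct irq_work *entry)
    {
            struct stack_map_irq_work *work;

            work = container_of(entry, struct stack_map_irq_work, irq_work);
            mmap_read_unlock_non_owner(work->mm);
    }

The callback would be wired up once at init time with init_irq_work(&work->irq_work, do_up_read); the lock acquired by mmap_read_trylock() in the hunk above is then released either directly (the !work path) or through this deferred path.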