 /* Called by bpf_task_storage_get*() helpers */
 static void *__bpf_task_storage_get(struct bpf_map *map,
                                    struct task_struct *task, void *value,
-                                   u64 flags, gfp_t gfp_flags)
+                                   u64 flags, gfp_t gfp_flags, bool nobusy)
 {
        struct bpf_local_storage_data *sdata;
 
-       sdata = task_storage_lookup(task, map, true);
+       sdata = task_storage_lookup(task, map, nobusy);
        if (sdata)
                return sdata->data;
 
        /* only allocate new storage, when the task is refcounted */
        if (refcount_read(&task->usage) &&
-           (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
+           (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
                sdata = bpf_local_storage_update(
                        task, (struct bpf_local_storage_map *)map, value,
                        BPF_NOEXIST, gfp_flags);
[...]
 BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
           task, void *, value, u64, flags, gfp_t, gfp_flags)
 {
+       bool nobusy;
        void *data;
 
        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
                return (unsigned long)NULL;
 
-       if (!bpf_task_storage_trylock())
-               return (unsigned long)NULL;
+       nobusy = bpf_task_storage_trylock();
        data = __bpf_task_storage_get(map, task, value, flags,
-                                     gfp_flags);
-       bpf_task_storage_unlock();
+                                     gfp_flags, nobusy);
+       if (nobusy)
+               bpf_task_storage_unlock();
        return (unsigned long)data;
 }
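For illustration only, the following is a minimal, self-contained user-space sketch of the pattern this diff introduces: a failed trylock no longer fails the whole call, it only clears nobusy, which disables the create path and the final unlock. Every name in it (storage_trylock, storage_get, storage_get_recur, the single slot) is a hypothetical stand-in for the per-task busy counter and the bpf_local_storage machinery, not kernel API.

/*
 * Sketch of the trylock/"nobusy" flow, under the assumptions stated above.
 * Not kernel code; all identifiers are made up for this example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int busy;          /* stand-in for the per-task busy counter    */
static int slot_used;     /* stand-in for the task-local storage slot  */
static int slot_value;

static bool storage_trylock(void)
{
	if (busy)         /* counter already raised, e.g. by a recursive caller */
		return false;
	busy++;
	return true;
}

static void storage_unlock(void)
{
	busy--;
}

/* Plain lookup: safe whether or not the busy counter was taken. */
static void *storage_lookup(void)
{
	return slot_used ? &slot_value : NULL;
}

/* Creation mutates the store, so callers only reach it while they
 * actually hold the busy counter (nobusy == true).
 */
static void *storage_create(int value)
{
	slot_used = 1;
	slot_value = value;
	return &slot_value;
}

/* Mirrors __bpf_task_storage_get(): always look up, create only when nobusy. */
static void *storage_get(int value, bool create, bool nobusy)
{
	void *data = storage_lookup();

	if (data)
		return data;
	if (create && nobusy)
		return storage_create(value);
	return NULL;
}

/* Mirrors bpf_task_storage_get_recur(): a failed trylock no longer aborts
 * the call, it only downgrades it to a lookup-only path and skips the unlock.
 */
static void *storage_get_recur(int value, bool create)
{
	bool nobusy = storage_trylock();
	void *data = storage_get(value, create, nobusy);

	if (nobusy)
		storage_unlock();
	return data;
}

int main(void)
{
	busy = 1;   /* simulate a recursive caller that already holds the counter */
	printf("recursive get: %p\n", storage_get_recur(7, true));  /* NULL: create skipped */
	busy = 0;
	printf("normal get:    %p\n", storage_get_recur(42, true)); /* creates the slot */
	return 0;
}

The sketch only demonstrates the control flow; the real helpers additionally check the storage-get flags, the task refcount, and run under RCU, as the diff above shows.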