                rc = sk_backlog_rcv(sk, skb);
 
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+       } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
 
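(Aside, not part of the diff: the READ_ONCE() reads above pair with WRITE_ONCE() on the store side, because setsockopt(SO_RCVBUF) can resize the buffer while softirq/backlog paths read it without the socket lock held. A minimal sketch of such a paired writer; the helper name below is made up for illustration:)

static void sk_set_rcvbuf_sketch(struct sock *sk, int val)
{
	/* Lockless readers use READ_ONCE(sk->sk_rcvbuf), so the
	 * update is annotated with WRITE_ONCE() to pair with them
	 * and avoid store tearing.
	 */
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
}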
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+       u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
 
        else {
                dprintk("%s: adding to backlog...\n", __func__);
                llc_set_backlog_type(skb, LLC_PACKET);
-               if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+               if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                        goto drop_unlock;
        }
 out:
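(Aside: the third argument to sk_add_backlog() is the byte limit the socket backlog is checked against, which is why these call sites now pass a READ_ONCE() snapshot of sk->sk_rcvbuf. Roughly, the skb is refused once the bytes already queued in the backlog plus the receive queue exceed that limit; a simplified sketch with a made-up name, pfmemalloc and memory-accounting details omitted:)

static int sk_add_backlog_sketch(struct sock *sk, struct sk_buff *skb,
				 unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	if (qsize > limit)		/* queues already full: caller drops skb */
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);	/* link skb onto sk->sk_backlog */
	sk->sk_backlog.len += skb->truesize;
	return 0;
}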
 
                bh_lock_sock(sk);
 
                if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
-                       if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+                       if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
                        return 0;
        } else {
                if (!sctp_newsk_ready(sk)) {
-                       if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+                       if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                return 0;
                        sctp_chunk_free(chunk);
                } else {
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;
 
-       ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+       ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
 
        struct tipc_msg *hdr = buf_msg(skb);
 
        if (unlikely(msg_in_group(hdr)))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        if (unlikely(!msg_connected(hdr)))
-               return sk->sk_rcvbuf << msg_importance(hdr);
+               return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
 
        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        return FLOWCTL_MSG_LIM;
 }
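(Worked example with an illustrative value, not a TIPC default: with sk_rcvbuf at 2 MB, a group message or a connected peer advertising TIPC_BLOCK_FLOWCTL keeps the plain 2 MB limit; an unconnected message of importance TIPC_CRITICAL_IMPORTANCE (3) gets 2 MB << 3 = 16 MB; connected peers without TIPC_BLOCK_FLOWCTL fall back to the fixed FLOWCTL_MSG_LIM.)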
 
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
                } else {
-                       queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+                       queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
                }
                bh_unlock_sock(sk);
                sock_put(sk);
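(Stand-alone illustration, user-space only: the pattern all of the hunks above annotate is one thread resizing a buffer limit while another thread reads it locklessly to decide whether to queue or drop. The macros below mirror what the kernel's READ_ONCE()/WRITE_ONCE() boil down to for word-sized fields, a single volatile load/store the compiler cannot tear, duplicate, or elide; everything else is made up for the demo.)

#include <pthread.h>
#include <stdio.h>
#include <stdatomic.h>	/* only for the stop flag */

#define READ_ONCE(x)	   (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) do { *(volatile __typeof__(x) *)&(x) = (val); } while (0)

struct fake_sock {
	int rcvbuf;		/* resized by the "setsockopt" thread, read locklessly */
	int backlog_len;	/* only touched by the reader thread */
};

static struct fake_sock fsk = { .rcvbuf = 64 * 1024 };
static atomic_int stop;

static void *writer(void *arg)
{
	(void)arg;
	for (int val = 8 * 1024; val <= 1024 * 1024; val *= 2)
		WRITE_ONCE(fsk.rcvbuf, val);	/* lockless resize */
	atomic_store(&stop, 1);
	return NULL;
}

static void *reader(void *arg)
{
	long drops = 0;

	(void)arg;
	while (!atomic_load(&stop)) {
		/* Snapshot the limit exactly once; a plain read could be
		 * torn or re-fetched by the compiler between uses.
		 */
		int limit = READ_ONCE(fsk.rcvbuf);

		if (fsk.backlog_len + 1500 > limit)
			drops++;			/* over limit: drop */
		else
			fsk.backlog_len += 1500;	/* queue the "skb" */

		if (fsk.backlog_len >= limit)
			fsk.backlog_len = 0;		/* pretend the backlog drained */
	}
	printf("drops: %ld\n", drops);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}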