static inline int sk_stream_wspace(const struct sock *sk)
 {
-       return sk->sk_sndbuf - sk->sk_wmem_queued;
+       return READ_ONCE(sk->sk_sndbuf) - sk->sk_wmem_queued;
 }
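
This hunk, like the rest of the series, pairs lockless readers of
sk->sk_sndbuf with READ_ONCE() so the compiler can neither tear the
load nor refetch it while a writer updates the field under the socket
lock; the writers below gain matching WRITE_ONCE() annotations. For the
common single-word case the annotations boil down to a volatile access,
roughly (a sketch, not the full in-kernel definitions, which add size
checks and KCSAN instrumentation):

	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))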
 
 void sk_stream_write_space(struct sock *sk);
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
 {
-       if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+       if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
                return false;
 
        return sk->sk_prot->stream_memory_free ?
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
-       if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-               sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-               sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-       }
+       u32 val;
+
+       if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+               return;
+
+       val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+       WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
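
Beyond the WRITE_ONCE() annotation, the rewrite flattens the old nested
block into an early return when the user pinned the buffer size via
SO_SNDBUF. The arithmetic is unchanged: shrink the send buffer to half
of what is currently queued, but never below SOCK_MIN_SNDBUF. A
standalone sketch of the rule (the 4608 floor is a placeholder; the
real SOCK_MIN_SNDBUF depends on sizeof(struct sk_buff)):

	static unsigned int moderate_sndbuf(unsigned int sndbuf,
					    unsigned int wmem_queued)
	{
		unsigned int floor = 4608;	/* stand-in for SOCK_MIN_SNDBUF */
		unsigned int val = wmem_queued >> 1;

		if (val > sndbuf)
			val = sndbuf;		/* min(sndbuf, queued / 2) */
		return val < floor ? floor : val;	/* clamp to the floor */
	}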
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-       return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+       return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
 }
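
sock_writeable() is the poll()-side heuristic for datagram sockets: the
socket reports itself writable only while less than half of the send
buffer is consumed, so writers are offered room for significant
progress rather than a few bytes at a time (the same rationale appears
in sock_def_write_space() below). For example, with sndbuf = 212992, a
common net.core.wmem_default:

	/* wmem_alloc =  90000 ->  90000 < 106496, EPOLLOUT is reported
	 * wmem_alloc = 120000 -> not writable until the queue drains
	 */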
 
 static inline gfp_t gfp_any(void)
 
                case SO_SNDBUF:
                        val = min_t(u32, val, sysctl_wmem_max);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-                       sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+                       WRITE_ONCE(sk->sk_sndbuf,
+                                  max_t(int, val * 2, SOCK_MIN_SNDBUF));
                        break;
                case SO_MAX_PACING_RATE: /* 32bit version */
                        if (val != ~0U)
 
                 */
                val = min_t(int, val, INT_MAX / 2);
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-               sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          max_t(int, val * 2, SOCK_MIN_SNDBUF));
                /* Wake up sending tasks if we upped the value. */
                sk->sk_write_space(sk);
                break;
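
Both SO_SNDBUF paths (the BPF setsockopt helper above and
sock_setsockopt() here) keep the long-standing Linux behaviour of
storing double the requested value, leaving headroom for struct sk_buff
and other bookkeeping overhead; socket(7) documents that getsockopt()
returns this doubled value. A minimal userspace check (assuming the
request stays below net.core.wmem_max):

	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		int val = 65536, out = 0;
		socklen_t len = sizeof(out);

		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
		getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
		printf("SO_SNDBUF = %d\n", out);	/* 131072: val * 2 */
		return 0;
	}
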
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
 {
-       if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+       if (force ||
+           refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
                struct sk_buff *skb = alloc_skb(size, priority);
+
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+               if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
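
The surrounding loop is the classic lost-wakeup-safe wait: the task
registers itself on the wait queue first and only then re-checks the
condition, so a concurrent write-space callback either sees the waiter
or the waiter sees the freed space. sock_alloc_send_pskb() below
follows the same shape. In outline (a sketch of the pattern, not the
exact kernel loop):

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
		break;			/* condition re-checked while queued */
	timeo = schedule_timeout(timeo);	/* sleep until write space */
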
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;
 
-               if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+               if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
                        break;
 
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
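
The comment from DaveM states the wake-up policy: writers are only
woken once at least half of the send buffer has drained, since
wmem_alloc << 1 <= sndbuf is equivalent to wmem_alloc <= sndbuf / 2,
batching wake-ups instead of firing one per freed skb. With
sndbuf = 131072:

	/* wmem_alloc = 80000 -> 160000 > 131072, waiters stay asleep
	 * wmem_alloc = 60000 -> 120000 <= 131072, EPOLLOUT wake-up fires
	 */
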
        mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
        mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
        mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+       mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
        mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
        mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
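
sk_get_meminfo() illustrates why the annotations are needed in the
first place: it is called from the sock_diag netlink code (the backend
of ss -m) against sockets owned by other threads, so every field is
sampled locklessly; sk_rcvbuf on the line above was converted by the
companion patch annotating sk->sk_rcvbuf. The value sampled here
surfaces as the tb counter in ss output, e.g.:

	/* ss -tm  ->  skmem:(r0,rb131072,t0,tb16384,f0,w0,o0,bl0,d0)
	 * "tb" is SK_MEMINFO_SNDBUF, read without the socket lock.
	 */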
 
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
        WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 
        sk_sockets_allocated_inc(sk);
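
tcp_init_sock() seeds a fresh socket with the middle ("default") entry
of the per-netns tcp_wmem/tcp_rmem triplets; the first and last entries
are the floor and ceiling used by auto-tuning. A sketch, assuming the
usual defaults:

	/* net.ipv4.tcp_wmem = 4096 16384 4194304 (typical defaults):
	 *   sk_sndbuf starts at tcp_wmem[1] = 16384 bytes, and
	 *   tcp_sndbuf_expand() below may grow it up to tcp_wmem[2],
	 *   unless SO_SNDBUF pinned it via SOCK_SNDBUF_LOCK.
	 */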
 
        sndmem *= nr_segs * per_mss;
 
        if (sk->sk_sndbuf < sndmem)
-               sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
 }
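
Auto-tuning only ever grows the buffer here: sndmem is sized from the
per-segment truesize and the number of segments the congestion window
may keep in flight, capped at tcp_wmem[2]. Rough shape of the
computation above this hunk (simplified from the function body; the
default factor of two comes from the congestion-control ops):

	/* per_mss ~ roundup_pow_of_two(mss + headers) + skb overhead
	 * nr_segs ~ max(initial cwnd, packets in flight, reordering + 1)
	 * sndmem  ~ 2 * nr_segs * per_mss
	 * Writers like this one now use WRITE_ONCE() so the READ_ONCE()
	 * readers above observe a consistent value without the lock.
	 */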
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)