tcp: annotate lockless access to tcp_memory_pressure
author Eric Dumazet <edumazet@google.com>
Wed, 9 Oct 2019 22:10:15 +0000 (15:10 -0700)
committer Jakub Kicinski <jakub.kicinski@netronome.com>
Thu, 10 Oct 2019 04:35:00 +0000 (21:35 -0700)
tcp_memory_pressure is read without holding any lock,
and its value can be changed by other CPUs at any time.

Use READ_ONCE() to annotate these lockless reads.

The write side is already using atomic ops.

Fixes: b8da51ebb1aa ("tcp: introduce tcp_under_memory_pressure()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
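
For readers unfamiliar with the pattern, below is a minimal userspace sketch
(not the kernel code itself) of the scheme this patch annotates: lockless
readers go through a READ_ONCE()-style volatile access, while the write side
stays atomic. READ_ONCE() is approximated here with a volatile cast, and the
atomic ops with GCC/Clang __atomic builtins standing in for the kernel's
helpers; all names are illustrative.

	/* Minimal userspace sketch of the pattern above (illustrative only). */
	#include <stdio.h>

	/* Approximation of the kernel's READ_ONCE(): force a single,
	 * non-torn load that the compiler cannot cache or re-read. */
	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

	static unsigned long memory_pressure;	/* stand-in for tcp_memory_pressure */

	static int under_pressure(void)
	{
		/* Lockless read: another CPU may change the flag at any time. */
		return READ_ONCE(memory_pressure) != 0;
	}

	static void enter_pressure(unsigned long now)
	{
		unsigned long expected = 0;

		if (READ_ONCE(memory_pressure))
			return;
		/* Atomic write side: only one caller wins the 0 -> now transition. */
		if (__atomic_compare_exchange_n(&memory_pressure, &expected, now,
						0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			printf("entered memory pressure at %lu\n", now);
	}

	static void leave_pressure(void)
	{
		unsigned long old;

		if (!READ_ONCE(memory_pressure))
			return;
		/* Exchange-style write: clear the flag, observe the old value. */
		old = __atomic_exchange_n(&memory_pressure, 0, __ATOMIC_SEQ_CST);
		if (old)
			printf("left memory pressure, was set at %lu\n", old);
	}

	int main(void)
	{
		enter_pressure(12345);
		printf("under pressure: %d\n", under_pressure());
		leave_pressure();
		return 0;
	}

Without the READ_ONCE() annotation the reads would still usually work, but
the compiler is free to tear, fuse, or re-load a plain access, and tools such
as KCSAN flag it as a data race; the annotation documents and constrains the
lockless access.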
include/net/tcp.h
net/ipv4/tcp.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index c9a3f9688223b231e5a8c90c2494ad85f3d63cfc..88e63d64c698229379a953101a8aab2bca55ed1a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
-       return tcp_memory_pressure;
+       return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f98a1882e537dca0102e829cb349be50302d83ab..888c92b63f5a6dc4b935cca7c979c1e559126d44 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (tcp_memory_pressure)
+       if (READ_ONCE(tcp_memory_pressure))
                return;
        val = jiffies;
 
@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (!tcp_memory_pressure)
+       if (!READ_ONCE(tcp_memory_pressure))
                return;
        val = xchg(&tcp_memory_pressure, 0);
        if (val)