net: annotate lockless accesses to sk->sk_max_ack_backlog
Author: Eric Dumazet <edumazet@google.com>
Tue, 5 Nov 2019 22:11:54 +0000 (14:11 -0800)
Committer: David S. Miller <davem@davemloft.net>
Thu, 7 Nov 2019 00:14:48 +0000 (16:14 -0800)
sk->sk_max_ack_backlog can be read without any lock being held
at least in TCP/DCCP cases.

We need to use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing
and/or potential KCSAN warnings.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/dccp/proto.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/sched/em_meta.c
net/sctp/diag.c
net/sctp/socket.c

index a126784aa7d9b6f59c8937c8c94d5bd7843988a4..d4d3ef5ba0490366e1e25884a5edf54186c940d8 100644 (file)
@@ -869,7 +869,7 @@ static inline void sk_acceptq_added(struct sock *sk)
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return READ_ONCE(sk->sk_ack_backlog) > sk->sk_max_ack_backlog;
+       return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
index 5bad08dc431611d5387c8d3c1858ee2c43cb9b68..a52e8ba1ced046b178fa069b1e0d690c537c6bc0 100644 (file)
@@ -944,7 +944,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
        if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
                goto out;
 
-       sk->sk_max_ack_backlog = backlog;
+       WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
index 70f92aaca4110b3ecd691949203f28978597e9c9..53de8e00990e276448df1c60e47620be3b58f517 100644 (file)
@@ -208,7 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
        if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                goto out;
 
-       sk->sk_max_ack_backlog = backlog;
+       WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
        /* Really, if the socket is already in listen state
         * we can only allow the backlog to be adjusted.
         */
index eb30fc1770def741950215f59a4e3ab0f91c6293..e4c6e8b4049063f5239a5e99a185016ad3bb5790 100644 (file)
@@ -716,7 +716,7 @@ static void reqsk_timer_handler(struct timer_list *t)
         * ones are about to clog our table.
         */
        qlen = reqsk_queue_len(queue);
-       if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+       if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
                int young = reqsk_queue_len_young(queue) << 1;
 
                while (thresh > 2) {
index 68375f7ffdce1fbbb4cf443660703c98b61fd9e3..fb1666440e1064a9ab2f2993b23fdb744e82f5c5 100644 (file)
@@ -3226,7 +3226,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
                 * tcpi_sacked  -> max backlog
                 */
                info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
-               info->tcpi_sacked = sk->sk_max_ack_backlog;
+               info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
                return;
        }
 
index edfbab54c46f4cac1b0a7960718d0b6308978957..0d08f9e2d8d0322fcdd3a465a3a9712b36605954 100644 (file)
@@ -22,7 +22,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 
        if (inet_sk_state_load(sk) == TCP_LISTEN) {
                r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
-               r->idiag_wqueue = sk->sk_max_ack_backlog;
+               r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
        } else if (sk->sk_type == SOCK_STREAM) {
                const struct tcp_sock *tp = tcp_sk(sk);
 
index ebb6e2430861d23a42431e4143f229395d9321c5..d99966a55c84fa0f5142ed72faeceb9baab86f5e 100644 (file)
@@ -532,7 +532,7 @@ META_COLLECTOR(int_sk_max_ack_bl)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_max_ack_backlog;
+       dst->value = READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 META_COLLECTOR(int_sk_prio)
index f873f15407de4e7d9a246d41e07602f33da8064d..8a15146faaebdcb869233a08318e4fb5a1e1129b 100644 (file)
@@ -426,7 +426,7 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                r->idiag_wqueue = infox->asoc->sndbuf_used;
        } else {
                r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
-               r->idiag_wqueue = sk->sk_max_ack_backlog;
+               r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
        }
        if (infox->sctpinfo)
                sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
index ffd3262b7a41eac2e3d825c3f0665066f376ea3c..53abb97e0061c14fd4a9c3090a4a5cbe0af9c5a9 100644 (file)
@@ -8376,7 +8376,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
                }
        }
 
-       sk->sk_max_ack_backlog = backlog;
+       WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
        return sctp_hash_endpoint(ep);
 }
 
@@ -8430,7 +8430,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 
        /* If we are already listening, just update the backlog */
        if (sctp_sstate(sk, LISTENING))
-               sk->sk_max_ack_backlog = backlog;
+               WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
        else {
                err = sctp_listen_start(sk, backlog);
                if (err)