mptcp: annotate lockless accesses to sk->sk_err
author		Eric Dumazet <edumazet@google.com>
Wed, 15 Mar 2023 20:57:45 +0000 (20:57 +0000)
committer	David S. Miller <davem@davemloft.net>
Fri, 17 Mar 2023 08:25:05 +0000 (08:25 +0000)
mptcp_poll() reads sk->sk_err without the socket lock being held or owned.

Add READ_ONCE() and WRITE_ONCE() to avoid load/store tearing.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
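
[Editor's note] For context, READ_ONCE()/WRITE_ONCE() force the compiler to emit a single, whole load or store and forbid it from tearing, fusing, or re-reading the access. That is the property the plain sk->sk_err accesses lacked once mptcp_poll() started reading the field without the socket lock. A minimal, hypothetical sketch of the pattern follows; shared_err, set_err() and has_err() are made up for illustration and are not part of this patch:

	#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */

	static int shared_err;		/* hypothetical field read without a lock */

	/* Writer side: may run under a lock the reader does not take. */
	static void set_err(int err)
	{
		/* A plain "shared_err = err;" may legally be split into
		 * several partial stores; WRITE_ONCE() keeps it whole.
		 */
		WRITE_ONCE(shared_err, err);
	}

	/* Reader side: lockless, e.g. a poll() callback. */
	static bool has_err(void)
	{
		/* READ_ONCE() performs one whole load and stops the
		 * compiler from re-loading the value and observing two
		 * different results within the same function.
		 */
		return READ_ONCE(shared_err) != 0;
	}
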
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/subflow.c

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 56628b52d1001a967eb2e504bdbeac0c4cd17acc..cbaa1b49f7fe949b9de8f4be0cf74cea6cecc106 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -2019,7 +2019,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
            nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
                return -EMSGSIZE;
 
-       sk_err = ssk->sk_err;
+       sk_err = READ_ONCE(ssk->sk_err);
        if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
            nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
                return -EMSGSIZE;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3ad9c46202fc63a5b3a870bf2ba994a8d9148264..3005a5adf715e8d147c119b0b4c13fcc58fe99f6 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2463,15 +2463,15 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
        /* Mirror the tcp_reset() error propagation */
        switch (sk->sk_state) {
        case TCP_SYN_SENT:
-               sk->sk_err = ECONNREFUSED;
+               WRITE_ONCE(sk->sk_err, ECONNREFUSED);
                break;
        case TCP_CLOSE_WAIT:
-               sk->sk_err = EPIPE;
+               WRITE_ONCE(sk->sk_err, EPIPE);
                break;
        case TCP_CLOSE:
                return;
        default:
-               sk->sk_err = ECONNRESET;
+               WRITE_ONCE(sk->sk_err, ECONNRESET);
        }
 
        inet_sk_state_store(sk, TCP_CLOSE);
@@ -3791,7 +3791,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
 
        /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
        smp_rmb();
-       if (sk->sk_err)
+       if (READ_ONCE(sk->sk_err))
                mask |= EPOLLERR;
 
        return mask;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 4ae1a7304cf0da1840a1d236969549d18cf8ff97..01874059a16865ecb4ec464443f68a30c814f565 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1335,7 +1335,7 @@ fallback:
                        subflow->reset_reason = MPTCP_RST_EMPTCP;
 
 reset:
-                       ssk->sk_err = EBADMSG;
+                       WRITE_ONCE(ssk->sk_err, EBADMSG);
                        tcp_set_state(ssk, TCP_CLOSE);
                        while ((skb = skb_peek(&ssk->sk_receive_queue)))
                                sk_eat_skb(ssk, skb);
@@ -1419,7 +1419,7 @@ void __mptcp_error_report(struct sock *sk)
                ssk_state = inet_sk_state_load(ssk);
                if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
                        inet_sk_state_store(sk, ssk_state);
-               sk->sk_err = -err;
+               WRITE_ONCE(sk->sk_err, -err);
 
                /* This barrier is coupled with smp_rmb() in mptcp_poll() */
                smp_wmb();
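
[Editor's note] The barrier comments kept by the last two hunks describe a publish/consume pairing: __mptcp_error_report() stores the error and issues smp_wmb() before the poll path is woken, while mptcp_poll() issues smp_rmb() before loading sk_err, so a woken poller that sees the updated socket state also sees the error value. A generic sketch of that pairing, with hypothetical names (data, ready, publish(), consume()) that are not part of the patch:

	#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
	#include <asm/barrier.h>	/* smp_wmb() / smp_rmb() */

	static int data;		/* e.g. an error code */
	static int ready;		/* e.g. a flag the reader polls */

	/* Writer: publish the payload before the flag becomes visible. */
	static void publish(int err)
	{
		WRITE_ONCE(data, err);
		smp_wmb();		/* order the data store before the flag store */
		WRITE_ONCE(ready, 1);
	}

	/* Reader: observe the flag, then order the data load after it. */
	static int consume(void)
	{
		if (!READ_ONCE(ready))
			return 0;
		smp_rmb();		/* pairs with smp_wmb() in publish() */
		return READ_ONCE(data);
	}

The READ_ONCE()/WRITE_ONCE() annotations added by this patch complement those barriers: the barriers order the accesses across CPUs, while the _ONCE() accessors keep each individual access from being torn by the compiler.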