tcp: remove READ_ONCE(req->ts_recent)
author Eric Dumazet <edumazet@google.com>
Sat, 1 Mar 2025 20:14:23 +0000 (20:14 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Mon, 3 Mar 2025 23:44:19 +0000 (15:44 -0800)
After commit 8d52da23b6c6 ("tcp: Defer ts_recent changes
until req is owned"), req->ts_recent is no longer changed.

It is set once, in tcp_openreq_init(), bpf_sk_assign_tcp_reqsk()
or cookie_tcp_reqsk_alloc(), before the req can be seen by other
CPUs/threads.

This completes the revert of eba20811f326 ("tcp: annotate
data-races around tcp_rsk(req)->ts_recent").
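
To illustrate why the plain reads are safe, here is a minimal
user-space sketch of the publish-once pattern the changelog
describes. It is not kernel code: fake_req, published and the
pthread reader are made-up stand-ins for the request socket, the
SYN table and another CPU touching the req; only the ordering
argument (write the field, then publish with release/acquire
semantics) mirrors what tcp_openreq_init() and the request-hash
insertion provide.

/*
 * Minimal sketch (not kernel code): a field written exactly once,
 * before the object is published to other threads, can be read
 * afterwards with a plain load. The release store that publishes
 * the pointer orders the earlier plain write.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	unsigned int ts_recent;		/* written once before publication */
};

static _Atomic(struct fake_req *) published;	/* stand-in for the SYN table */

static void *reader(void *arg)
{
	struct fake_req *req;

	(void)arg;
	/* Acquire load pairs with the release store in main(). */
	while (!(req = atomic_load_explicit(&published, memory_order_acquire)))
		;

	/* Plain read is fine: ts_recent cannot change after publication. */
	printf("ts_recent = %u\n", req->ts_recent);
	return NULL;
}

int main(void)
{
	struct fake_req *req = malloc(sizeof(*req));
	pthread_t t;

	if (!req)
		return 1;

	pthread_create(&t, NULL, reader, NULL);

	req->ts_recent = 12345;		/* set once, as tcp_openreq_init() does */

	/* Publish: only after this store may other threads see the req. */
	atomic_store_explicit(&published, req, memory_order_release);

	pthread_join(t, NULL);
	free(req);
	return 0;
}

Under this assumption, READ_ONCE()/WRITE_ONCE() would only be
needed if ts_recent could still change after the req is published,
which is exactly what commit 8d52da23b6c6 ruled out.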

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Wang Hai <wanghai38@huawei.com>
Reviewed-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250301201424.2046477-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv6/tcp_ipv6.c

net/ipv4/tcp_ipv4.c
index ae07613e4f335063723f49d7fd70a240412922ef..d9405b012dff079f7cafd9d422ff4445a27eb064 100644
@@ -1155,7 +1155,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        tcp_rsk(req)->rcv_nxt,
                        tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
                        tcp_rsk_tsval(tcp_rsk(req)),
-                       READ_ONCE(req->ts_recent),
+                       req->ts_recent,
                        0, &key,
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos,
net/ipv4/tcp_minisocks.c
index ba4a5d7f251d8ed093b38155d9b1a9f50bfcfe32..3cb8f281186b205e2b03d1b78e1750a024b94f6a 100644
@@ -585,7 +585,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 
        if (newtp->rx_opt.tstamp_ok) {
                newtp->tcp_usec_ts = treq->req_usec_ts;
-               newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
+               newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
@@ -673,7 +673,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
 
                if (tmp_opt.saw_tstamp) {
-                       tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
+                       tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr) {
                                if (inet_rsk(req)->tstamp_ok && !fastopen)
                                        tsecr_reject = !between(tmp_opt.rcv_tsecr,
net/ipv4/tcp_output.c
index 0a660075add5bea05a61b4fe2d9d334a89d956a7..24e56bf96747253c1a508ddfe27ebd38da7c219e 100644
@@ -949,7 +949,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                        tcp_rsk(req)->snt_tsval_first = opts->tsval;
                }
                WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval);
-               opts->tsecr = READ_ONCE(req->ts_recent);
+               opts->tsecr = req->ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
        if (likely(ireq->sack_ok)) {
net/ipv6/tcp_ipv6.c
index fe75ad8e606cbca77d69326dc00273e7b214edee..85c4820bfe1588e4553784129d13408dea70763a 100644
@@ -1279,7 +1279,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                        tcp_rsk(req)->rcv_nxt,
                        tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
                        tcp_rsk_tsval(tcp_rsk(req)),
-                       READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
+                       req->ts_recent, sk->sk_bound_dev_if,
                        &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
                        READ_ONCE(sk->sk_priority),
                        READ_ONCE(tcp_rsk(req)->txhash));