tcp: do not block bh during prequeue processing
author Eric Dumazet <edumazet@google.com>
Fri, 29 Apr 2016 21:16:48 +0000 (14:16 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 2 May 2016 21:02:25 +0000 (17:02 -0400)
AFAIK, nothing in the current TCP stack absolutely requires BH
to be disabled once the socket is owned by a thread running in
process context.

As mentioned in my prior patch ("tcp: give prequeue mode some care"),
processing a batch of packets might take time, so it is better not to
block BH at all.
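
For context, the guarantee this relies on is the socket ownership
protocol rather than BH exclusion: while a process-context thread owns
the socket, the softirq receive path parks incoming segments on the
backlog instead of touching protocol state. A minimal sketch of that
dispatch, simplified from tcp_v4_rcv() (the helper names are real; the
wrapper function itself is hypothetical):

static int example_rcv_dispatch(struct sock *sk, struct sk_buff *skb)
{
        int ret = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                ret = tcp_v4_do_rcv(sk, skb);   /* process in softirq */
        else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf))
                ret = -ENOBUFS; /* backlog full: caller drops the skb */
        bh_unlock_sock(sk);

        return ret;
}

The owner later drains the backlog from release_sock(), so nothing is
lost; the two contexts simply never run the protocol code at the same
time.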

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp.c
net/ipv4/tcp_input.c

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b24c6ed4a04f301cf2e174d5d55525d014a90f89..4787f86ae64cfa96509a197b4f5fbe86426905cc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1449,12 +1449,8 @@ static void tcp_prequeue_process(struct sock *sk)
 
        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
-       /* RX process wants to run with disabled BHs, though it is not
-        * necessary */
-       local_bh_disable();
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk_backlog_rcv(sk, skb);
-       local_bh_enable();
 
        /* Clear memory counter. */
        tp->ucopy.memory = 0;
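
With the bracket gone, the whole function runs in process context with
BH enabled, so softirqs can keep feeding the backlog while a large
batch drains. Post-patch, the function reads roughly as follows (the
opening declarations are not in the hunk context and are
reconstructed, so treat this as a sketch rather than a quote):

static void tcp_prequeue_process(struct sock *sk)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);

        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

        /* Entirely in process context now: no local_bh_disable() */
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk_backlog_rcv(sk, skb);

        /* Clear memory counter. */
        tp->ucopy.memory = 0;
}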
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ac85fb42a5a2473406210b705bb2dcafc385a50a..6171f92be0903f5a5d17f027dbe6b31829bcc043 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4611,14 +4611,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
                        __set_current_state(TASK_RUNNING);
 
-                       local_bh_enable();
                        if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
                                tp->ucopy.len -= chunk;
                                tp->copied_seq += chunk;
                                eaten = (chunk == skb->len);
                                tcp_rcv_space_adjust(sk);
                        }
-                       local_bh_disable();
                }
 
                if (eaten <= 0) {
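
The pair removed above existed because skb_copy_datagram_msg() writes
into user memory, which can fault and sleep, and sleeping with BH
disabled is forbidden; the copy therefore had to be bracketed by
local_bh_enable()/local_bh_disable(). Now that BH is never disabled on
entry, the bracket is simply dropped. An illustrative fragment of the
new invariant (hypothetical, relying on might_sleep() warning whenever
preemption or BH is disabled):

        /* Copying into user buffers may fault and block: */
        might_sleep();
        err = skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk);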
@@ -5134,7 +5132,6 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        int chunk = skb->len - hlen;
        int err;
 
-       local_bh_enable();
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
        else
@@ -5146,32 +5143,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
                tcp_rcv_space_adjust(sk);
        }
 
-       local_bh_disable();
        return err;
 }
 
-static __sum16 __tcp_checksum_complete_user(struct sock *sk,
-                                           struct sk_buff *skb)
-{
-       __sum16 result;
-
-       if (sock_owned_by_user(sk)) {
-               local_bh_enable();
-               result = __tcp_checksum_complete(skb);
-               local_bh_disable();
-       } else {
-               result = __tcp_checksum_complete(skb);
-       }
-       return result;
-}
-
-static inline bool tcp_checksum_complete_user(struct sock *sk,
-                                            struct sk_buff *skb)
-{
-       return !skb_csum_unnecessary(skb) &&
-              __tcp_checksum_complete_user(sk, skb);
-}
-
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
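
With BH no longer toggled around user copies, the sock_owned_by_user()
special case in __tcp_checksum_complete_user() has no reason to exist,
and callers switch to the plain helper. For reference, that helper (in
include/net/tcp.h) is, to the best of my knowledge, equivalent to:

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
               __tcp_checksum_complete(skb);
}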
@@ -5386,7 +5360,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                }
                        }
                        if (!eaten) {
-                               if (tcp_checksum_complete_user(sk, skb))
+                               if (tcp_checksum_complete(skb))
                                        goto csum_error;
 
                                if ((int)skb->truesize > sk->sk_forward_alloc)
@@ -5430,7 +5404,7 @@ no_ack:
        }
 
 slow_path:
-       if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
+       if (len < (th->doff << 2) || tcp_checksum_complete(skb))
                goto csum_error;
 
        if (!th->ack && !th->rst && !th->syn)
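
For completeness, the process-context side that benefits: tcp_recvmsg()
drains the prequeue while holding the socket lock. A hypothetical
caller shape (illustrative only; the real logic in tcp_recvmsg() is
considerably more involved):

static void example_drain_prequeue(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        lock_sock(sk);          /* sock_owned_by_user(sk) becomes true */

        if (!skb_queue_empty(&tp->ucopy.prequeue))
                tcp_prequeue_process(sk);       /* BH stays enabled */

        release_sock(sk);       /* also processes any queued backlog */
}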