net: minor: tcp: use tcp_skb_mss helper in tcp_tso_segment
[linux-2.6-block.git] / net/ipv4/tcp.c
index ab450c099aa49a3d4b68ca531e7f5426a8eabaf1..6a1cf95abc987ddd335f879c7a567c34769c83c7 100644
@@ -436,6 +436,8 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
 
+       sock_rps_record_flow(sk);
+
        sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
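Although the subject line names only the tcp_skb_mss() cleanup, this blob-to-blob diff spans several independent changes; the hunk above is the first of them. tcp_poll() now calls sock_rps_record_flow(), which notes the CPU the polling task is running on, keyed by the socket's receive hash, so Receive Flow Steering can steer later packets of that flow toward the consuming CPU. A minimal userspace sketch of that bookkeeping follows; the table layout and names are invented for illustration and are not the kernel's rps_sock_flow_table.

/* Illustrative sketch of flow-steering bookkeeping: remember the CPU a
 * flow's consumer runs on, keyed by the flow hash, so later packets for
 * that flow can be steered to the same CPU.  Table layout and names are
 * invented for this example.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_TABLE_SIZE 256		/* power of two so hash & mask works */
static int flow_cpu[FLOW_TABLE_SIZE];	/* last CPU seen consuming each flow */

static void record_flow(uint32_t rxhash)
{
	/* "this flow is being consumed on the current CPU" */
	flow_cpu[rxhash & (FLOW_TABLE_SIZE - 1)] = sched_getcpu();
}

static int steer_flow(uint32_t rxhash)
{
	/* later packet processing prefers the recorded CPU */
	return flow_cpu[rxhash & (FLOW_TABLE_SIZE - 1)];
}

int main(void)
{
	uint32_t rxhash = 0xdeadbeef;	/* stand-in for a socket's rx hash */

	record_flow(rxhash);		/* what poll()/recvmsg() would trigger */
	printf("flow 0x%x -> cpu %d\n", rxhash, steer_flow(rxhash));
	return 0;
}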
@@ -2903,7 +2905,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);
 
-       mss = skb_shinfo(skb)->gso_size;
+       mss = tcp_skb_mss(skb);
        if (unlikely(skb->len <= mss))
                goto out;
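The substitution above (and its twin in the GRO receive path further down) is a pure readability cleanup: tcp_skb_mss() is a trivial inline accessor in include/net/tcp.h that returns the skb's gso_size, so behaviour is unchanged. Roughly, the helper has this shape (a sketch, not a verbatim quote):

static inline unsigned int tcp_skb_mss(const struct sk_buff *skb)
{
	/* the per-segment size that GSO/GRO recorded for this skb */
	return skb_shinfo(skb)->gso_size;
}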
 
@@ -2917,6 +2919,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
+                              SKB_GSO_MPLS |
                               SKB_GSO_UDP_TUNNEL |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
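tcp_tso_segment() can only fix up GSO types it understands, so it validates gso_type against an explicit whitelist and bails out if any unknown bit is set or if neither TCP bit is present; this hunk simply adds SKB_GSO_MPLS to the accepted set. A standalone toy version of that whitelist-style check (flag values invented for the example):

/* Whitelist check: reject if any bit outside the known set is present,
 * or if none of the mandatory bits is present.  Flag values are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define F_TCPV4		(1 << 0)
#define F_TCPV6		(1 << 1)
#define F_ECN		(1 << 2)
#define F_TUNNEL	(1 << 3)

static bool type_is_acceptable(unsigned int type)
{
	if (type & ~(F_TCPV4 | F_TCPV6 | F_ECN | F_TUNNEL))
		return false;			/* unknown feature bit */
	return type & (F_TCPV4 | F_TCPV6);	/* must be a TCP flavour */
}

int main(void)
{
	printf("%d\n", type_is_acceptable(F_TCPV4 | F_ECN));	/* 1 */
	printf("%d\n", type_is_acceptable(F_TUNNEL));		/* 0: no TCP bit */
	printf("%d\n", type_is_acceptable(F_TCPV6 | (1 << 7)));/* 0: unknown bit */
	return 0;
}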
@@ -2988,7 +2991,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                swap(gso_skb->truesize, skb->truesize);
        }
 
-       delta = htonl(oldlen + (skb->tail - skb->transport_header) +
+       delta = htonl(oldlen + (skb_tail_pointer(skb) -
+                               skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                (__force u32)delta));
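This hunk only swaps open-coded field arithmetic for the skb_tail_pointer()/skb_transport_header() accessors; the value computed, and therefore the checksum fix-up, is unchanged. The fix-up itself is the interesting part: oldlen was set earlier to the one's-complement of the original length, and adding ~old_len plus the new length to the existing checksum (then folding the carries) is the standard incremental update of RFC 1624, so each segment avoids a full recomputation. A small self-contained illustration of that arithmetic (userspace sketch in host byte order, not kernel code):

/* Incremental checksum fix-up: instead of recomputing the checksum after
 * a 16-bit length field changes, add ~old_value + new_value and fold the
 * carries (RFC 1624).  Standalone userspace sketch.
 */
#include <stdint.h>
#include <stdio.h>

/* Standard internet checksum over a buffer (host-endian arithmetic). */
static uint16_t csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[8] = { 0x12, 0x34, 0x00, 0x40, 0xab, 0xcd, 0xef, 0x01 };
	uint16_t old_len = 0x0040, new_len = 0x0010;	/* bytes 2-3 of pkt */
	uint16_t check = csum(pkt, sizeof(pkt));
	uint32_t sum;

	/* Incremental update: ~HC + ~m + m', then fold and complement. */
	sum = (uint16_t)~check;
	sum += (uint16_t)~old_len;
	sum += new_len;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	check = (uint16_t)~sum;

	/* Verify against a full recomputation over the patched buffer. */
	pkt[2] = new_len >> 8;
	pkt[3] = new_len & 0xff;
	printf("incremental %#06x, recomputed %#06x\n",
	       check, csum(pkt, sizeof(pkt)));
	return 0;
}

Running it prints the same value twice (0x52ec for these inputs), confirming the shortcut matches a full recomputation.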
@@ -3067,7 +3071,7 @@ found:
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
 
-       mss = skb_shinfo(p)->gso_size;
+       mss = tcp_skb_mss(p);
 
        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
@@ -3115,9 +3119,8 @@ int tcp_gro_complete(struct sk_buff *skb)
 EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
-static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
-static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_MUTEX(tcp_md5sig_mutex);
 
 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
 {
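This hunk and everything below it are one logical change: the MD5 signature pool stops being a reference-counted object guarded by a spinlock and becomes a single __read_mostly per-cpu pool that is allocated at most once and never freed. The users counter and tcp_md5sig_pool_lock disappear; the new tcp_md5sig_mutex serializes only the rare allocation path, and readers become completely lockless.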
@@ -3132,30 +3135,14 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
        free_percpu(pool);
 }
 
-void tcp_free_md5sig_pool(void)
-{
-       struct tcp_md5sig_pool __percpu *pool = NULL;
-
-       spin_lock_bh(&tcp_md5sig_pool_lock);
-       if (--tcp_md5sig_users == 0) {
-               pool = tcp_md5sig_pool;
-               tcp_md5sig_pool = NULL;
-       }
-       spin_unlock_bh(&tcp_md5sig_pool_lock);
-       if (pool)
-               __tcp_free_md5sig_pool(pool);
-}
-EXPORT_SYMBOL(tcp_free_md5sig_pool);
-
-static struct tcp_md5sig_pool __percpu *
-__tcp_alloc_md5sig_pool(struct sock *sk)
+static void __tcp_alloc_md5sig_pool(void)
 {
        int cpu;
        struct tcp_md5sig_pool __percpu *pool;
 
        pool = alloc_percpu(struct tcp_md5sig_pool);
        if (!pool)
-               return NULL;
+               return;
 
        for_each_possible_cpu(cpu) {
                struct crypto_hash *hash;
@@ -3166,53 +3153,27 @@ __tcp_alloc_md5sig_pool(struct sock *sk)
 
                per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
        }
-       return pool;
+       /* before setting tcp_md5sig_pool, we must commit all writes
+        * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+        */
+       smp_wmb();
+       tcp_md5sig_pool = pool;
+       return;
 out_free:
        __tcp_free_md5sig_pool(pool);
-       return NULL;
 }
 
-struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+bool tcp_alloc_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool __percpu *pool;
-       bool alloc = false;
-
-retry:
-       spin_lock_bh(&tcp_md5sig_pool_lock);
-       pool = tcp_md5sig_pool;
-       if (tcp_md5sig_users++ == 0) {
-               alloc = true;
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-       } else if (!pool) {
-               tcp_md5sig_users--;
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-               cpu_relax();
-               goto retry;
-       } else
-               spin_unlock_bh(&tcp_md5sig_pool_lock);
-
-       if (alloc) {
-               /* we cannot hold spinlock here because this may sleep. */
-               struct tcp_md5sig_pool __percpu *p;
-
-               p = __tcp_alloc_md5sig_pool(sk);
-               spin_lock_bh(&tcp_md5sig_pool_lock);
-               if (!p) {
-                       tcp_md5sig_users--;
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-                       return NULL;
-               }
-               pool = tcp_md5sig_pool;
-               if (pool) {
-                       /* oops, it has already been assigned. */
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-                       __tcp_free_md5sig_pool(p);
-               } else {
-                       tcp_md5sig_pool = pool = p;
-                       spin_unlock_bh(&tcp_md5sig_pool_lock);
-               }
+       if (unlikely(!tcp_md5sig_pool)) {
+               mutex_lock(&tcp_md5sig_mutex);
+
+               if (!tcp_md5sig_pool)
+                       __tcp_alloc_md5sig_pool();
+
+               mutex_unlock(&tcp_md5sig_mutex);
        }
-       return pool;
+       return tcp_md5sig_pool != NULL;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
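The rewritten tcp_alloc_md5sig_pool() above is the classic check / lock / re-check pattern: an unlocked fast-path test of the pointer, the mutex, then a second test so only one caller actually builds the pool; the function now merely reports success as a bool rather than handing the pool back. A userspace analogue of that once-only initialization (pthread mutex standing in for tcp_md5sig_mutex; names and the build step are illustrative):

/* Once-only initialization with check / lock / re-check.  The resource is
 * built at most once; the slow path takes a mutex, the fast path only
 * tests the pointer.  Userspace analogue, not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct resource { int ready; };

static struct resource *resource;	/* published once, never freed */
static pthread_mutex_t resource_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct resource *build_resource(void)
{
	struct resource *r = calloc(1, sizeof(*r));

	if (r)
		r->ready = 1;
	return r;
}

static bool alloc_resource(void)
{
	if (!resource) {			/* cheap unlocked check */
		pthread_mutex_lock(&resource_mutex);
		if (!resource)			/* re-check under the lock */
			resource = build_resource();
		pthread_mutex_unlock(&resource_mutex);
	}
	return resource != NULL;
}

int main(void)
{
	printf("first call:  %s\n", alloc_resource() ? "ok" : "failed");
	printf("second call: %s\n", alloc_resource() ? "ok" : "failed");
	return 0;
}

Strictly speaking the unlocked read races with the publishing store; the kernel version closes that gap with the smp_wmb() added above and the ACCESS_ONCE() in the next hunk, mapped onto C11 atomics in the final sketch below.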
 
@@ -3229,28 +3190,15 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
        struct tcp_md5sig_pool __percpu *p;
 
        local_bh_disable();
-
-       spin_lock(&tcp_md5sig_pool_lock);
-       p = tcp_md5sig_pool;
+       p = ACCESS_ONCE(tcp_md5sig_pool);
        if (p)
-               tcp_md5sig_users++;
-       spin_unlock(&tcp_md5sig_pool_lock);
-
-       if (p)
-               return this_cpu_ptr(p);
+               return __this_cpu_ptr(p);
 
        local_bh_enable();
        return NULL;
 }
 EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-void tcp_put_md5sig_pool(void)
-{
-       local_bh_enable();
-       tcp_free_md5sig_pool();
-}
-EXPORT_SYMBOL(tcp_put_md5sig_pool);
-
 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
                        const struct tcphdr *th)
 {
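The read side completes the picture: tcp_get_md5sig_pool() now performs a single ACCESS_ONCE() load of the pool pointer under local_bh_disable() and, if it is non-NULL, returns this CPU's entry via __this_cpu_ptr() (fine here because disabling bottom halves already pins the task to the CPU). Since the writer issued smp_wmb() after initializing the per-cpu crypto state and before publishing the pointer, any reader that observes a non-NULL pointer also observes initialized contents; and because the pool is never freed, tcp_put_md5sig_pool() loses its refcount-dropping role and is removed from this file. A C11-atomics sketch of that publish/consume pairing (a userspace analogue, not the kernel primitives):

/* Publish/consume pairing: the writer fully initializes the object and
 * then publishes the pointer with release semantics; a reader that loads
 * a non-NULL pointer may use the object.  Release/acquire atomics stand
 * in for smp_wmb()/ACCESS_ONCE(); userspace sketch, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pool { int key_len; };

static _Atomic(struct pool *) the_pool;		/* starts out NULL */

static void publisher(void)
{
	struct pool *p = malloc(sizeof(*p));

	if (!p)
		return;
	p->key_len = 16;	/* initialize the object first ... */
	/* ... then publish it: the release store plays the role of the
	 * smp_wmb() + pointer assignment in __tcp_alloc_md5sig_pool().
	 */
	atomic_store_explicit(&the_pool, p, memory_order_release);
}

static struct pool *reader(void)
{
	/* One careful load of the pointer (the ACCESS_ONCE() role);
	 * acquire ordering is a slightly stronger, portable stand-in.
	 */
	return atomic_load_explicit(&the_pool, memory_order_acquire);
}

int main(void)
{
	struct pool *p;

	publisher();
	p = reader();
	if (p)
		printf("saw initialized pool, key_len=%d\n", p->key_len);
	return 0;
}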