diff --git a/net/core/sock.c b/net/core/sock.c
index 7626b6aacd685551cf13bf5fe8fbbbbc6105e56f..76ff58d43e261c34e630034571d7c48f34d16ffc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -274,25 +274,27 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-       int err = 0;
+       int err;
        int skb_len;
+       unsigned long flags;
+       struct sk_buff_head *list = &sk->sk_receive_queue;
 
        /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
           number of warnings when compiling with -W --ANK
         */
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf) {
-               err = -ENOMEM;
-               goto out;
+               atomic_inc(&sk->sk_drops);
+               return -ENOMEM;
        }
 
        err = sk_filter(sk, skb);
        if (err)
-               goto out;
+               return err;
 
        if (!sk_rmem_schedule(sk, skb->truesize)) {
-               err = -ENOBUFS;
-               goto out;
+               atomic_inc(&sk->sk_drops);
+               return -ENOBUFS;
        }
 
        skb->dev = NULL;
@@ -305,12 +307,14 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
         */
        skb_len = skb->len;
 
-       skb_queue_tail(&sk->sk_receive_queue, skb);
+       spin_lock_irqsave(&list->lock, flags);
+       skb->dropcount = atomic_read(&sk->sk_drops);
+       __skb_queue_tail(list, skb);
+       spin_unlock_irqrestore(&list->lock, flags);
 
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb_len);
-out:
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
@@ -348,11 +352,18 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
+void sk_reset_txq(struct sock *sk)
+{
+       sk_tx_queue_clear(sk);
+}
+EXPORT_SYMBOL(sk_reset_txq);
+
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
        struct dst_entry *dst = sk->sk_dst_cache;
 
        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+               sk_tx_queue_clear(sk);
                sk->sk_dst_cache = NULL;
                dst_release(dst);
                return NULL;
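
The hunk above clears the cached tx queue whenever the cached dst is found obsolete, and sk_prot_alloc() further down clears it for freshly allocated sockets; the sk_tx_queue_*() helpers themselves live in include/net/sock.h in this series and are not shown here. A standalone sketch of the assumed semantics (struct sock_stub and the -1 sentinel are illustrative, not the kernel definitions):

#include <stdio.h>

struct sock_stub {
	int sk_tx_queue_mapping;	/* cached tx queue index, -1 when unset */
};

static void sk_tx_queue_set(struct sock_stub *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static int sk_tx_queue_get(const struct sock_stub *sk)
{
	return sk->sk_tx_queue_mapping;
}

static void sk_tx_queue_clear(struct sock_stub *sk)
{
	sk->sk_tx_queue_mapping = -1;	/* force a fresh queue selection */
}

int main(void)
{
	struct sock_stub sk;

	sk_tx_queue_clear(&sk);		/* new socket: nothing cached yet */
	sk_tx_queue_set(&sk, 3);	/* device picked queue 3 for this flow */
	printf("cached queue: %d\n", sk_tx_queue_get(&sk));
	sk_tx_queue_clear(&sk);		/* dst went stale: drop the cache */
	printf("cached queue: %d\n", sk_tx_queue_get(&sk));
	return 0;
}

Clearing alongside sk_dst_cache makes sense because a stale route may end up on a different device or queue count, so the cached queue index should not outlive the dst it was derived from.
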
@@ -406,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
        if (copy_from_user(devname, optval, optlen))
                goto out;
 
-       if (devname[0] == '\0') {
-               index = 0;
-       } else {
-               struct net_device *dev = dev_get_by_name(net, devname);
+       index = 0;
+       if (devname[0] != '\0') {
+               struct net_device *dev;
 
+               rcu_read_lock();
+               dev = dev_get_by_name_rcu(net, devname);
+               if (dev)
+                       index = dev->ifindex;
+               rcu_read_unlock();
                ret = -ENODEV;
                if (!dev)
                        goto out;
-
-               index = dev->ifindex;
-               dev_put(dev);
        }
 
        lock_sock(sk);
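
For context, the sock_bindtodevice() path reworked above is reached from userspace via SO_BINDTODEVICE. A minimal sketch (the device name "eth0" is illustrative, and the call requires CAP_NET_RAW):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	char ifname[IFNAMSIZ] = "eth0";		/* illustrative device name */

	/* An empty string clears the binding, matching the
	 * devname[0] == '\0' -> index = 0 case in the kernel code above. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, strlen(ifname) + 1) < 0)
		perror("SO_BINDTODEVICE");
	return 0;
}
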
@@ -702,6 +714,12 @@ set_rcvbuf:
 
                /* We implement the SO_SNDLOWAT etc to
                   not be settable (1003.1g 5.3) */
+       case SO_RXQ_OVFL:
+               if (valbool)
+                       sock_set_flag(sk, SOCK_RXQ_OVFL);
+               else
+                       sock_reset_flag(sk, SOCK_RXQ_OVFL);
+               break;
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -901,6 +919,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = sk->sk_mark;
                break;
 
+       case SO_RXQ_OVFL:
+               v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
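
The two hunks above add the setsockopt()/getsockopt() plumbing for SO_RXQ_OVFL; once the flag is set, the receive path can report the per-socket drop counter recorded under the queue lock in sock_queue_rcv_skb() as a control message. A hedged userspace sketch, assuming the counter arrives as a __u32 cmsg at SOL_SOCKET level (port 9000 and the lack of error handling are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40			/* value from asm-generic/socket.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	for (;;) {
		char data[2048];
		char cbuf[CMSG_SPACE(sizeof(uint32_t))];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		if (recvmsg(fd, &msg, 0) < 0)
			break;
		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET &&
			    cmsg->cmsg_type == SO_RXQ_OVFL) {
				uint32_t dropped;

				memcpy(&dropped, CMSG_DATA(cmsg), sizeof(dropped));
				/* snapshot of sk_drops taken when this skb was queued */
				printf("drops at enqueue time: %u\n", dropped);
			}
		}
	}
	return 0;
}

Because skb->dropcount is a snapshot of sk_drops taken at enqueue time, comparing the values on consecutive packets tells the application how many packets were lost in between.
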
@@ -939,7 +961,8 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
        void *sptr = nsk->sk_security;
 #endif
        BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-                    sizeof(osk->sk_node) + sizeof(osk->sk_refcnt));
+                    sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
+                    sizeof(osk->sk_tx_queue_mapping));
        memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
               osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
 #ifdef CONFIG_SECURITY_NETWORK
@@ -983,6 +1006,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 
                if (!try_module_get(prot->owner))
                        goto out_free_sec;
+               sk_tx_queue_clear(sk);
        }
 
        return sk;