Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next
authorDavid S. Miller <davem@davemloft.net>
Tue, 21 Jan 2020 11:18:20 +0000 (12:18 +0100)
committerDavid S. Miller <davem@davemloft.net>
Tue, 21 Jan 2020 11:18:20 +0000 (12:18 +0100)
Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2020-01-21

1) Add support for TCP encapsulation of IKE and ESP messages,
   as defined by RFC 8229. Patchset from Sabrina Dubroca.

Please note that there is a merge conflict in:

net/unix/af_unix.c

between commit:

3c32da19a858 ("unix: Show number of pending scm files of receive queue in fdinfo")

from the net-next tree and commit:

b50b0580d27b ("net: add queue argument to __skb_wait_for_more_packets and __skb_{,try_}recv_datagram")

from the ipsec-next tree.

The conflict can be solved as done in linux-next.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1  2 
include/linux/skbuff.h
net/ipv4/udp.c
net/unix/af_unix.c

diff --combined include/linux/skbuff.h
index aaf73b34f72fe8c6a94f91ab1d7fec8ba235fcd8,49a10f9cc538a9dd39481fbcc701a91bddc17b17..26beae7db264a238e3c0c0e1e141eda1dca17a3c
@@@ -1478,11 -1478,6 +1478,11 @@@ static inline void skb_mark_not_on_list
        skb->next = NULL;
  }
  
 +/* Iterate through singly-linked GSO fragments of an skb. */
 +#define skb_list_walk_safe(first, skb, next_skb)                               \
 +      for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb);  \
 +           (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
 +
  static inline void skb_list_del_init(struct sk_buff *skb)
  {
        __list_del_entry(&skb->list);
@@@ -3464,7 -3459,8 +3464,8 @@@ static inline void skb_frag_list_init(s
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
  
  
- int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+                               int *err, long *timeo_p,
                                const struct sk_buff *skb);
  struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                          struct sk_buff_head *queue,
                                                           struct sk_buff *skb),
                                          int *off, int *err,
                                          struct sk_buff **last);
- struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+                                       struct sk_buff_head *queue,
+                                       unsigned int flags,
                                        void (*destructor)(struct sock *sk,
                                                           struct sk_buff *skb),
                                        int *off, int *err,
                                        struct sk_buff **last);
- struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ struct sk_buff *__skb_recv_datagram(struct sock *sk,
+                                   struct sk_buff_head *sk_queue,
+                                   unsigned int flags,
                                    void (*destructor)(struct sock *sk,
                                                       struct sk_buff *skb),
                                    int *off, int *err);
@@@ -4096,9 -4096,6 +4101,9 @@@ enum skb_ext_id 
  #endif
  #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        TC_SKB_EXT,
 +#endif
 +#if IS_ENABLED(CONFIG_MPTCP)
 +      SKB_EXT_MPTCP,
  #endif
        SKB_EXT_NUM, /* must be last */
  };
@@@ -4120,9 -4117,6 +4125,9 @@@ struct skb_ext 
        char data[0] __aligned(8);
  };
  
 +struct skb_ext *__skb_ext_alloc(void);
 +void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
 +                  struct skb_ext *ext);
  void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
  void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
  void __skb_ext_put(struct skb_ext *ext);
diff --combined net/ipv4/udp.c
index 208da091746995b2779fe0c260e7c830b3fc83fb,e5738d1217a1a578a8da0bc1aef4eb78211056bd..e4fd4408b7753acae4bd17f478e804c81b0c9c28
@@@ -1475,7 -1475,7 +1475,7 @@@ int __udp_enqueue_schedule_skb(struct s
         * queue contains some other skb
         */
        rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
 -      if (rmem > (size + sk->sk_rcvbuf))
 +      if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
                goto uncharge_drop;
  
        spin_lock(&list->lock);
@@@ -1708,7 -1708,8 +1708,8 @@@ busy_check
  
                /* sk_queue is empty, reader_queue may contain peeked packets */
        } while (timeo &&
-                !__skb_wait_for_more_packets(sk, &error, &timeo,
+                !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
+                                             &error, &timeo,
                                              (struct sk_buff *)sk_queue));
  
        *err = error;
@@@ -2104,7 -2105,8 +2105,7 @@@ static int udp_queue_rcv_skb(struct soc
        BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
        __skb_push(skb, -skb_mac_offset(skb));
        segs = udp_rcv_segment(sk, skb, true);
 -      for (skb = segs; skb; skb = next) {
 -              next = skb->next;
 +      skb_list_walk_safe(segs, skb, next) {
                __skb_pull(skb, skb_transport_offset(skb));
                ret = udp_queue_rcv_one_skb(sk, skb);
                if (ret > 0)
diff --combined net/unix/af_unix.c
index 6756a3ccc3920db415ca4b679d8131fa462f9601,a7f707fc4cac575c95c53384685bd89402bfe4ed..321af97c7bbe586dd4c71269cd070ff6cffa5937
@@@ -676,16 -676,6 +676,16 @@@ static int unix_set_peek_off(struct soc
        return 0;
  }
  
 +static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
 +{
 +      struct sock *sk = sock->sk;
 +      struct unix_sock *u;
 +
 +      if (sk) {
 +              u = unix_sk(sock->sk);
 +              seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
 +      }
 +}
  
  static const struct proto_ops unix_stream_ops = {
        .family =       PF_UNIX,
        .sendpage =     unix_stream_sendpage,
        .splice_read =  unix_stream_splice_read,
        .set_peek_off = unix_set_peek_off,
 +      .show_fdinfo =  unix_show_fdinfo,
  };
  
  static const struct proto_ops unix_dgram_ops = {
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
 +      .show_fdinfo =  unix_show_fdinfo,
  };
  
  static const struct proto_ops unix_seqpacket_ops = {
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
 +      .show_fdinfo =  unix_show_fdinfo,
  };
  
  static struct proto unix_proto = {
@@@ -801,7 -788,6 +801,7 @@@ static struct sock *unix_create1(struc
        mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
 +      memset(&u->scm_stat, 0, sizeof(struct scm_stat));
        unix_insert_socket(unix_sockets_unbound(sk), sk);
  out:
        if (sk == NULL)
@@@ -1586,28 -1572,6 +1586,28 @@@ static bool unix_skb_scm_eq(struct sk_b
               unix_secdata_eq(scm, skb);
  }
  
 +static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
 +{
 +      struct scm_fp_list *fp = UNIXCB(skb).fp;
 +      struct unix_sock *u = unix_sk(sk);
 +
 +      lockdep_assert_held(&sk->sk_receive_queue.lock);
 +
 +      if (unlikely(fp && fp->count))
 +              u->scm_stat.nr_fds += fp->count;
 +}
 +
 +static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
 +{
 +      struct scm_fp_list *fp = UNIXCB(skb).fp;
 +      struct unix_sock *u = unix_sk(sk);
 +
 +      lockdep_assert_held(&sk->sk_receive_queue.lock);
 +
 +      if (unlikely(fp && fp->count))
 +              u->scm_stat.nr_fds -= fp->count;
 +}
 +
  /*
   *    Send AF_UNIX data.
   */
@@@ -1793,10 -1757,7 +1793,10 @@@ restart_locked
        if (sock_flag(other, SOCK_RCVTSTAMP))
                __net_timestamp(skb);
        maybe_add_creds(skb, sock, other);
 -      skb_queue_tail(&other->sk_receive_queue, skb);
 +      spin_lock(&other->sk_receive_queue.lock);
 +      scm_stat_add(other, skb);
 +      __skb_queue_tail(&other->sk_receive_queue, skb);
 +      spin_unlock(&other->sk_receive_queue.lock);
        unix_state_unlock(other);
        other->sk_data_ready(other);
        sock_put(other);
@@@ -1898,10 -1859,7 +1898,10 @@@ static int unix_stream_sendmsg(struct s
                        goto pipe_err_free;
  
                maybe_add_creds(skb, sock, other);
 -              skb_queue_tail(&other->sk_receive_queue, skb);
 +              spin_lock(&other->sk_receive_queue.lock);
 +              scm_stat_add(other, skb);
 +              __skb_queue_tail(&other->sk_receive_queue, skb);
 +              spin_unlock(&other->sk_receive_queue.lock);
                unix_state_unlock(other);
                other->sk_data_ready(other);
                sent += size;
@@@ -2100,8 -2058,8 +2100,8 @@@ static int unix_dgram_recvmsg(struct so
                mutex_lock(&u->iolock);
  
                skip = sk_peek_offset(sk, flags);
-               skb = __skb_try_recv_datagram(sk, flags, scm_stat_del,
-                                             &skip, &err, &last);
+               skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
 -                                            NULL, &skip, &err, &last);
++                                            scm_stat_del, &skip, &err, &last);
                if (skb)
                        break;
  
                if (err != -EAGAIN)
                        break;
        } while (timeo &&
-                !__skb_wait_for_more_packets(sk, &err, &timeo, last));
+                !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
+                                             &err, &timeo, last));
  
        if (!skb) { /* implies iolock unlocked */
                unix_state_lock(sk);
@@@ -2395,12 -2354,8 +2396,12 @@@ unlock
  
                        sk_peek_offset_bwd(sk, chunk);
  
 -                      if (UNIXCB(skb).fp)
 +                      if (UNIXCB(skb).fp) {
 +                              spin_lock(&sk->sk_receive_queue.lock);
 +                              scm_stat_del(sk, skb);
 +                              spin_unlock(&sk->sk_receive_queue.lock);
                                unix_detach_fds(&scm, skb);
 +                      }
  
                        if (unix_skb_len(skb))
                                break;
@@@ -2911,7 -2866,7 +2912,7 @@@ static int __init af_unix_init(void
  {
        int rc = -1;
  
 -      BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
 +      BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
  
        rc = proto_register(&unix_proto, 1);
        if (rc != 0) {