l2tp: take a reference for kernel sockets in l2tp_tunnel_sock_lookup
net/l2tp/l2tp_core.c [linux-2.6-block.git]
1 /*
2  * L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:     Martijn van Oosterhout <kleptog@svana.org>
10  *              James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *              Michal Ostrowski <mostrows@speakeasy.net>
13  *              Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *              David S. Miller (davem@redhat.com)
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License version 2 as
18  * published by the Free Software Foundation.
19  */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/string.h>
25 #include <linux/list.h>
26 #include <linux/rculist.h>
27 #include <linux/uaccess.h>
28
29 #include <linux/kernel.h>
30 #include <linux/spinlock.h>
31 #include <linux/kthread.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/errno.h>
35 #include <linux/jiffies.h>
36
37 #include <linux/netdevice.h>
38 #include <linux/net.h>
39 #include <linux/inetdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/udp.h>
45 #include <linux/l2tp.h>
46 #include <linux/hash.h>
47 #include <linux/sort.h>
48 #include <linux/file.h>
49 #include <linux/nsproxy.h>
50 #include <net/net_namespace.h>
51 #include <net/netns/generic.h>
52 #include <net/dst.h>
53 #include <net/ip.h>
54 #include <net/udp.h>
55 #include <net/inet_common.h>
56 #include <net/xfrm.h>
57 #include <net/protocol.h>
58 #include <net/inet6_connection_sock.h>
59 #include <net/inet_ecn.h>
60 #include <net/ip6_route.h>
61 #include <net/ip6_checksum.h>
62
63 #include <asm/byteorder.h>
64 #include <linux/atomic.h>
65
66 #include "l2tp_core.h"
67
68 #define L2TP_DRV_VERSION        "V2.0"
69
70 /* L2TP header constants */
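/* Flag bits in the first 16 bits of the L2TPv2 header (RFC 2661):
 * T=message type (1=control, 0=data), L=length field present,
 * S=sequence numbers (Ns/Nr) present, O=offset field present, P=priority.
 */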
71 #define L2TP_HDRFLAG_T     0x8000
72 #define L2TP_HDRFLAG_L     0x4000
73 #define L2TP_HDRFLAG_S     0x0800
74 #define L2TP_HDRFLAG_O     0x0200
75 #define L2TP_HDRFLAG_P     0x0100
76
77 #define L2TP_HDR_VER_MASK  0x000F
78 #define L2TP_HDR_VER_2     0x0002
79 #define L2TP_HDR_VER_3     0x0003
80
81 /* L2TPv3 default L2-specific sublayer */
82 #define L2TP_SLFLAG_S      0x40000000
83 #define L2TP_SL_SEQ_MASK   0x00ffffff
84
85 #define L2TP_HDR_SIZE_SEQ               10
86 #define L2TP_HDR_SIZE_NOSEQ             6
87
88 /* Default trace flags */
89 #define L2TP_DEFAULT_DEBUG_FLAGS        0
90
91 /* Private data stored for received packets in the skb.
92  */
93 struct l2tp_skb_cb {
94         u32                     ns;
95         u16                     has_seq;
96         u16                     length;
97         unsigned long           expires;
98 };
99
100 #define L2TP_SKB_CB(skb)        ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
102 static atomic_t l2tp_tunnel_count;
103 static atomic_t l2tp_session_count;
104 static struct workqueue_struct *l2tp_wq;
105
106 /* per-net private data for this module */
107 static unsigned int l2tp_net_id;
108 struct l2tp_net {
109         struct list_head l2tp_tunnel_list;
110         spinlock_t l2tp_tunnel_list_lock;
111         struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
112         spinlock_t l2tp_session_hlist_lock;
113 };
114
115 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
116 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
117
118 static inline struct l2tp_net *l2tp_pernet(struct net *net)
119 {
120         BUG_ON(!net);
121
122         return net_generic(net, l2tp_net_id);
123 }
124
125 /* Tunnel reference counts. Incremented per session that is added to
126  * the tunnel.
127  */
128 static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
129 {
130         atomic_inc(&tunnel->ref_count);
131 }
132
133 static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
134 {
135         if (atomic_dec_and_test(&tunnel->ref_count))
136                 l2tp_tunnel_free(tunnel);
137 }
138 #ifdef L2TP_REFCNT_DEBUG
139 #define l2tp_tunnel_inc_refcount(_t)                                    \
140 do {                                                                    \
141         pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",        \
142                  __func__, __LINE__, (_t)->name,                        \
143                  atomic_read(&_t->ref_count));                          \
144         l2tp_tunnel_inc_refcount_1(_t);                                 \
145 } while (0)
146 #define l2tp_tunnel_dec_refcount(_t)                                    \
147 do {                                                                    \
148         pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",        \
149                  __func__, __LINE__, (_t)->name,                        \
150                  atomic_read(&_t->ref_count));                          \
151         l2tp_tunnel_dec_refcount_1(_t);                                 \
152 } while (0)
153 #else
154 #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
155 #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
156 #endif
157
158 /* Session hash global list for L2TPv3.
159  * The session_id SHOULD be random according to RFC3931, but several
160  * L2TP implementations use incrementing session_ids.  So we do a real
161  * hash on the session_id, rather than a simple bitmask.
162  */
163 static inline struct hlist_head *
164 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
165 {
166         return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
167
168 }
169
170 /* Lookup the tunnel socket, possibly involving the fs code if the socket is
171  * owned by userspace.  A struct sock returned from this function must be
172  * released using l2tp_tunnel_sock_put once you're done with it.
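 * In both cases the socket is returned with a reference held: sockfd_lookup()
 * pins the socket's file for userspace-owned tunnels, while sock_hold() takes
 * a reference for tunnel sockets created by the kernel.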
173  */
174 struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
175 {
176         int err = 0;
177         struct socket *sock = NULL;
178         struct sock *sk = NULL;
179
180         if (!tunnel)
181                 goto out;
182
183         if (tunnel->fd >= 0) {
184                 /* Socket is owned by userspace, who might be in the process
185                  * of closing it.  Look the socket up using the fd to ensure
186                  * consistency.
187                  */
188                 sock = sockfd_lookup(tunnel->fd, &err);
189                 if (sock)
190                         sk = sock->sk;
191         } else {
192                 /* Socket is owned by kernelspace */
193                 sk = tunnel->sock;
194                 sock_hold(sk);
195         }
196
197 out:
198         return sk;
199 }
200 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
201
202 /* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
203 void l2tp_tunnel_sock_put(struct sock *sk)
204 {
205         struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
206         if (tunnel) {
207                 if (tunnel->fd >= 0) {
208                         /* Socket is owned by userspace */
209                         sockfd_put(sk->sk_socket);
210                 }
211                 sock_put(sk);
212         }
213         sock_put(sk);
214 }
215 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
216
217 /* Lookup a session by id in the global session list
218  */
219 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
220 {
221         struct l2tp_net *pn = l2tp_pernet(net);
222         struct hlist_head *session_list =
223                 l2tp_session_id_hash_2(pn, session_id);
224         struct l2tp_session *session;
225
226         rcu_read_lock_bh();
227         hlist_for_each_entry_rcu(session, session_list, global_hlist) {
228                 if (session->session_id == session_id) {
229                         rcu_read_unlock_bh();
230                         return session;
231                 }
232         }
233         rcu_read_unlock_bh();
234
235         return NULL;
236 }
237
238 /* Session hash list.
239  * The session_id SHOULD be random according to RFC2661, but several
240  * L2TP implementations (Cisco and Microsoft) use incrementing
241  * session_ids.  So we do a real hash on the session_id, rather than a
242  * simple bitmask.
243  */
244 static inline struct hlist_head *
245 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
246 {
247         return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
248 }
249
250 /* Lookup a session by id
251  */
252 struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
253 {
254         struct hlist_head *session_list;
255         struct l2tp_session *session;
256
257         /* In L2TPv3, session_ids are unique over all tunnels and we
258          * sometimes need to look them up before we know the
259          * tunnel.
260          */
261         if (tunnel == NULL)
262                 return l2tp_session_find_2(net, session_id);
263
264         session_list = l2tp_session_id_hash(tunnel, session_id);
265         read_lock_bh(&tunnel->hlist_lock);
266         hlist_for_each_entry(session, session_list, hlist) {
267                 if (session->session_id == session_id) {
268                         read_unlock_bh(&tunnel->hlist_lock);
269                         return session;
270                 }
271         }
272         read_unlock_bh(&tunnel->hlist_lock);
273
274         return NULL;
275 }
276 EXPORT_SYMBOL_GPL(l2tp_session_find);
277
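/* Return the nth session (0-based) found in the tunnel's session hash table,
 * or NULL if there are fewer than nth+1 sessions. Useful for iterating over
 * all sessions of a tunnel, e.g. from management code.
 */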
278 struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
279 {
280         int hash;
281         struct l2tp_session *session;
282         int count = 0;
283
284         read_lock_bh(&tunnel->hlist_lock);
285         for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
286                 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
287                         if (++count > nth) {
288                                 read_unlock_bh(&tunnel->hlist_lock);
289                                 return session;
290                         }
291                 }
292         }
293
294         read_unlock_bh(&tunnel->hlist_lock);
295
296         return NULL;
297 }
298 EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
299
300 /* Lookup a session by interface name.
301  * This is very inefficient but is only used by management interfaces.
302  */
303 struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
304 {
305         struct l2tp_net *pn = l2tp_pernet(net);
306         int hash;
307         struct l2tp_session *session;
308
309         rcu_read_lock_bh();
310         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
311                 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
312                         if (!strcmp(session->ifname, ifname)) {
313                                 rcu_read_unlock_bh();
314                                 return session;
315                         }
316                 }
317         }
318
319         rcu_read_unlock_bh();
320
321         return NULL;
322 }
323 EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
324
325 /* Lookup a tunnel by id
326  */
327 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
328 {
329         struct l2tp_tunnel *tunnel;
330         struct l2tp_net *pn = l2tp_pernet(net);
331
332         rcu_read_lock_bh();
333         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
334                 if (tunnel->tunnel_id == tunnel_id) {
335                         rcu_read_unlock_bh();
336                         return tunnel;
337                 }
338         }
339         rcu_read_unlock_bh();
340
341         return NULL;
342 }
343 EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
344
345 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
346 {
347         struct l2tp_net *pn = l2tp_pernet(net);
348         struct l2tp_tunnel *tunnel;
349         int count = 0;
350
351         rcu_read_lock_bh();
352         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
353                 if (++count > nth) {
354                         rcu_read_unlock_bh();
355                         return tunnel;
356                 }
357         }
358
359         rcu_read_unlock_bh();
360
361         return NULL;
362 }
363 EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
364
365 /*****************************************************************************
366  * Receive data handling
367  *****************************************************************************/
368
369 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
370  * number.
371  */
372 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
373 {
374         struct sk_buff *skbp;
375         struct sk_buff *tmp;
376         u32 ns = L2TP_SKB_CB(skb)->ns;
377         struct l2tp_stats *sstats;
378
379         spin_lock_bh(&session->reorder_q.lock);
380         sstats = &session->stats;
381         skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
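                /* Insert before the first queued skb with a larger ns. Note
                 * that this is a plain 32-bit comparison, so sequence number
                 * wrap-around is not handled specially here.
                 */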
382                 if (L2TP_SKB_CB(skbp)->ns > ns) {
383                         __skb_queue_before(&session->reorder_q, skbp, skb);
384                         l2tp_dbg(session, L2TP_MSG_SEQ,
385                                  "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
386                                  session->name, ns, L2TP_SKB_CB(skbp)->ns,
387                                  skb_queue_len(&session->reorder_q));
388                         u64_stats_update_begin(&sstats->syncp);
389                         sstats->rx_oos_packets++;
390                         u64_stats_update_end(&sstats->syncp);
391                         goto out;
392                 }
393         }
394
395         __skb_queue_tail(&session->reorder_q, skb);
396
397 out:
398         spin_unlock_bh(&session->reorder_q.lock);
399 }
400
401 /* Dequeue a single skb.
402  */
403 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
404 {
405         struct l2tp_tunnel *tunnel = session->tunnel;
406         int length = L2TP_SKB_CB(skb)->length;
407         struct l2tp_stats *tstats, *sstats;
408
409         /* We're about to requeue the skb, so return resources
410          * to its current owner (a socket receive buffer).
411          */
412         skb_orphan(skb);
413
414         tstats = &tunnel->stats;
415         u64_stats_update_begin(&tstats->syncp);
416         sstats = &session->stats;
417         u64_stats_update_begin(&sstats->syncp);
418         tstats->rx_packets++;
419         tstats->rx_bytes += length;
420         sstats->rx_packets++;
421         sstats->rx_bytes += length;
422         u64_stats_update_end(&tstats->syncp);
423         u64_stats_update_end(&sstats->syncp);
424
425         if (L2TP_SKB_CB(skb)->has_seq) {
426                 /* Bump our Nr */
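                /* Nr is 16 bits in L2TPv2 and 24 bits in the L2TPv3 default
                 * L2-specific sublayer, hence the different masks below.
                 */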
427                 session->nr++;
428                 if (tunnel->version == L2TP_HDR_VER_2)
429                         session->nr &= 0xffff;
430                 else
431                         session->nr &= 0xffffff;
432
433                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
434                          session->name, session->nr);
435         }
436
437         /* call private receive handler */
438         if (session->recv_skb != NULL)
439                 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
440         else
441                 kfree_skb(skb);
442
443         if (session->deref)
444                 (*session->deref)(session);
445 }
446
447 /* Dequeue skbs from the session's reorder_q, subject to packet order.
448  * Skbs that have been in the queue for too long are simply discarded.
449  */
450 static void l2tp_recv_dequeue(struct l2tp_session *session)
451 {
452         struct sk_buff *skb;
453         struct sk_buff *tmp;
454         struct l2tp_stats *sstats;
455
456         /* If the pkt at the head of the queue has the nr that we
457          * expect to send up next, dequeue it and any other
458          * in-sequence packets behind it.
459          */
460 start:
461         spin_lock_bh(&session->reorder_q.lock);
462         sstats = &session->stats;
463         skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
464                 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
465                         u64_stats_update_begin(&sstats->syncp);
466                         sstats->rx_seq_discards++;
467                         sstats->rx_errors++;
468                         u64_stats_update_end(&sstats->syncp);
469                         l2tp_dbg(session, L2TP_MSG_SEQ,
470                                  "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
471                                  session->name, L2TP_SKB_CB(skb)->ns,
472                                  L2TP_SKB_CB(skb)->length, session->nr,
473                                  skb_queue_len(&session->reorder_q));
474                         session->reorder_skip = 1;
475                         __skb_unlink(skb, &session->reorder_q);
476                         kfree_skb(skb);
477                         if (session->deref)
478                                 (*session->deref)(session);
479                         continue;
480                 }
481
482                 if (L2TP_SKB_CB(skb)->has_seq) {
483                         if (session->reorder_skip) {
484                                 l2tp_dbg(session, L2TP_MSG_SEQ,
485                                          "%s: advancing nr to next pkt: %u -> %u",
486                                          session->name, session->nr,
487                                          L2TP_SKB_CB(skb)->ns);
488                                 session->reorder_skip = 0;
489                                 session->nr = L2TP_SKB_CB(skb)->ns;
490                         }
491                         if (L2TP_SKB_CB(skb)->ns != session->nr) {
492                                 l2tp_dbg(session, L2TP_MSG_SEQ,
493                                          "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
494                                          session->name, L2TP_SKB_CB(skb)->ns,
495                                          L2TP_SKB_CB(skb)->length, session->nr,
496                                          skb_queue_len(&session->reorder_q));
497                                 goto out;
498                         }
499                 }
500                 __skb_unlink(skb, &session->reorder_q);
501
502                 /* Process the skb. We release the queue lock while we
503                  * do so to let other contexts process the queue.
504                  */
505                 spin_unlock_bh(&session->reorder_q.lock);
506                 l2tp_recv_dequeue_skb(session, skb);
507                 goto start;
508         }
509
510 out:
511         spin_unlock_bh(&session->reorder_q.lock);
512 }
513
514 static inline int l2tp_verify_udp_checksum(struct sock *sk,
515                                            struct sk_buff *skb)
516 {
517         struct udphdr *uh = udp_hdr(skb);
518         u16 ulen = ntohs(uh->len);
519         __wsum psum;
520
521         if (sk->sk_no_check || skb_csum_unnecessary(skb))
522                 return 0;
523
524 #if IS_ENABLED(CONFIG_IPV6)
525         if (sk->sk_family == PF_INET6) {
526                 if (!uh->check) {
527                         LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
528                         return 1;
529                 }
530                 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
531                     !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
532                                      &ipv6_hdr(skb)->daddr, ulen,
533                                      IPPROTO_UDP, skb->csum)) {
534                         skb->ip_summed = CHECKSUM_UNNECESSARY;
535                         return 0;
536                 }
537                 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
538                                                          &ipv6_hdr(skb)->daddr,
539                                                          skb->len, IPPROTO_UDP,
540                                                          0));
541         } else
542 #endif
543         {
544                 struct inet_sock *inet;
545                 if (!uh->check)
546                         return 0;
547                 inet = inet_sk(sk);
548                 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
549                                           ulen, IPPROTO_UDP, 0);
550
551                 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
552                     !csum_fold(csum_add(psum, skb->csum)))
553                         return 0;
554                 skb->csum = psum;
555         }
556
557         return __skb_checksum_complete(skb);
558 }
559
560 /* Do receive processing of L2TP data frames. We handle both L2TPv2
561  * and L2TPv3 data frames here.
562  *
563  * L2TPv2 Data Message Header
564  *
565  *  0                   1                   2                   3
566  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
567  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
568  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
569  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
570  * |           Tunnel ID           |           Session ID          |
571  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
572  * |             Ns (opt)          |             Nr (opt)          |
573  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
574  * |      Offset Size (opt)        |    Offset pad... (opt)
575  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
576  *
577  * Data frames are marked by T=0. All other fields are the same as
578  * those in L2TP control frames.
579  *
580  * L2TPv3 Data Message Header
581  *
582  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
583  * |                      L2TP Session Header                      |
584  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
585  * |                      L2-Specific Sublayer                     |
586  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
587  * |                        Tunnel Payload                      ...
588  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
589  *
590  * L2TPv3 Session Header Over IP
591  *
592  *  0                   1                   2                   3
593  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
594  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
595  * |                           Session ID                          |
596  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
597  * |               Cookie (optional, maximum 64 bits)...
598  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
599  *                                                                 |
600  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
601  *
602  * L2TPv3 L2-Specific Sublayer Format
603  *
604  *  0                   1                   2                   3
605  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
606  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
607  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
608  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
609  *
610  * Cookie value, sublayer format and offset (pad) are negotiated with
611  * the peer when the session is set up. Unlike L2TPv2, we do not need
612  * to parse the packet header to determine if optional fields are
613  * present.
614  *
615  * Caller must already have parsed the frame and determined that it is
616  * a data (not control) frame before coming here. Fields up to the
617  * session-id have already been parsed and ptr points to the data
618  * after the session-id.
619  */
620 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
621                       unsigned char *ptr, unsigned char *optr, u16 hdrflags,
622                       int length, int (*payload_hook)(struct sk_buff *skb))
623 {
624         struct l2tp_tunnel *tunnel = session->tunnel;
625         int offset;
626         u32 ns, nr;
627         struct l2tp_stats *sstats = &session->stats;
628
629         /* The ref count is increased since we now hold a pointer to
630          * the session. Take care to decrement the refcnt when exiting
631          * this function from now on...
632          */
633         l2tp_session_inc_refcount(session);
634         if (session->ref)
635                 (*session->ref)(session);
636
637         /* Parse and check optional cookie */
638         if (session->peer_cookie_len > 0) {
639                 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
640                         l2tp_info(tunnel, L2TP_MSG_DATA,
641                                   "%s: cookie mismatch (%u/%u). Discarding.\n",
642                                   tunnel->name, tunnel->tunnel_id,
643                                   session->session_id);
644                         u64_stats_update_begin(&sstats->syncp);
645                         sstats->rx_cookie_discards++;
646                         u64_stats_update_end(&sstats->syncp);
647                         goto discard;
648                 }
649                 ptr += session->peer_cookie_len;
650         }
651
652         /* Handle the optional sequence numbers. Sequence numbers are
653          * in different places for L2TPv2 and L2TPv3.
654          *
655          * If we are the LAC, enable/disable sequence numbers under
656          * the control of the LNS.  If no sequence numbers present but
657          * we were expecting them, discard frame.
658          */
659         ns = nr = 0;
660         L2TP_SKB_CB(skb)->has_seq = 0;
661         if (tunnel->version == L2TP_HDR_VER_2) {
662                 if (hdrflags & L2TP_HDRFLAG_S) {
663                         ns = ntohs(*(__be16 *) ptr);
664                         ptr += 2;
665                         nr = ntohs(*(__be16 *) ptr);
666                         ptr += 2;
667
668                         /* Store L2TP info in the skb */
669                         L2TP_SKB_CB(skb)->ns = ns;
670                         L2TP_SKB_CB(skb)->has_seq = 1;
671
672                         l2tp_dbg(session, L2TP_MSG_SEQ,
673                                  "%s: recv data ns=%u, nr=%u, session nr=%u\n",
674                                  session->name, ns, nr, session->nr);
675                 }
676         } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
677                 u32 l2h = ntohl(*(__be32 *) ptr);
678
679                 if (l2h & 0x40000000) {
680                         ns = l2h & 0x00ffffff;
681
682                         /* Store L2TP info in the skb */
683                         L2TP_SKB_CB(skb)->ns = ns;
684                         L2TP_SKB_CB(skb)->has_seq = 1;
685
686                         l2tp_dbg(session, L2TP_MSG_SEQ,
687                                  "%s: recv data ns=%u, session nr=%u\n",
688                                  session->name, ns, session->nr);
689                 }
690         }
691
692         /* Advance past L2-specific header, if present */
693         ptr += session->l2specific_len;
694
695         if (L2TP_SKB_CB(skb)->has_seq) {
696                 /* Received a packet with sequence numbers. If we're the LAC,
697                  * check if we are sending sequence numbers and if not,
698                  * configure it so (the LNS controls their use).
699                  */
700                 if ((!session->lns_mode) && (!session->send_seq)) {
701                         l2tp_info(session, L2TP_MSG_SEQ,
702                                   "%s: requested to enable seq numbers by LNS\n",
703                                   session->name);
704                         session->send_seq = -1;
705                         l2tp_session_set_header_len(session, tunnel->version);
706                 }
707         } else {
708                 /* No sequence numbers.
709                  * If user has configured mandatory sequence numbers, discard.
710                  */
711                 if (session->recv_seq) {
712                         l2tp_warn(session, L2TP_MSG_SEQ,
713                                   "%s: recv data has no seq numbers when required. Discarding.\n",
714                                   session->name);
715                         u64_stats_update_begin(&sstats->syncp);
716                         sstats->rx_seq_discards++;
717                         u64_stats_update_end(&sstats->syncp);
718                         goto discard;
719                 }
720
721                 /* If we're the LAC and we're sending sequence numbers, the
722                  * LNS has requested that we no longer send sequence numbers.
723                  * If we're the LNS and we're sending sequence numbers, the
724                  * LAC is broken. Discard the frame.
725                  */
726                 if ((!session->lns_mode) && (session->send_seq)) {
727                         l2tp_info(session, L2TP_MSG_SEQ,
728                                   "%s: requested to disable seq numbers by LNS\n",
729                                   session->name);
730                         session->send_seq = 0;
731                         l2tp_session_set_header_len(session, tunnel->version);
732                 } else if (session->send_seq) {
733                         l2tp_warn(session, L2TP_MSG_SEQ,
734                                   "%s: recv data has no seq numbers when required. Discarding.\n",
735                                   session->name);
736                         u64_stats_update_begin(&sstats->syncp);
737                         sstats->rx_seq_discards++;
738                         u64_stats_update_end(&sstats->syncp);
739                         goto discard;
740                 }
741         }
742
743         /* Session data offset is handled differently for L2TPv2 and
744          * L2TPv3. For L2TPv2, there is an optional 16-bit value in
745          * the header. For L2TPv3, the offset is negotiated using AVPs
746          * in the session setup control protocol.
747          */
748         if (tunnel->version == L2TP_HDR_VER_2) {
749                 /* If offset bit set, skip it. */
750                 if (hdrflags & L2TP_HDRFLAG_O) {
751                         offset = ntohs(*(__be16 *)ptr);
752                         ptr += 2 + offset;
753                 }
754         } else
755                 ptr += session->offset;
756
757         offset = ptr - optr;
758         if (!pskb_may_pull(skb, offset))
759                 goto discard;
760
761         __skb_pull(skb, offset);
762
763         /* If caller wants to process the payload before we queue the
764          * packet, do so now.
765          */
766         if (payload_hook)
767                 if ((*payload_hook)(skb))
768                         goto discard;
769
770         /* Prepare skb for adding to the session's reorder_q.  Hold
771          * packets for max reorder_timeout or 1 second if not
772          * reordering.
773          */
774         L2TP_SKB_CB(skb)->length = length;
775         L2TP_SKB_CB(skb)->expires = jiffies +
776                 (session->reorder_timeout ? session->reorder_timeout : HZ);
777
778         /* Add packet to the session's receive queue. Reordering is done here, if
779          * enabled. Saved L2TP protocol info is stored in skb->cb[].
780          */
781         if (L2TP_SKB_CB(skb)->has_seq) {
782                 if (session->reorder_timeout != 0) {
783                         /* Packet reordering enabled. Add skb to session's
784                          * reorder queue, in order of ns.
785                          */
786                         l2tp_recv_queue_skb(session, skb);
787                 } else {
788                         /* Packet reordering disabled. Discard out-of-sequence
789                          * packets
790                          */
791                         if (L2TP_SKB_CB(skb)->ns != session->nr) {
792                                 u64_stats_update_begin(&sstats->syncp);
793                                 sstats->rx_seq_discards++;
794                                 u64_stats_update_end(&sstats->syncp);
795                                 l2tp_dbg(session, L2TP_MSG_SEQ,
796                                          "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
797                                          session->name, L2TP_SKB_CB(skb)->ns,
798                                          L2TP_SKB_CB(skb)->length, session->nr,
799                                          skb_queue_len(&session->reorder_q));
800                                 goto discard;
801                         }
802                         skb_queue_tail(&session->reorder_q, skb);
803                 }
804         } else {
805                 /* No sequence numbers. Add the skb to the tail of the
806                  * reorder queue. This ensures that it will be
807                  * delivered after all previous sequenced skbs.
808                  */
809                 skb_queue_tail(&session->reorder_q, skb);
810         }
811
812         /* Try to dequeue as many skbs from reorder_q as we can. */
813         l2tp_recv_dequeue(session);
814
815         l2tp_session_dec_refcount(session);
816
817         return;
818
819 discard:
820         u64_stats_update_begin(&sstats->syncp);
821         sstats->rx_errors++;
822         u64_stats_update_end(&sstats->syncp);
823         kfree_skb(skb);
824
825         if (session->deref)
826                 (*session->deref)(session);
827
828         l2tp_session_dec_refcount(session);
829 }
830 EXPORT_SYMBOL(l2tp_recv_common);
831
832 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
833  * here. The skb is not on a list when we get here.
834  * Returns 0 if the packet was a data packet and was successfully passed on.
835  * Returns 1 if the packet was not a good data packet and could not be
836  * forwarded.  All such packets are passed up to userspace to deal with.
837  */
838 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
839                               int (*payload_hook)(struct sk_buff *skb))
840 {
841         struct l2tp_session *session = NULL;
842         unsigned char *ptr, *optr;
843         u16 hdrflags;
844         u32 tunnel_id, session_id;
845         u16 version;
846         int length;
847         struct l2tp_stats *tstats;
848
849         if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
850                 goto discard_bad_csum;
851
852         /* UDP always verifies the packet length. */
853         __skb_pull(skb, sizeof(struct udphdr));
854
855         /* Short packet? */
856         if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
857                 l2tp_info(tunnel, L2TP_MSG_DATA,
858                           "%s: recv short packet (len=%d)\n",
859                           tunnel->name, skb->len);
860                 goto error;
861         }
862
863         /* Trace packet contents, if enabled */
864         if (tunnel->debug & L2TP_MSG_DATA) {
865                 length = min(32u, skb->len);
866                 if (!pskb_may_pull(skb, length))
867                         goto error;
868
869                 pr_debug("%s: recv\n", tunnel->name);
870                 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
871         }
872
873         /* Point to L2TP header */
874         optr = ptr = skb->data;
875
876         /* Get L2TP header flags */
877         hdrflags = ntohs(*(__be16 *) ptr);
878
879         /* Check protocol version */
880         version = hdrflags & L2TP_HDR_VER_MASK;
881         if (version != tunnel->version) {
882                 l2tp_info(tunnel, L2TP_MSG_DATA,
883                           "%s: recv protocol version mismatch: got %d expected %d\n",
884                           tunnel->name, version, tunnel->version);
885                 goto error;
886         }
887
888         /* Get length of L2TP packet */
889         length = skb->len;
890
891         /* If this is a control packet, it is handled by userspace. */
892         if (hdrflags & L2TP_HDRFLAG_T) {
893                 l2tp_dbg(tunnel, L2TP_MSG_DATA,
894                          "%s: recv control packet, len=%d\n",
895                          tunnel->name, length);
896                 goto error;
897         }
898
899         /* Skip flags */
900         ptr += 2;
901
902         if (tunnel->version == L2TP_HDR_VER_2) {
903                 /* If length is present, skip it */
904                 if (hdrflags & L2TP_HDRFLAG_L)
905                         ptr += 2;
906
907                 /* Extract tunnel and session ID */
908                 tunnel_id = ntohs(*(__be16 *) ptr);
909                 ptr += 2;
910                 session_id = ntohs(*(__be16 *) ptr);
911                 ptr += 2;
912         } else {
913                 ptr += 2;       /* skip reserved bits */
914                 tunnel_id = tunnel->tunnel_id;
915                 session_id = ntohl(*(__be32 *) ptr);
916                 ptr += 4;
917         }
918
919         /* Find the session context */
920         session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
921         if (!session || !session->recv_skb) {
922                 /* Not found? Pass to userspace to deal with */
923                 l2tp_info(tunnel, L2TP_MSG_DATA,
924                           "%s: no session found (%u/%u). Passing up.\n",
925                           tunnel->name, tunnel_id, session_id);
926                 goto error;
927         }
928
929         l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
930
931         return 0;
932
933 discard_bad_csum:
934         LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
935         UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
936         tstats = &tunnel->stats;
937         u64_stats_update_begin(&tstats->syncp);
938         tstats->rx_errors++;
939         u64_stats_update_end(&tstats->syncp);
940         kfree_skb(skb);
941
942         return 0;
943
944 error:
945         /* Put UDP header back */
946         __skb_push(skb, sizeof(struct udphdr));
947
948         return 1;
949 }
950
951 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
952  * Return codes:
953  * 0 : success.
954  * <0: error
955  * >0: skb should be passed up to userspace as UDP.
956  */
957 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
958 {
959         struct l2tp_tunnel *tunnel;
960
961         tunnel = l2tp_sock_to_tunnel(sk);
962         if (tunnel == NULL)
963                 goto pass_up;
964
965         l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
966                  tunnel->name, skb->len);
967
968         if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
969                 goto pass_up_put;
970
971         sock_put(sk);
972         return 0;
973
974 pass_up_put:
975         sock_put(sk);
976 pass_up:
977         return 1;
978 }
979 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
980
981 /************************************************************************
982  * Transmit handling
983  ***********************************************************************/
984
985 /* Build an L2TP header for the session into the buffer provided.
986  */
987 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
988 {
989         struct l2tp_tunnel *tunnel = session->tunnel;
990         __be16 *bufp = buf;
991         __be16 *optr = buf;
992         u16 flags = L2TP_HDR_VER_2;
993         u32 tunnel_id = tunnel->peer_tunnel_id;
994         u32 session_id = session->peer_session_id;
995
996         if (session->send_seq)
997                 flags |= L2TP_HDRFLAG_S;
998
999         /* Setup L2TP header. */
1000         *bufp++ = htons(flags);
1001         *bufp++ = htons(tunnel_id);
1002         *bufp++ = htons(session_id);
1003         if (session->send_seq) {
1004                 *bufp++ = htons(session->ns);
1005                 *bufp++ = 0;
1006                 session->ns++;
1007                 session->ns &= 0xffff;
1008                 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1009                          session->name, session->ns);
1010         }
1011
1012         return bufp - optr;
1013 }
1014
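/* Build an L2TPv3 header into the buffer provided: a 4-byte flags/version
 * field when UDP encapsulation is used, the peer session ID, an optional
 * cookie, an optional L2-specific sublayer and any configured offset padding.
 * Returns the header length in bytes.
 */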
1015 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1016 {
1017         struct l2tp_tunnel *tunnel = session->tunnel;
1018         char *bufp = buf;
1019         char *optr = bufp;
1020
1021         /* Setup L2TP header. The header differs slightly for UDP and
1022          * IP encapsulations. For UDP, there are 4 bytes of flags.
1023          */
1024         if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1025                 u16 flags = L2TP_HDR_VER_3;
1026                 *((__be16 *) bufp) = htons(flags);
1027                 bufp += 2;
1028                 *((__be16 *) bufp) = 0;
1029                 bufp += 2;
1030         }
1031
1032         *((__be32 *) bufp) = htonl(session->peer_session_id);
1033         bufp += 4;
1034         if (session->cookie_len) {
1035                 memcpy(bufp, &session->cookie[0], session->cookie_len);
1036                 bufp += session->cookie_len;
1037         }
1038         if (session->l2specific_len) {
1039                 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1040                         u32 l2h = 0;
1041                         if (session->send_seq) {
1042                                 l2h = 0x40000000 | session->ns;
1043                                 session->ns++;
1044                                 session->ns &= 0xffffff;
1045                                 l2tp_dbg(session, L2TP_MSG_SEQ,
1046                                          "%s: updated ns to %u\n",
1047                                          session->name, session->ns);
1048                         }
1049
1050                         *((__be32 *) bufp) = htonl(l2h);
1051                 }
1052                 bufp += session->l2specific_len;
1053         }
1054         if (session->offset)
1055                 bufp += session->offset;
1056
1057         return bufp - optr;
1058 }
1059
1060 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1061                           struct flowi *fl, size_t data_len)
1062 {
1063         struct l2tp_tunnel *tunnel = session->tunnel;
1064         unsigned int len = skb->len;
1065         int error;
1066         struct l2tp_stats *tstats, *sstats;
1067
1068         /* Debug */
1069         if (session->send_seq)
1070                 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
1071                          session->name, data_len, session->ns - 1);
1072         else
1073                 l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
1074                          session->name, data_len);
1075
1076         if (session->debug & L2TP_MSG_DATA) {
1077                 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1078                 unsigned char *datap = skb->data + uhlen;
1079
1080                 pr_debug("%s: xmit\n", session->name);
1081                 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1082                                      datap, min_t(size_t, 32, len - uhlen));
1083         }
1084
1085         /* Queue the packet to IP for output */
1086         skb->local_df = 1;
1087 #if IS_ENABLED(CONFIG_IPV6)
1088         if (skb->sk->sk_family == PF_INET6)
1089                 error = inet6_csk_xmit(skb, NULL);
1090         else
1091 #endif
1092                 error = ip_queue_xmit(skb, fl);
1093
1094         /* Update stats */
1095         tstats = &tunnel->stats;
1096         u64_stats_update_begin(&tstats->syncp);
1097         sstats = &session->stats;
1098         u64_stats_update_begin(&sstats->syncp);
1099         if (error >= 0) {
1100                 tstats->tx_packets++;
1101                 tstats->tx_bytes += len;
1102                 sstats->tx_packets++;
1103                 sstats->tx_bytes += len;
1104         } else {
1105                 tstats->tx_errors++;
1106                 sstats->tx_errors++;
1107         }
1108         u64_stats_update_end(&tstats->syncp);
1109         u64_stats_update_end(&sstats->syncp);
1110
1111         return 0;
1112 }
1113
1114 /* Automatically called when the skb is freed.
1115  */
1116 static void l2tp_sock_wfree(struct sk_buff *skb)
1117 {
1118         sock_put(skb->sk);
1119 }
1120
1121 /* For data skbs that we transmit, we associate with the tunnel socket
1122  * but don't do accounting.
1123  */
1124 static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1125 {
1126         sock_hold(sk);
1127         skb->sk = sk;
1128         skb->destructor = l2tp_sock_wfree;
1129 }
1130
1131 #if IS_ENABLED(CONFIG_IPV6)
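/* Fill in the UDP checksum for an IPv6-encapsulated L2TP frame. The checksum
 * is computed in software unless the output device can do IPv6 checksum
 * offload, in which case the skb is set up for CHECKSUM_PARTIAL.
 */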
1132 static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1133                                 int udp_len)
1134 {
1135         struct ipv6_pinfo *np = inet6_sk(sk);
1136         struct udphdr *uh = udp_hdr(skb);
1137
1138         if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1139             !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1140                 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1141                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1142                 uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
1143                                             IPPROTO_UDP, csum);
1144                 if (uh->check == 0)
1145                         uh->check = CSUM_MANGLED_0;
1146         } else {
1147                 skb->ip_summed = CHECKSUM_PARTIAL;
1148                 skb->csum_start = skb_transport_header(skb) - skb->head;
1149                 skb->csum_offset = offsetof(struct udphdr, check);
1150                 uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
1151                                              udp_len, IPPROTO_UDP, 0);
1152         }
1153 }
1154 #endif
1155
1156 /* If caller requires the skb to have a ppp header, the header must be
1157  * inserted in the skb data before calling this function.
1158  */
1159 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1160 {
1161         int data_len = skb->len;
1162         struct l2tp_tunnel *tunnel = session->tunnel;
1163         struct sock *sk = tunnel->sock;
1164         struct flowi *fl;
1165         struct udphdr *uh;
1166         struct inet_sock *inet;
1167         __wsum csum;
1168         int headroom;
1169         int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1170         int udp_len;
1171         int ret = NET_XMIT_SUCCESS;
1172
1173         /* Check that there's enough headroom in the skb to insert IP,
1174          * UDP and L2TP headers. If not enough, expand it to
1175          * make room. Adjust truesize.
1176          */
1177         headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1178                 uhlen + hdr_len;
1179         if (skb_cow_head(skb, headroom)) {
1180                 kfree_skb(skb);
1181                 return NET_XMIT_DROP;
1182         }
1183
1184         skb_orphan(skb);
1185         /* Setup L2TP header */
1186         session->build_header(session, __skb_push(skb, hdr_len));
1187
1188         /* Reset skb netfilter state */
1189         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1190         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1191                               IPSKB_REROUTED);
1192         nf_reset(skb);
1193
1194         bh_lock_sock(sk);
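        /* If the tunnel socket is currently locked by its owner we cannot
         * transmit from here; drop the packet rather than block.
         */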
1195         if (sock_owned_by_user(sk)) {
1196                 kfree_skb(skb);
1197                 ret = NET_XMIT_DROP;
1198                 goto out_unlock;
1199         }
1200
1201         /* Get routing info from the tunnel socket */
1202         skb_dst_drop(skb);
1203         skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1204
1205         inet = inet_sk(sk);
1206         fl = &inet->cork.fl;
1207         switch (tunnel->encap) {
1208         case L2TP_ENCAPTYPE_UDP:
1209                 /* Setup UDP header */
1210                 __skb_push(skb, sizeof(*uh));
1211                 skb_reset_transport_header(skb);
1212                 uh = udp_hdr(skb);
1213                 uh->source = inet->inet_sport;
1214                 uh->dest = inet->inet_dport;
1215                 udp_len = uhlen + hdr_len + data_len;
1216                 uh->len = htons(udp_len);
1217                 uh->check = 0;
1218
1219                 /* Calculate UDP checksum if configured to do so */
1220 #if IS_ENABLED(CONFIG_IPV6)
1221                 if (sk->sk_family == PF_INET6)
1222                         l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1223                 else
1224 #endif
1225                 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1226                         skb->ip_summed = CHECKSUM_NONE;
1227                 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1228                          (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1229                         skb->ip_summed = CHECKSUM_COMPLETE;
1230                         csum = skb_checksum(skb, 0, udp_len, 0);
1231                         uh->check = csum_tcpudp_magic(inet->inet_saddr,
1232                                                       inet->inet_daddr,
1233                                                       udp_len, IPPROTO_UDP, csum);
1234                         if (uh->check == 0)
1235                                 uh->check = CSUM_MANGLED_0;
1236                 } else {
1237                         skb->ip_summed = CHECKSUM_PARTIAL;
1238                         skb->csum_start = skb_transport_header(skb) - skb->head;
1239                         skb->csum_offset = offsetof(struct udphdr, check);
1240                         uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1241                                                        inet->inet_daddr,
1242                                                        udp_len, IPPROTO_UDP, 0);
1243                 }
1244                 break;
1245
1246         case L2TP_ENCAPTYPE_IP:
1247                 break;
1248         }
1249
1250         l2tp_skb_set_owner_w(skb, sk);
1251
1252         l2tp_xmit_core(session, skb, fl, data_len);
1253 out_unlock:
1254         bh_unlock_sock(sk);
1255
1256         return ret;
1257 }
1258 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1259
1260 /*****************************************************************************
1261  * Tunnel and session create/destroy.
1262  *****************************************************************************/
1263
1264 /* Tunnel socket destruct hook.
1265  * The tunnel context is deleted only when all session sockets have been
1266  * closed.
1267  */
1268 static void l2tp_tunnel_destruct(struct sock *sk)
1269 {
1270         struct l2tp_tunnel *tunnel;
1271         struct l2tp_net *pn;
1272
1273         tunnel = sk->sk_user_data;
1274         if (tunnel == NULL)
1275                 goto end;
1276
1277         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1278
1279
1280         /* Disable udp encapsulation */
1281         switch (tunnel->encap) {
1282         case L2TP_ENCAPTYPE_UDP:
1283                 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1284                 (udp_sk(sk))->encap_type = 0;
1285                 (udp_sk(sk))->encap_rcv = NULL;
1286                 (udp_sk(sk))->encap_destroy = NULL;
1287                 break;
1288         case L2TP_ENCAPTYPE_IP:
1289                 break;
1290         }
1291
1292         /* Remove hooks into tunnel socket */
1293         sk->sk_destruct = tunnel->old_sk_destruct;
1294         sk->sk_user_data = NULL;
1295         tunnel->sock = NULL;
1296
1297         /* Remove the tunnel struct from the tunnel list */
1298         pn = l2tp_pernet(tunnel->l2tp_net);
1299         spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1300         list_del_rcu(&tunnel->list);
1301         spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1302         atomic_dec(&l2tp_tunnel_count);
1303
1304         l2tp_tunnel_closeall(tunnel);
1305         l2tp_tunnel_dec_refcount(tunnel);
1306
1307         /* Call the original destructor */
1308         if (sk->sk_destruct)
1309                 (*sk->sk_destruct)(sk);
1310 end:
1311         return;
1312 }
1313
1314 /* When the tunnel is closed, all the attached sessions need to go too.
1315  */
1316 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1317 {
1318         int hash;
1319         struct hlist_node *walk;
1320         struct hlist_node *tmp;
1321         struct l2tp_session *session;
1322
1323         BUG_ON(tunnel == NULL);
1324
1325         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1326                   tunnel->name);
1327
1328         write_lock_bh(&tunnel->hlist_lock);
1329         for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1330 again:
1331                 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1332                         session = hlist_entry(walk, struct l2tp_session, hlist);
1333
1334                         l2tp_info(session, L2TP_MSG_CONTROL,
1335                                   "%s: closing session\n", session->name);
1336
1337                         hlist_del_init(&session->hlist);
1338
1339                         /* Since we should hold the sock lock while
1340                          * doing any unbinding, we need to release the
1341                          * lock we're holding before taking that lock.
1342                          * Hold a reference to the sock so it doesn't
1343                          * disappear as we're jumping between locks.
1344                          */
1345                         if (session->ref != NULL)
1346                                 (*session->ref)(session);
1347
1348                         write_unlock_bh(&tunnel->hlist_lock);
1349
1350                         if (tunnel->version != L2TP_HDR_VER_2) {
1351                                 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1352
1353                                 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1354                                 hlist_del_init_rcu(&session->global_hlist);
1355                                 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1356                                 synchronize_rcu();
1357                         }
1358
1359                         if (session->session_close != NULL)
1360                                 (*session->session_close)(session);
1361
1362                         if (session->deref != NULL)
1363                                 (*session->deref)(session);
1364
1365                         l2tp_session_dec_refcount(session);
1366
1367                         write_lock_bh(&tunnel->hlist_lock);
1368
1369                         /* Now restart from the beginning of this hash
1370                          * chain.  We always remove a session from the
1371                          * list so we are guaranteed to make forward
1372                          * progress.
1373                          */
1374                         goto again;
1375                 }
1376         }
1377         write_unlock_bh(&tunnel->hlist_lock);
1378 }
1379 EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1380
1381 /* Tunnel socket destroy hook for UDP encapsulation */
1382 static void l2tp_udp_encap_destroy(struct sock *sk)
1383 {
1384         struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1385         if (tunnel) {
1386                 l2tp_tunnel_closeall(tunnel);
1387                 sock_put(sk);
1388         }
1389 }
1390
1391 /* Really kill the tunnel.
1392  * Come here only when all sessions have been cleared from the tunnel.
1393  */
1394 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1395 {
1396         BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1397         BUG_ON(tunnel->sock != NULL);
1398         l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1399         kfree_rcu(tunnel, rcu);
1400 }
1401
1402 /* Workqueue tunnel deletion function */
1403 static void l2tp_tunnel_del_work(struct work_struct *work)
1404 {
1405         struct l2tp_tunnel *tunnel = NULL;
1406         struct socket *sock = NULL;
1407         struct sock *sk = NULL;
1408
1409         tunnel = container_of(work, struct l2tp_tunnel, del_work);
1410         sk = l2tp_tunnel_sock_lookup(tunnel);
1411         if (!sk)
1412                 return;
1413
1414         sock = sk->sk_socket;
1415         BUG_ON(!sock);
1416
1417         /* If the tunnel socket was created directly by the kernel, use the
1418          * sk_* API to release the socket now.  Otherwise go through the
1419          * inet_* layer to shut the socket down, and let userspace close it.
1420          * In either case the tunnel resources are freed in the socket
1421          * destructor when the tunnel socket goes away.
1422          */
1423         if (sock->file == NULL) {
1424                 kernel_sock_shutdown(sock, SHUT_RDWR);
1425                 sk_release_kernel(sk);
1426         } else {
1427                 inet_shutdown(sock, 2);
1428         }
1429
1430         l2tp_tunnel_sock_put(sk);
1431 }
1432
1433 /* Create a socket for the tunnel, if one isn't set up by
1434  * userspace. This is used for static tunnels where there is no
1435  * managing L2TP daemon.
1436  *
1437  * Since we don't want these sockets to keep a namespace alive by
1438  * themselves, we drop the socket's namespace refcount after creation.
1439  * These sockets are freed when the namespace exits using the pernet
1440  * exit hook.
1441  */
1442 static int l2tp_tunnel_sock_create(struct net *net,
1443                                 u32 tunnel_id,
1444                                 u32 peer_tunnel_id,
1445                                 struct l2tp_tunnel_cfg *cfg,
1446                                 struct socket **sockp)
1447 {
1448         int err = -EINVAL;
1449         struct socket *sock = NULL;
1450         struct sockaddr_in udp_addr = {0};
1451         struct sockaddr_l2tpip ip_addr = {0};
1452 #if IS_ENABLED(CONFIG_IPV6)
1453         struct sockaddr_in6 udp6_addr = {0};
1454         struct sockaddr_l2tpip6 ip6_addr = {0};
1455 #endif
1456
1457         switch (cfg->encap) {
1458         case L2TP_ENCAPTYPE_UDP:
1459 #if IS_ENABLED(CONFIG_IPV6)
1460                 if (cfg->local_ip6 && cfg->peer_ip6) {
1461                         err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1462                         if (err < 0)
1463                                 goto out;
1464
1465                         sk_change_net(sock->sk, net);
1466
1467                         udp6_addr.sin6_family = AF_INET6;
1468                         memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1469                                sizeof(udp6_addr.sin6_addr));
1470                         udp6_addr.sin6_port = htons(cfg->local_udp_port);
1471                         err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1472                                           sizeof(udp6_addr));
1473                         if (err < 0)
1474                                 goto out;
1475
1476                         udp6_addr.sin6_family = AF_INET6;
1477                         memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1478                                sizeof(udp6_addr.sin6_addr));
1479                         udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1480                         err = kernel_connect(sock,
1481                                              (struct sockaddr *) &udp6_addr,
1482                                              sizeof(udp6_addr), 0);
1483                         if (err < 0)
1484                                 goto out;
1485                 } else
1486 #endif
1487                 {
1488                         err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1489                         if (err < 0)
1490                                 goto out;
1491
1492                         sk_change_net(sock->sk, net);
1493
1494                         udp_addr.sin_family = AF_INET;
1495                         udp_addr.sin_addr = cfg->local_ip;
1496                         udp_addr.sin_port = htons(cfg->local_udp_port);
1497                         err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1498                                           sizeof(udp_addr));
1499                         if (err < 0)
1500                                 goto out;
1501
1502                         udp_addr.sin_family = AF_INET;
1503                         udp_addr.sin_addr = cfg->peer_ip;
1504                         udp_addr.sin_port = htons(cfg->peer_udp_port);
1505                         err = kernel_connect(sock,
1506                                              (struct sockaddr *) &udp_addr,
1507                                              sizeof(udp_addr), 0);
1508                         if (err < 0)
1509                                 goto out;
1510                 }
1511
1512                 if (!cfg->use_udp_checksums)
1513                         sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
1514
1515                 break;
1516
1517         case L2TP_ENCAPTYPE_IP:
1518 #if IS_ENABLED(CONFIG_IPV6)
1519                 if (cfg->local_ip6 && cfg->peer_ip6) {
1520                         err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1521                                           IPPROTO_L2TP, &sock);
1522                         if (err < 0)
1523                                 goto out;
1524
1525                         sk_change_net(sock->sk, net);
1526
1527                         ip6_addr.l2tp_family = AF_INET6;
1528                         memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1529                                sizeof(ip6_addr.l2tp_addr));
1530                         ip6_addr.l2tp_conn_id = tunnel_id;
1531                         err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1532                                           sizeof(ip6_addr));
1533                         if (err < 0)
1534                                 goto out;
1535
1536                         ip6_addr.l2tp_family = AF_INET6;
1537                         memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1538                                sizeof(ip6_addr.l2tp_addr));
1539                         ip6_addr.l2tp_conn_id = peer_tunnel_id;
1540                         err = kernel_connect(sock,
1541                                              (struct sockaddr *) &ip6_addr,
1542                                              sizeof(ip6_addr), 0);
1543                         if (err < 0)
1544                                 goto out;
1545                 } else
1546 #endif
1547                 {
1548                         err = sock_create_kern(AF_INET, SOCK_DGRAM,
1549                                           IPPROTO_L2TP, &sock);
1550                         if (err < 0)
1551                                 goto out;
1552
1553                         sk_change_net(sock->sk, net);
1554
1555                         ip_addr.l2tp_family = AF_INET;
1556                         ip_addr.l2tp_addr = cfg->local_ip;
1557                         ip_addr.l2tp_conn_id = tunnel_id;
1558                         err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1559                                           sizeof(ip_addr));
1560                         if (err < 0)
1561                                 goto out;
1562
1563                         ip_addr.l2tp_family = AF_INET;
1564                         ip_addr.l2tp_addr = cfg->peer_ip;
1565                         ip_addr.l2tp_conn_id = peer_tunnel_id;
1566                         err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1567                                              sizeof(ip_addr), 0);
1568                         if (err < 0)
1569                                 goto out;
1570                 }
1571                 break;
1572
1573         default:
1574                 goto out;
1575         }
1576
1577 out:
1578         *sockp = sock;
1579         if ((err < 0) && sock) {
1580                 kernel_sock_shutdown(sock, SHUT_RDWR);
1581                 sk_release_kernel(sock->sk);
1582                 *sockp = NULL;
1583         }
1584
1585         return err;
1586 }
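
/* Illustrative sketch (editorial example, not part of the original file):
 * roughly how a static, unmanaged tunnel could be configured so that the
 * fd < 0 path in l2tp_tunnel_create() reaches l2tp_tunnel_sock_create()
 * above.  The wrapper name and endpoint values are hypothetical; the cfg
 * fields are the ones this function consumes.
 */
#if 0
static int example_create_unmanaged_tunnel(struct net *net)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_tunnel_cfg cfg = {
		.encap			= L2TP_ENCAPTYPE_UDP,
		.local_udp_port		= 1701,
		.peer_udp_port		= 1701,
		.use_udp_checksums	= 1,
	};

	cfg.local_ip.s_addr = htonl(INADDR_LOOPBACK);
	cfg.peer_ip.s_addr  = htonl(INADDR_LOOPBACK);

	/* fd < 0 tells l2tp_tunnel_create() to build a kernel socket */
	return l2tp_tunnel_create(net, -1, L2TP_HDR_VER_3, 1, 2, &cfg, &tunnel);
}
#endif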
1587
1588 static struct lock_class_key l2tp_socket_class;
1589
1590 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1591 {
1592         struct l2tp_tunnel *tunnel = NULL;
1593         int err;
1594         struct socket *sock = NULL;
1595         struct sock *sk = NULL;
1596         struct l2tp_net *pn;
1597         enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1598
1599         /* Get the tunnel socket from the fd, which was opened by
1600          * the userspace L2TP daemon. If not specified, create a
1601          * kernel socket.
1602          */
1603         if (fd < 0) {
1604                 err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1605                                 cfg, &sock);
1606                 if (err < 0)
1607                         goto err;
1608         } else {
1609                 sock = sockfd_lookup(fd, &err);
1610                 if (!sock) {
1611                         pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1612                                tunnel_id, fd, err);
1613                         err = -EBADF;
1614                         goto err;
1615                 }
1616
1617                 /* Reject namespace mismatches */
1618                 if (!net_eq(sock_net(sock->sk), net)) {
1619                         pr_err("tunl %u: netns mismatch\n", tunnel_id);
1620                         err = -EINVAL;
1621                         goto err;
1622                 }
1623         }
1624
1625         sk = sock->sk;
1626
1627         if (cfg != NULL)
1628                 encap = cfg->encap;
1629
1630         /* Quick sanity checks */
1631         switch (encap) {
1632         case L2TP_ENCAPTYPE_UDP:
1633                 err = -EPROTONOSUPPORT;
1634                 if (sk->sk_protocol != IPPROTO_UDP) {
1635                         pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1636                                tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1637                         goto err;
1638                 }
1639                 break;
1640         case L2TP_ENCAPTYPE_IP:
1641                 err = -EPROTONOSUPPORT;
1642                 if (sk->sk_protocol != IPPROTO_L2TP) {
1643                         pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1644                                tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1645                         goto err;
1646                 }
1647                 break;
1648         }
1649
1650         /* Check if this socket has already been prepped */
1651         tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
1652         if (tunnel != NULL) {
1653                 /* This socket has already been prepped */
1654                 err = -EBUSY;
1655                 goto err;
1656         }
1657
1658         tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1659         if (tunnel == NULL) {
1660                 err = -ENOMEM;
1661                 goto err;
1662         }
1663
1664         tunnel->version = version;
1665         tunnel->tunnel_id = tunnel_id;
1666         tunnel->peer_tunnel_id = peer_tunnel_id;
1667         tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1668
1669         tunnel->magic = L2TP_TUNNEL_MAGIC;
1670         sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1671         rwlock_init(&tunnel->hlist_lock);
1672
1673         /* The net we belong to */
1674         tunnel->l2tp_net = net;
1675         pn = l2tp_pernet(net);
1676
1677         if (cfg != NULL)
1678                 tunnel->debug = cfg->debug;
1679
1680         /* Record the encapsulation type for this tunnel */
1681         tunnel->encap = encap;
1682         if (encap == L2TP_ENCAPTYPE_UDP) {
1683                 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1684                 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1685                 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1686                 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1687 #if IS_ENABLED(CONFIG_IPV6)
1688                 if (sk->sk_family == PF_INET6)
1689                         udpv6_encap_enable();
1690                 else
1691 #endif
1692                 udp_encap_enable();
1693         }
1694
1695         sk->sk_user_data = tunnel;
1696
1697         /* Hook into the tunnel socket destructor so that we can clean up
1698          * if the tunnel socket goes away.
1699          */
1700         tunnel->old_sk_destruct = sk->sk_destruct;
1701         sk->sk_destruct = &l2tp_tunnel_destruct;
1702         tunnel->sock = sk;
1703         tunnel->fd = fd;
1704         lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1705
1706         sk->sk_allocation = GFP_ATOMIC;
1707
1708         /* Init delete workqueue struct */
1709         INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1710
1711         /* Add tunnel to our list */
1712         INIT_LIST_HEAD(&tunnel->list);
1713         atomic_inc(&l2tp_tunnel_count);
1714
1715         /* Bump the reference count. The tunnel context is deleted
1716          * only when this drops to zero. Must be done before list insertion
1717          */
1718         l2tp_tunnel_inc_refcount(tunnel);
1719         spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1720         list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1721         spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1722
1723         err = 0;
1724 err:
1725         if (tunnelp)
1726                 *tunnelp = tunnel;
1727
1728         /* If the tunnel's socket was created by the kernel, it has no
1729          * file; drop the fd reference only for userspace-created sockets.
1730          */
1731         if (sock && sock->file)
1732                 sockfd_put(sock);
1733
1734         return err;
1735 }
1736 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
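
/* Illustrative sketch (editorial example, not part of the original file):
 * the managed case, where a userspace L2TP daemon has already opened and
 * connected the tunnel socket and hands in its fd.  The wrapper below is
 * hypothetical; the socket must belong to the same namespace as 'net' and
 * its protocol must match cfg->encap, or the sanity checks above reject it.
 */
#if 0
static int example_create_managed_tunnel(struct net *net, int fd,
					 u32 tunnel_id, u32 peer_tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_tunnel_cfg cfg = {
		.encap = L2TP_ENCAPTYPE_UDP,	/* fd must be an IPPROTO_UDP socket */
	};

	return l2tp_tunnel_create(net, fd, 2, tunnel_id, peer_tunnel_id,
				  &cfg, &tunnel);
}
#endif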
1737
1738 /* This function is used by the netlink TUNNEL_DELETE command.  Sessions are
1739  * closed here; the tunnel socket teardown is deferred to the l2tp workqueue. */
1740 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1741 {
1742         l2tp_tunnel_closeall(tunnel);
1743         return !queue_work(l2tp_wq, &tunnel->del_work);
1744 }
1745 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
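
/* Illustrative sketch (editorial example, not part of the original file):
 * a TUNNEL_DELETE-style caller looks the tunnel up by id and lets the
 * workqueue finish the socket teardown.  This assumes the l2tp_tunnel_find()
 * lookup helper declared in l2tp_core.h; the wrapper itself is hypothetical.
 */
#if 0
static int example_delete_tunnel(struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel_find(net, tunnel_id);

	if (!tunnel)
		return -ENODEV;

	/* Sessions are closed synchronously here; the tunnel socket is
	 * released later by l2tp_tunnel_del_work().
	 */
	l2tp_tunnel_delete(tunnel);
	return 0;
}
#endif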
1746
1747 /* Really kill the session.
1748  */
1749 void l2tp_session_free(struct l2tp_session *session)
1750 {
1751         struct l2tp_tunnel *tunnel;
1752
1753         BUG_ON(atomic_read(&session->ref_count) != 0);
1754
1755         tunnel = session->tunnel;
1756         if (tunnel != NULL) {
1757                 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1758
1759                 /* Delete the session from the hash */
1760                 write_lock_bh(&tunnel->hlist_lock);
1761                 hlist_del_init(&session->hlist);
1762                 write_unlock_bh(&tunnel->hlist_lock);
1763
1764                 /* Unlink from the global hash if not L2TPv2 */
1765                 if (tunnel->version != L2TP_HDR_VER_2) {
1766                         struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1767
1768                         spin_lock_bh(&pn->l2tp_session_hlist_lock);
1769                         hlist_del_init_rcu(&session->global_hlist);
1770                         spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1771                         synchronize_rcu();
1772                 }
1773
1774                 if (session->session_id != 0)
1775                         atomic_dec(&l2tp_session_count);
1776
1777                 sock_put(tunnel->sock);
1778
1779                 /* This will delete the tunnel context if this
1780                  * is the last session on the tunnel.
1781                  */
1782                 session->tunnel = NULL;
1783                 l2tp_tunnel_dec_refcount(tunnel);
1784         }
1785
1786         kfree(session);
1789 }
1790 EXPORT_SYMBOL_GPL(l2tp_session_free);
1791
1792 /* This function is used by the netlink SESSION_DELETE command and by
1793    pseudowire modules.
1794  */
1795 int l2tp_session_delete(struct l2tp_session *session)
1796 {
1797         if (session->session_close != NULL)
1798                 (*session->session_close)(session);
1799
1800         l2tp_session_dec_refcount(session);
1801
1802         return 0;
1803 }
1804 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1805
1806
1807 /* We come here whenever a session's send_seq, cookie_len or
1808  * l2specific_len parameters are set.
1809  */
1810 static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1811 {
1812         if (version == L2TP_HDR_VER_2) {
1813                 session->hdr_len = 6;
1814                 if (session->send_seq)
1815                         session->hdr_len += 4;
1816         } else {
1817                 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1818                 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1819                         session->hdr_len += 4;
1820         }
1821
1822 }
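
/* Worked examples of the header sizes computed above (editorial note):
 *
 *   L2TPv2, no sequencing:  hdr_len = 6              (L2TP_HDR_SIZE_NOSEQ)
 *   L2TPv2, send_seq set:   hdr_len = 6 + 4 = 10     (L2TP_HDR_SIZE_SEQ)
 *   L2TPv3 over IP, 8-byte cookie, 4-byte L2-specific sublayer, no offset:
 *                           hdr_len = 4 + 8 + 4 + 0 = 16
 *   The same L2TPv3 session over UDP encapsulation adds 4 more bytes:
 *                           hdr_len = 16 + 4 = 20
 */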
1823
1824 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1825 {
1826         struct l2tp_session *session;
1827
1828         session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1829         if (session != NULL) {
1830                 session->magic = L2TP_SESSION_MAGIC;
1831                 session->tunnel = tunnel;
1832
1833                 session->session_id = session_id;
1834                 session->peer_session_id = peer_session_id;
1835                 session->nr = 0;
1836
1837                 sprintf(&session->name[0], "sess %u/%u",
1838                         tunnel->tunnel_id, session->session_id);
1839
1840                 skb_queue_head_init(&session->reorder_q);
1841
1842                 INIT_HLIST_NODE(&session->hlist);
1843                 INIT_HLIST_NODE(&session->global_hlist);
1844
1845                 /* Inherit debug options from tunnel */
1846                 session->debug = tunnel->debug;
1847
1848                 if (cfg) {
1849                         session->pwtype = cfg->pw_type;
1850                         session->debug = cfg->debug;
1851                         session->mtu = cfg->mtu;
1852                         session->mru = cfg->mru;
1853                         session->send_seq = cfg->send_seq;
1854                         session->recv_seq = cfg->recv_seq;
1855                         session->lns_mode = cfg->lns_mode;
1856                         session->reorder_timeout = cfg->reorder_timeout;
1857                         session->offset = cfg->offset;
1858                         session->l2specific_type = cfg->l2specific_type;
1859                         session->l2specific_len = cfg->l2specific_len;
1860                         session->cookie_len = cfg->cookie_len;
1861                         memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1862                         session->peer_cookie_len = cfg->peer_cookie_len;
1863                         memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1864                 }
1865
1866                 if (tunnel->version == L2TP_HDR_VER_2)
1867                         session->build_header = l2tp_build_l2tpv2_header;
1868                 else
1869                         session->build_header = l2tp_build_l2tpv3_header;
1870
1871                 l2tp_session_set_header_len(session, tunnel->version);
1872
1873                 /* Bump the reference count. The session context is deleted
1874                  * only when this drops to zero.
1875                  */
1876                 l2tp_session_inc_refcount(session);
1877                 l2tp_tunnel_inc_refcount(tunnel);
1878
1879                 /* Ensure tunnel socket isn't deleted */
1880                 sock_hold(tunnel->sock);
1881
1882                 /* Add session to the tunnel's hash list */
1883                 write_lock_bh(&tunnel->hlist_lock);
1884                 hlist_add_head(&session->hlist,
1885                                l2tp_session_id_hash(tunnel, session_id));
1886                 write_unlock_bh(&tunnel->hlist_lock);
1887
1888                 /* And to the global session list if L2TPv3 */
1889                 if (tunnel->version != L2TP_HDR_VER_2) {
1890                         struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1891
1892                         spin_lock_bh(&pn->l2tp_session_hlist_lock);
1893                         hlist_add_head_rcu(&session->global_hlist,
1894                                            l2tp_session_id_hash_2(pn, session_id));
1895                         spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1896                 }
1897
1898                 /* Don't count the special management session (id 0) */
1899                 if (session->session_id != 0)
1900                         atomic_inc(&l2tp_session_count);
1901         }
1902
1903         return session;
1904 }
1905 EXPORT_SYMBOL_GPL(l2tp_session_create);
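
/* Illustrative sketch (editorial example, not part of the original file):
 * roughly how a pseudowire module might create a session on an existing
 * tunnel and reach the private area reserved by priv_size.  This assumes
 * the l2tp_session_priv() accessor from l2tp_core.h; the example_pw
 * structure and wrapper are hypothetical.
 */
#if 0
struct example_pw {
	u32 rx_frames;
};

static struct l2tp_session *example_create_session(struct l2tp_tunnel *tunnel,
						   u32 session_id,
						   u32 peer_session_id)
{
	struct l2tp_session_cfg cfg = {
		.mtu		= 1400,
		.send_seq	= 1,
	};
	struct l2tp_session *session;
	struct example_pw *pw;

	session = l2tp_session_create(sizeof(*pw), tunnel, session_id,
				      peer_session_id, &cfg);
	if (!session)
		return NULL;

	pw = l2tp_session_priv(session);	/* private data follows the session struct */
	pw->rx_frames = 0;

	return session;
}
#endif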
1906
1907 /*****************************************************************************
1908  * Init and cleanup
1909  *****************************************************************************/
1910
1911 static __net_init int l2tp_init_net(struct net *net)
1912 {
1913         struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1914         int hash;
1915
1916         INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1917         spin_lock_init(&pn->l2tp_tunnel_list_lock);
1918
1919         for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1920                 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1921
1922         spin_lock_init(&pn->l2tp_session_hlist_lock);
1923
1924         return 0;
1925 }
1926
1927 static __net_exit void l2tp_exit_net(struct net *net)
1928 {
1929         struct l2tp_net *pn = l2tp_pernet(net);
1930         struct l2tp_tunnel *tunnel = NULL;
1931
1932         rcu_read_lock_bh();
1933         list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1934                 (void)l2tp_tunnel_delete(tunnel);
1935         }
1936         rcu_read_unlock_bh();
1937 }
1938
1939 static struct pernet_operations l2tp_net_ops = {
1940         .init = l2tp_init_net,
1941         .exit = l2tp_exit_net,
1942         .id   = &l2tp_net_id,
1943         .size = sizeof(struct l2tp_net),
1944 };
1945
1946 static int __init l2tp_init(void)
1947 {
1948         int rc = 0;
1949
1950         rc = register_pernet_device(&l2tp_net_ops);
1951         if (rc)
1952                 goto out;
1953
1954         l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
1955         if (!l2tp_wq) {
1956                 pr_err("alloc_workqueue failed\n");
1957                 rc = -ENOMEM;
1958                 goto out;
1959         }
1960
1961         pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1962
1963 out:
1964         return rc;
1965 }
1966
1967 static void __exit l2tp_exit(void)
1968 {
1969         unregister_pernet_device(&l2tp_net_ops);
1970         if (l2tp_wq) {
1971                 destroy_workqueue(l2tp_wq);
1972                 l2tp_wq = NULL;
1973         }
1974 }
1975
1976 module_init(l2tp_init);
1977 module_exit(l2tp_exit);
1978
1979 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1980 MODULE_DESCRIPTION("L2TP core");
1981 MODULE_LICENSE("GPL");
1982 MODULE_VERSION(L2TP_DRV_VERSION);
1983