l2tp: Split pppol2tp patch into separate l2tp and ppp parts
[linux-2.6-block.git] / net / l2tp / l2tp_core.c
1/*
2 * L2TP core.
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This file contains some code of the original L2TPv2 pppol2tp
7 * driver, which has the following copyright:
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21#include <linux/module.h>
22#include <linux/string.h>
23#include <linux/list.h>
24#include <linux/uaccess.h>
25
26#include <linux/kernel.h>
27#include <linux/spinlock.h>
28#include <linux/kthread.h>
29#include <linux/sched.h>
30#include <linux/slab.h>
31#include <linux/errno.h>
32#include <linux/jiffies.h>
33
34#include <linux/netdevice.h>
35#include <linux/net.h>
36#include <linux/inetdevice.h>
37#include <linux/skbuff.h>
38#include <linux/init.h>
39#include <linux/ip.h>
40#include <linux/udp.h>
41#include <linux/hash.h>
42#include <linux/sort.h>
43#include <linux/file.h>
44#include <linux/nsproxy.h>
45#include <net/net_namespace.h>
46#include <net/netns/generic.h>
47#include <net/dst.h>
48#include <net/ip.h>
49#include <net/udp.h>
50#include <net/xfrm.h>
51
52#include <asm/byteorder.h>
53#include <asm/atomic.h>
54
55#include "l2tp_core.h"
56
57#define L2TP_DRV_VERSION "V2.0"
58
59/* L2TP header constants */
60#define L2TP_HDRFLAG_T 0x8000
61#define L2TP_HDRFLAG_L 0x4000
62#define L2TP_HDRFLAG_S 0x0800
63#define L2TP_HDRFLAG_O 0x0200
64#define L2TP_HDRFLAG_P 0x0100
65
66#define L2TP_HDR_VER_MASK 0x000F
67#define L2TP_HDR_VER_2 0x0002
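/* These flag values correspond to the first 16-bit word of an L2TPv2
 * header (RFC 2661): T marks a control message, L means a length field
 * is present, S means the Ns/Nr sequence fields are present, O means an
 * offset field is present and P requests priority handling. The
 * protocol version sits in the low four bits.
 */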
68
69/* L2TPv3 default L2-specific sublayer */
70#define L2TP_SLFLAG_S 0x40000000
71#define L2TP_SL_SEQ_MASK 0x00ffffff
72
73#define L2TP_HDR_SIZE_SEQ 10
74#define L2TP_HDR_SIZE_NOSEQ 6
75
76/* Default trace flags */
77#define L2TP_DEFAULT_DEBUG_FLAGS 0
78
79#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
80 do { \
81 if ((_mask) & (_type)) \
82 printk(_lvl "L2TP: " _fmt, ##args); \
83 } while (0)
84
85/* Private data stored for received packets in the skb.
86 */
87struct l2tp_skb_cb {
88 u16 ns;
89 u16 has_seq;
90 u16 length;
91 unsigned long expires;
92};
93
94#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
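/* The receive path below uses this to stash per-packet L2TP state, e.g.:
 *
 *	L2TP_SKB_CB(skb)->ns      = ns;
 *	L2TP_SKB_CB(skb)->has_seq = 1;
 *
 * The cast starts after sizeof(struct inet_skb_parm) so that the IP
 * layer's own control block contents are left intact.
 */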
95
96static atomic_t l2tp_tunnel_count;
97static atomic_t l2tp_session_count;
98
99/* per-net private data for this module */
100static unsigned int l2tp_net_id;
101struct l2tp_net {
102 struct list_head l2tp_tunnel_list;
103 rwlock_t l2tp_tunnel_list_lock;
104};
105
106static inline struct l2tp_net *l2tp_pernet(struct net *net)
107{
108 BUG_ON(!net);
109
110 return net_generic(net, l2tp_net_id);
111}
112
113/* Session hash list.
114 * The session_id SHOULD be random according to RFC2661, but several
115 * L2TP implementations (Cisco and Microsoft) use incrementing
116 * session_ids. So we do a real hash on the session_id, rather than a
117 * simple bitmask.
118 */
119static inline struct hlist_head *
120l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
121{
122 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
123}
124
125/* Lookup a session by id
126 */
127struct l2tp_session *l2tp_session_find(struct l2tp_tunnel *tunnel, u32 session_id)
128{
129 struct hlist_head *session_list =
130 l2tp_session_id_hash(tunnel, session_id);
131 struct l2tp_session *session;
132 struct hlist_node *walk;
133
134 read_lock_bh(&tunnel->hlist_lock);
135 hlist_for_each_entry(session, walk, session_list, hlist) {
136 if (session->session_id == session_id) {
137 read_unlock_bh(&tunnel->hlist_lock);
138 return session;
139 }
140 }
141 read_unlock_bh(&tunnel->hlist_lock);
142
143 return NULL;
144}
145EXPORT_SYMBOL_GPL(l2tp_session_find);
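/* Illustrative usage (a sketch, not a caller defined in this file). The
 * lookup itself takes no reference, so a caller that keeps the pointer
 * should take its own, as l2tp_udp_recv_core() does below:
 *
 *	session = l2tp_session_find(tunnel, session_id);
 *	if (session == NULL)
 *		goto error;
 *	l2tp_session_inc_refcount(session);
 *	...
 *	l2tp_session_dec_refcount(session);
 */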
146
147struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
148{
149 int hash;
150 struct hlist_node *walk;
151 struct l2tp_session *session;
152 int count = 0;
153
154 read_lock_bh(&tunnel->hlist_lock);
155 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
156 hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
157 if (++count > nth) {
158 read_unlock_bh(&tunnel->hlist_lock);
159 return session;
160 }
161 }
162 }
163
164 read_unlock_bh(&tunnel->hlist_lock);
165
166 return NULL;
167}
168EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
169
170/* Lookup a tunnel by id
171 */
172struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
173{
174 struct l2tp_tunnel *tunnel;
175 struct l2tp_net *pn = l2tp_pernet(net);
176
177 read_lock_bh(&pn->l2tp_tunnel_list_lock);
178 list_for_each_entry(tunnel, &pn->l2tp_tunnel_list, list) {
179 if (tunnel->tunnel_id == tunnel_id) {
180 read_unlock_bh(&pn->l2tp_tunnel_list_lock);
181 return tunnel;
182 }
183 }
184 read_unlock_bh(&pn->l2tp_tunnel_list_lock);
185
186 return NULL;
187}
188EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
189
190struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
191{
192 struct l2tp_net *pn = l2tp_pernet(net);
193 struct l2tp_tunnel *tunnel;
194 int count = 0;
195
196 read_lock_bh(&pn->l2tp_tunnel_list_lock);
197 list_for_each_entry(tunnel, &pn->l2tp_tunnel_list, list) {
198 if (++count > nth) {
199 read_unlock_bh(&pn->l2tp_tunnel_list_lock);
200 return tunnel;
201 }
202 }
203
204 read_unlock_bh(&pn->l2tp_tunnel_list_lock);
205
206 return NULL;
207}
208EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
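/* The _nth lookups above are O(n) walks intended for iteration, e.g.
 * when dumping state (sketch only; dump_one_tunnel() is a hypothetical
 * caller, not part of this file):
 *
 *	for (i = 0; (tunnel = l2tp_tunnel_find_nth(net, i)) != NULL; i++)
 *		dump_one_tunnel(tunnel);
 */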
209
210/*****************************************************************************
211 * Receive data handling
212 *****************************************************************************/
213
214/* Queue a skb in order. We come here only if the skb has an L2TP sequence
215 * number.
216 */
217static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
218{
219 struct sk_buff *skbp;
220 struct sk_buff *tmp;
221 u16 ns = L2TP_SKB_CB(skb)->ns;
222
223 spin_lock_bh(&session->reorder_q.lock);
224 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
225 if (L2TP_SKB_CB(skbp)->ns > ns) {
226 __skb_queue_before(&session->reorder_q, skbp, skb);
227 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
228 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
229 session->name, ns, L2TP_SKB_CB(skbp)->ns,
230 skb_queue_len(&session->reorder_q));
231 session->stats.rx_oos_packets++;
232 goto out;
233 }
234 }
235
236 __skb_queue_tail(&session->reorder_q, skb);
237
238out:
239 spin_unlock_bh(&session->reorder_q.lock);
240}
241
242/* Dequeue a single skb.
243 */
244static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
245{
246 struct l2tp_tunnel *tunnel = session->tunnel;
247 int length = L2TP_SKB_CB(skb)->length;
248
249 /* We're about to requeue the skb, so return resources
250 * to its current owner (a socket receive buffer).
251 */
252 skb_orphan(skb);
253
254 tunnel->stats.rx_packets++;
255 tunnel->stats.rx_bytes += length;
256 session->stats.rx_packets++;
257 session->stats.rx_bytes += length;
258
259 if (L2TP_SKB_CB(skb)->has_seq) {
260 /* Bump our Nr */
261 session->nr++;
262 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
263 "%s: updated nr to %hu\n", session->name, session->nr);
264 }
265
266 /* call private receive handler */
267 if (session->recv_skb != NULL)
268 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
269 else
270 kfree_skb(skb);
271
272 if (session->deref)
273 (*session->deref)(session);
274}
275
276/* Dequeue skbs from the session's reorder_q, subject to packet order.
277 * Skbs that have been in the queue for too long are simply discarded.
278 */
279static void l2tp_recv_dequeue(struct l2tp_session *session)
280{
281 struct sk_buff *skb;
282 struct sk_buff *tmp;
283
284 /* If the pkt at the head of the queue has the nr that we
285 * expect to send up next, dequeue it and any other
286 * in-sequence packets behind it.
287 */
288 spin_lock_bh(&session->reorder_q.lock);
289 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
290 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
291 session->stats.rx_seq_discards++;
292 session->stats.rx_errors++;
293 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
294 "%s: oos pkt %hu len %d discarded (too old), "
295 "waiting for %hu, reorder_q_len=%d\n",
296 session->name, L2TP_SKB_CB(skb)->ns,
297 L2TP_SKB_CB(skb)->length, session->nr,
298 skb_queue_len(&session->reorder_q));
299 __skb_unlink(skb, &session->reorder_q);
300 kfree_skb(skb);
301 if (session->deref)
302 (*session->deref)(session);
303 continue;
304 }
305
306 if (L2TP_SKB_CB(skb)->has_seq) {
307 if (L2TP_SKB_CB(skb)->ns != session->nr) {
308 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
309 "%s: holding oos pkt %hu len %d, "
310 "waiting for %hu, reorder_q_len=%d\n",
311 session->name, L2TP_SKB_CB(skb)->ns,
312 L2TP_SKB_CB(skb)->length, session->nr,
313 skb_queue_len(&session->reorder_q));
314 goto out;
315 }
316 }
317 __skb_unlink(skb, &session->reorder_q);
318
319 /* Process the skb. We release the queue lock while we
320 * do so to let other contexts process the queue.
321 */
322 spin_unlock_bh(&session->reorder_q.lock);
323 l2tp_recv_dequeue_skb(session, skb);
324 spin_lock_bh(&session->reorder_q.lock);
325 }
326
327out:
328 spin_unlock_bh(&session->reorder_q.lock);
329}
330
331static inline int l2tp_verify_udp_checksum(struct sock *sk,
332 struct sk_buff *skb)
333{
334 struct udphdr *uh = udp_hdr(skb);
335 u16 ulen = ntohs(uh->len);
336 struct inet_sock *inet;
337 __wsum psum;
338
339 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
340 return 0;
341
342 inet = inet_sk(sk);
343 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
344 IPPROTO_UDP, 0);
345
346 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
347 !csum_fold(csum_add(psum, skb->csum)))
348 return 0;
349
350 skb->csum = psum;
351
352 return __skb_checksum_complete(skb);
353}
354
355/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
356 * here. The skb is not on a list when we get here.
357 * Returns 0 if the packet was a data packet and was successfully passed on.
358 * Returns 1 if the packet was not a good data packet and could not be
359 * forwarded. All such packets are passed up to userspace to deal with.
360 */
361int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
362 int (*payload_hook)(struct sk_buff *skb))
363{
364 struct l2tp_session *session = NULL;
365 unsigned char *ptr, *optr;
366 u16 hdrflags;
367 u32 tunnel_id, session_id;
368 int length;
369 int offset;
370 u16 version;
371 u16 ns, nr;
372
373 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
374 goto discard_bad_csum;
375
376 /* UDP always verifies the packet length. */
377 __skb_pull(skb, sizeof(struct udphdr));
378
379 /* Short packet? */
380 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
381 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
382 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
383 goto error;
384 }
385
386 /* Point to L2TP header */
387 optr = ptr = skb->data;
388
389 /* Trace packet contents, if enabled */
390 if (tunnel->debug & L2TP_MSG_DATA) {
391 length = min(32u, skb->len);
392 if (!pskb_may_pull(skb, length))
393 goto error;
394
395 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
396
397 offset = 0;
398 do {
399 printk(" %02X", ptr[offset]);
400 } while (++offset < length);
401
402 printk("\n");
403 }
404
405 /* Get L2TP header flags */
406 hdrflags = ntohs(*(__be16 *)ptr);
407
408 /* Check protocol version */
409 version = hdrflags & L2TP_HDR_VER_MASK;
410 if (version != tunnel->version) {
411 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
412 "%s: recv protocol version mismatch: got %d expected %d\n",
413 tunnel->name, version, tunnel->version);
414 goto error;
415 }
416
417 /* Get length of L2TP packet */
418 length = skb->len;
419
 420	/* If this is a control packet, it is handled by userspace. */
421 if (hdrflags & L2TP_HDRFLAG_T) {
422 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
423 "%s: recv control packet, len=%d\n", tunnel->name, length);
424 goto error;
425 }
426
427 /* Skip flags */
428 ptr += 2;
429
430 /* If length is present, skip it */
431 if (hdrflags & L2TP_HDRFLAG_L)
432 ptr += 2;
433
434 /* Extract tunnel and session ID */
435 tunnel_id = ntohs(*(__be16 *) ptr);
436 ptr += 2;
437 session_id = ntohs(*(__be16 *) ptr);
438 ptr += 2;
439
440 /* Find the session context */
441 session = l2tp_session_find(tunnel, session_id);
442 if (!session) {
443 /* Not found? Pass to userspace to deal with */
444 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
445 "%s: no session found (%hu/%hu). Passing up.\n",
446 tunnel->name, tunnel_id, session_id);
447 goto error;
448 }
449
450 /* The ref count is increased since we now hold a pointer to
451 * the session. Take care to decrement the refcnt when exiting
452 * this function from now on...
453 */
454 l2tp_session_inc_refcount(session);
455 if (session->ref)
456 (*session->ref)(session);
457
458 /* Handle the optional sequence numbers. Sequence numbers are
459 * in different places for L2TPv2 and L2TPv3.
460 *
461 * If we are the LAC, enable/disable sequence numbers under
462 * the control of the LNS. If no sequence numbers present but
463 * we were expecting them, discard frame.
464 */
465 ns = nr = 0;
466 L2TP_SKB_CB(skb)->has_seq = 0;
467 if (hdrflags & L2TP_HDRFLAG_S) {
468 ns = (u16) ntohs(*(__be16 *) ptr);
469 ptr += 2;
470 nr = ntohs(*(__be16 *) ptr);
471 ptr += 2;
472
473 /* Store L2TP info in the skb */
474 L2TP_SKB_CB(skb)->ns = ns;
475 L2TP_SKB_CB(skb)->has_seq = 1;
476
477 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
478 "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
479 session->name, ns, nr, session->nr);
480 }
481
482 if (L2TP_SKB_CB(skb)->has_seq) {
 483		/* Received a packet with sequence numbers. If we're the LAC and
 484		 * not already sending sequence numbers, the LNS has enabled
 485		 * sequencing, so start sending them too.
486 */
487 if ((!session->lns_mode) && (!session->send_seq)) {
488 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
489 "%s: requested to enable seq numbers by LNS\n",
490 session->name);
491 session->send_seq = -1;
492 }
493 } else {
494 /* No sequence numbers.
495 * If user has configured mandatory sequence numbers, discard.
496 */
497 if (session->recv_seq) {
498 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
499 "%s: recv data has no seq numbers when required. "
500 "Discarding\n", session->name);
501 session->stats.rx_seq_discards++;
502 goto discard;
503 }
504
505 /* If we're the LAC and we're sending sequence numbers, the
506 * LNS has requested that we no longer send sequence numbers.
507 * If we're the LNS and we're sending sequence numbers, the
508 * LAC is broken. Discard the frame.
509 */
510 if ((!session->lns_mode) && (session->send_seq)) {
511 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
512 "%s: requested to disable seq numbers by LNS\n",
513 session->name);
514 session->send_seq = 0;
515 } else if (session->send_seq) {
516 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
517 "%s: recv data has no seq numbers when required. "
518 "Discarding\n", session->name);
519 session->stats.rx_seq_discards++;
520 goto discard;
521 }
522 }
523
524 /* If offset bit set, skip it. */
525 if (hdrflags & L2TP_HDRFLAG_O) {
526 offset = ntohs(*(__be16 *)ptr);
527 ptr += 2 + offset;
528 }
529
530 offset = ptr - optr;
531 if (!pskb_may_pull(skb, offset))
532 goto discard;
533
534 __skb_pull(skb, offset);
535
536 /* If caller wants to process the payload before we queue the
537 * packet, do so now.
538 */
539 if (payload_hook)
540 if ((*payload_hook)(skb))
541 goto discard;
542
543 /* Prepare skb for adding to the session's reorder_q. Hold
544 * packets for max reorder_timeout or 1 second if not
545 * reordering.
546 */
547 L2TP_SKB_CB(skb)->length = length;
548 L2TP_SKB_CB(skb)->expires = jiffies +
549 (session->reorder_timeout ? session->reorder_timeout : HZ);
550
551 /* Add packet to the session's receive queue. Reordering is done here, if
 552	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
553 */
554 if (L2TP_SKB_CB(skb)->has_seq) {
555 if (session->reorder_timeout != 0) {
556 /* Packet reordering enabled. Add skb to session's
557 * reorder queue, in order of ns.
558 */
559 l2tp_recv_queue_skb(session, skb);
560 } else {
561 /* Packet reordering disabled. Discard out-of-sequence
562 * packets
563 */
564 if (L2TP_SKB_CB(skb)->ns != session->nr) {
565 session->stats.rx_seq_discards++;
566 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
567 "%s: oos pkt %hu len %d discarded, "
568 "waiting for %hu, reorder_q_len=%d\n",
569 session->name, L2TP_SKB_CB(skb)->ns,
570 L2TP_SKB_CB(skb)->length, session->nr,
571 skb_queue_len(&session->reorder_q));
572 goto discard;
573 }
574 skb_queue_tail(&session->reorder_q, skb);
575 }
576 } else {
577 /* No sequence numbers. Add the skb to the tail of the
578 * reorder queue. This ensures that it will be
579 * delivered after all previous sequenced skbs.
580 */
581 skb_queue_tail(&session->reorder_q, skb);
582 }
583
584 /* Try to dequeue as many skbs from reorder_q as we can. */
585 l2tp_recv_dequeue(session);
586
587 l2tp_session_dec_refcount(session);
588
589 return 0;
590
591discard:
592 session->stats.rx_errors++;
593 kfree_skb(skb);
594
595 if (session->deref)
596 (*session->deref)(session);
597
598 l2tp_session_dec_refcount(session);
599
600 return 0;
601
602discard_bad_csum:
603 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
604 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
605 tunnel->stats.rx_errors++;
606 kfree_skb(skb);
607
608 return 0;
609
610error:
611 /* Put UDP header back */
612 __skb_push(skb, sizeof(struct udphdr));
613
614 return 1;
615}
616EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
617
618/* UDP encapsulation receive handler. See net/ipv4/udp.c.
619 * Return codes:
620 * 0 : success.
621 * <0: error
622 * >0: skb should be passed up to userspace as UDP.
623 */
624int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
625{
626 struct l2tp_tunnel *tunnel;
627
628 tunnel = l2tp_sock_to_tunnel(sk);
629 if (tunnel == NULL)
630 goto pass_up;
631
632 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
633 "%s: received %d bytes\n", tunnel->name, skb->len);
634
635 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
636 goto pass_up_put;
637
638 sock_put(sk);
639 return 0;
640
641pass_up_put:
642 sock_put(sk);
643pass_up:
644 return 1;
645}
646EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
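/* This handler is wired up per tunnel socket in l2tp_tunnel_create()
 * below, which marks the socket as an encapsulation socket:
 *
 *	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 *	udp_sk(sk)->encap_rcv  = l2tp_udp_encap_recv;
 *
 * After that, the UDP stack calls l2tp_udp_encap_recv() for every
 * datagram received on the socket.
 */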
647
648/************************************************************************
649 * Transmit handling
650 ***********************************************************************/
651
652/* Build an L2TP header for the session into the buffer provided.
653 */
654static void l2tp_build_l2tpv2_header(struct l2tp_tunnel *tunnel,
655 struct l2tp_session *session,
656 void *buf)
657{
658 __be16 *bufp = buf;
659 u16 flags = L2TP_HDR_VER_2;
660 u32 tunnel_id = tunnel->peer_tunnel_id;
661 u32 session_id = session->peer_session_id;
662
663 if (session->send_seq)
664 flags |= L2TP_HDRFLAG_S;
665
666 /* Setup L2TP header. */
667 *bufp++ = htons(flags);
668 *bufp++ = htons(tunnel_id);
669 *bufp++ = htons(session_id);
670 if (session->send_seq) {
671 *bufp++ = htons(session->ns);
672 *bufp++ = 0;
673 session->ns++;
674 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
675 "%s: updated ns to %hu\n", session->name, session->ns);
676 }
677}
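/* Worked example: with send_seq enabled, the header built above is the
 * 10-byte L2TP_HDR_SIZE_SEQ form. For peer_tunnel_id 5, peer_session_id 7
 * and ns 1, the flags word is 0x0802 (S bit | version 2) and the wire
 * bytes are:
 *
 *	08 02 00 05 00 07 00 01 00 00
 *
 * Without send_seq, only the first three 16-bit words are written
 * (L2TP_HDR_SIZE_NOSEQ, 6 bytes).
 */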
678
679void l2tp_build_l2tp_header(struct l2tp_session *session, void *buf)
680{
681 struct l2tp_tunnel *tunnel = session->tunnel;
682
683 BUG_ON(tunnel->version != L2TP_HDR_VER_2);
684 l2tp_build_l2tpv2_header(tunnel, session, buf);
685}
686EXPORT_SYMBOL_GPL(l2tp_build_l2tp_header);
687
688int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
689{
690 struct l2tp_tunnel *tunnel = session->tunnel;
691 unsigned int len = skb->len;
692 int error;
693
694 /* Debug */
695 if (session->send_seq)
696 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
697 "%s: send %Zd bytes, ns=%hu\n", session->name,
698 data_len, session->ns - 1);
699 else
700 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
701 "%s: send %Zd bytes\n", session->name, data_len);
702
703 if (session->debug & L2TP_MSG_DATA) {
704 int i;
705 unsigned char *datap = skb->data + sizeof(struct udphdr);
706
707 printk(KERN_DEBUG "%s: xmit:", session->name);
708 for (i = 0; i < (len - sizeof(struct udphdr)); i++) {
709 printk(" %02X", *datap++);
710 if (i == 31) {
711 printk(" ...");
712 break;
713 }
714 }
715 printk("\n");
716 }
717
718 /* Queue the packet to IP for output */
719 error = ip_queue_xmit(skb, 1);
720
721 /* Update stats */
722 if (error >= 0) {
723 tunnel->stats.tx_packets++;
724 tunnel->stats.tx_bytes += len;
725 session->stats.tx_packets++;
726 session->stats.tx_bytes += len;
727 } else {
728 tunnel->stats.tx_errors++;
729 session->stats.tx_errors++;
730 }
731
732 return 0;
733}
734EXPORT_SYMBOL_GPL(l2tp_xmit_core);
735
736/* Automatically called when the skb is freed.
737 */
738static void l2tp_sock_wfree(struct sk_buff *skb)
739{
740 sock_put(skb->sk);
741}
742
 743/* For data skbs that we transmit, we associate them with the tunnel socket
744 * but don't do accounting.
745 */
746static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
747{
748 sock_hold(sk);
749 skb->sk = sk;
750 skb->destructor = l2tp_sock_wfree;
751}
752
753/* If caller requires the skb to have a ppp header, the header must be
754 * inserted in the skb data before calling this function.
755 */
756int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
757{
758 int data_len = skb->len;
759 struct sock *sk = session->tunnel->sock;
760 struct udphdr *uh;
761 unsigned int udp_len;
762 struct inet_sock *inet;
763 __wsum csum;
764 int old_headroom;
765 int new_headroom;
766 int headroom;
767
768 /* Check that there's enough headroom in the skb to insert IP,
769 * UDP and L2TP headers. If not enough, expand it to
770 * make room. Adjust truesize.
771 */
772 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
773 sizeof(struct udphdr) + hdr_len;
774 old_headroom = skb_headroom(skb);
775 if (skb_cow_head(skb, headroom))
776 goto abort;
777
778 new_headroom = skb_headroom(skb);
779 skb_orphan(skb);
780 skb->truesize += new_headroom - old_headroom;
781
782 /* Setup L2TP header */
783 l2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
784 udp_len = sizeof(struct udphdr) + hdr_len + data_len;
785
786 /* Setup UDP header */
787 inet = inet_sk(sk);
788 __skb_push(skb, sizeof(*uh));
789 skb_reset_transport_header(skb);
790 uh = udp_hdr(skb);
791 uh->source = inet->inet_sport;
792 uh->dest = inet->inet_dport;
793 uh->len = htons(udp_len);
794
795 uh->check = 0;
796
797 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
798 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
799 IPSKB_REROUTED);
800 nf_reset(skb);
801
802 /* Get routing info from the tunnel socket */
803 skb_dst_drop(skb);
804 skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
805 l2tp_skb_set_owner_w(skb, sk);
806
807 /* Calculate UDP checksum if configured to do so */
808 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
809 skb->ip_summed = CHECKSUM_NONE;
810 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
811 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
812 skb->ip_summed = CHECKSUM_COMPLETE;
813 csum = skb_checksum(skb, 0, udp_len, 0);
814 uh->check = csum_tcpudp_magic(inet->inet_saddr,
815 inet->inet_daddr,
816 udp_len, IPPROTO_UDP, csum);
817 if (uh->check == 0)
818 uh->check = CSUM_MANGLED_0;
819 } else {
820 skb->ip_summed = CHECKSUM_PARTIAL;
821 skb->csum_start = skb_transport_header(skb) - skb->head;
822 skb->csum_offset = offsetof(struct udphdr, check);
823 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
824 inet->inet_daddr,
825 udp_len, IPPROTO_UDP, 0);
826 }
827
828 l2tp_xmit_core(session, skb, data_len);
829
830abort:
831 return 0;
832}
833EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
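/* Illustrative transmit sequence for a protocol driver built on this
 * core (a sketch; the real pppol2tp caller lives in a separate file):
 *
 *	... build the payload and push any protocol header (e.g. PPP) ...
 *	l2tp_xmit_skb(session, skb, session->hdr_len);
 *
 * l2tp_xmit_skb() grows the headroom if needed, prepends the L2TP and
 * UDP headers and hands the packet to ip_queue_xmit() over the tunnel
 * socket.
 */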
834
835/*****************************************************************************
 836 * Tunnel and session create/destroy.
837 *****************************************************************************/
838
839/* Tunnel socket destruct hook.
840 * The tunnel context is deleted only when all session sockets have been
841 * closed.
842 */
843void l2tp_tunnel_destruct(struct sock *sk)
844{
845 struct l2tp_tunnel *tunnel;
846
847 tunnel = sk->sk_user_data;
848 if (tunnel == NULL)
849 goto end;
850
851 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
852 "%s: closing...\n", tunnel->name);
853
854 /* Close all sessions */
855 l2tp_tunnel_closeall(tunnel);
856
857 /* No longer an encapsulation socket. See net/ipv4/udp.c */
858 (udp_sk(sk))->encap_type = 0;
859 (udp_sk(sk))->encap_rcv = NULL;
860
861 /* Remove hooks into tunnel socket */
862 tunnel->sock = NULL;
863 sk->sk_destruct = tunnel->old_sk_destruct;
864 sk->sk_user_data = NULL;
865
866 /* Call the original destructor */
867 if (sk->sk_destruct)
868 (*sk->sk_destruct)(sk);
869
870 /* We're finished with the socket */
871 l2tp_tunnel_dec_refcount(tunnel);
872
873end:
874 return;
875}
876EXPORT_SYMBOL(l2tp_tunnel_destruct);
877
878/* When the tunnel is closed, all the attached sessions need to go too.
879 */
880void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
881{
882 int hash;
883 struct hlist_node *walk;
884 struct hlist_node *tmp;
885 struct l2tp_session *session;
886
887 BUG_ON(tunnel == NULL);
888
889 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
890 "%s: closing all sessions...\n", tunnel->name);
891
892 write_lock_bh(&tunnel->hlist_lock);
893 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
894again:
895 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
896 session = hlist_entry(walk, struct l2tp_session, hlist);
897
898 PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
899 "%s: closing session\n", session->name);
900
901 hlist_del_init(&session->hlist);
902
903 /* Since we should hold the sock lock while
904 * doing any unbinding, we need to release the
905 * lock we're holding before taking that lock.
906 * Hold a reference to the sock so it doesn't
907 * disappear as we're jumping between locks.
908 */
909 if (session->ref != NULL)
910 (*session->ref)(session);
911
912 write_unlock_bh(&tunnel->hlist_lock);
913
914 if (session->session_close != NULL)
915 (*session->session_close)(session);
916
917 if (session->deref != NULL)
918 (*session->deref)(session);
919
920 write_lock_bh(&tunnel->hlist_lock);
921
922 /* Now restart from the beginning of this hash
923 * chain. We always remove a session from the
924 * list so we are guaranteed to make forward
925 * progress.
926 */
927 goto again;
928 }
929 }
930 write_unlock_bh(&tunnel->hlist_lock);
931}
932EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
933
934/* Really kill the tunnel.
935 * Come here only when all sessions have been cleared from the tunnel.
936 */
937void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
938{
939 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
940
941 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
942 BUG_ON(tunnel->sock != NULL);
943
944 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
945 "%s: free...\n", tunnel->name);
946
947 /* Remove from tunnel list */
948 write_lock_bh(&pn->l2tp_tunnel_list_lock);
949 list_del_init(&tunnel->list);
950 write_unlock_bh(&pn->l2tp_tunnel_list_lock);
951
952 atomic_dec(&l2tp_tunnel_count);
953 kfree(tunnel);
954}
955EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
956
957int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
958{
959 struct l2tp_tunnel *tunnel = NULL;
960 int err;
961 struct socket *sock = NULL;
962 struct sock *sk = NULL;
963 struct l2tp_net *pn;
964
965 /* Get the tunnel socket from the fd, which was opened by
966 * the userspace L2TP daemon.
967 */
968 err = -EBADF;
969 sock = sockfd_lookup(fd, &err);
970 if (!sock) {
971 printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
972 tunnel_id, fd, err);
973 goto err;
974 }
975
976 sk = sock->sk;
977
978 /* Quick sanity checks */
979 err = -EPROTONOSUPPORT;
980 if (sk->sk_protocol != IPPROTO_UDP) {
981 printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
982 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
983 goto err;
984 }
985 err = -EAFNOSUPPORT;
986 if (sock->ops->family != AF_INET) {
987 printk(KERN_ERR "tunl %hu: fd %d wrong family, got %d, expected %d\n",
988 tunnel_id, fd, sock->ops->family, AF_INET);
989 goto err;
990 }
991
992 /* Check if this socket has already been prepped */
993 tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
994 if (tunnel != NULL) {
995 /* This socket has already been prepped */
996 err = -EBUSY;
997 goto err;
998 }
999
1000 if (version != L2TP_HDR_VER_2)
1001 goto err;
1002
1003 tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1004 if (tunnel == NULL) {
1005 err = -ENOMEM;
1006 goto err;
1007 }
1008
1009 tunnel->version = version;
1010 tunnel->tunnel_id = tunnel_id;
1011 tunnel->peer_tunnel_id = peer_tunnel_id;
1012 tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1013
1014 tunnel->magic = L2TP_TUNNEL_MAGIC;
1015 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1016 rwlock_init(&tunnel->hlist_lock);
1017
1018 /* The net we belong to */
1019 tunnel->l2tp_net = net;
1020 pn = l2tp_pernet(net);
1021
1022 if (cfg)
1023 tunnel->debug = cfg->debug;
1024
1025 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1026 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1027 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1028
1029 sk->sk_user_data = tunnel;
1030
 1031	/* Hook on the tunnel socket destructor so that we can clean up
1032 * if the tunnel socket goes away.
1033 */
1034 tunnel->old_sk_destruct = sk->sk_destruct;
1035 sk->sk_destruct = &l2tp_tunnel_destruct;
1036 tunnel->sock = sk;
1037 sk->sk_allocation = GFP_ATOMIC;
1038
1039 /* Add tunnel to our list */
1040 INIT_LIST_HEAD(&tunnel->list);
1041 write_lock_bh(&pn->l2tp_tunnel_list_lock);
1042 list_add(&tunnel->list, &pn->l2tp_tunnel_list);
1043 write_unlock_bh(&pn->l2tp_tunnel_list_lock);
1044 atomic_inc(&l2tp_tunnel_count);
1045
1046 /* Bump the reference count. The tunnel context is deleted
1047 * only when this drops to zero.
1048 */
1049 l2tp_tunnel_inc_refcount(tunnel);
1050
1051 err = 0;
1052err:
1053 if (tunnelp)
1054 *tunnelp = tunnel;
1055
1056 if (sock)
1057 sockfd_put(sock);
1058
1059 return err;
1060}
1061EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
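/* Illustrative usage (a sketch of what a caller might do; nothing below
 * is a caller defined in this file):
 *
 *	struct l2tp_tunnel *tunnel;
 *	struct l2tp_tunnel_cfg cfg = { .debug = 0 };
 *	int err;
 *
 *	err = l2tp_tunnel_create(net, fd, 2, tunnel_id, peer_tunnel_id,
 *				 &cfg, &tunnel);
 *	if (err < 0)
 *		return err;
 *
 * fd must refer to an AF_INET UDP socket opened by the userspace L2TP
 * daemon, and version must be 2 (L2TP_HDR_VER_2) in this version of
 * the core.
 */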
1062
1063/* Really kill the session.
1064 */
1065void l2tp_session_free(struct l2tp_session *session)
1066{
1067 struct l2tp_tunnel *tunnel;
1068
1069 BUG_ON(atomic_read(&session->ref_count) != 0);
1070
1071 tunnel = session->tunnel;
1072 if (tunnel != NULL) {
1073 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1074
1075 /* Delete the session from the hash */
1076 write_lock_bh(&tunnel->hlist_lock);
1077 hlist_del_init(&session->hlist);
1078 write_unlock_bh(&tunnel->hlist_lock);
1079
1080 if (session->session_id != 0)
1081 atomic_dec(&l2tp_session_count);
1082
1083 sock_put(tunnel->sock);
1084
1085 /* This will delete the tunnel context if this
1086 * is the last session on the tunnel.
1087 */
1088 session->tunnel = NULL;
1089 l2tp_tunnel_dec_refcount(tunnel);
1090 }
1091
1092 kfree(session);
1093
1094 return;
1095}
1096EXPORT_SYMBOL_GPL(l2tp_session_free);
1097
1098struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1099{
1100 struct l2tp_session *session;
1101
1102 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1103 if (session != NULL) {
1104 session->magic = L2TP_SESSION_MAGIC;
1105 session->tunnel = tunnel;
1106
1107 session->session_id = session_id;
1108 session->peer_session_id = peer_session_id;
1109
1110 sprintf(&session->name[0], "sess %u/%u",
1111 tunnel->tunnel_id, session->session_id);
1112
1113 skb_queue_head_init(&session->reorder_q);
1114
1115 INIT_HLIST_NODE(&session->hlist);
1116
1117 /* Inherit debug options from tunnel */
1118 session->debug = tunnel->debug;
1119
1120 if (cfg) {
1121 session->debug = cfg->debug;
1122 session->hdr_len = cfg->hdr_len;
1123 session->mtu = cfg->mtu;
1124 session->mru = cfg->mru;
1125 session->send_seq = cfg->send_seq;
1126 session->recv_seq = cfg->recv_seq;
1127 session->lns_mode = cfg->lns_mode;
1128 }
1129
1130 /* Bump the reference count. The session context is deleted
1131 * only when this drops to zero.
1132 */
1133 l2tp_session_inc_refcount(session);
1134 l2tp_tunnel_inc_refcount(tunnel);
1135
1136 /* Ensure tunnel socket isn't deleted */
1137 sock_hold(tunnel->sock);
1138
1139 /* Add session to the tunnel's hash list */
1140 write_lock_bh(&tunnel->hlist_lock);
1141 hlist_add_head(&session->hlist,
1142 l2tp_session_id_hash(tunnel, session_id));
1143 write_unlock_bh(&tunnel->hlist_lock);
1144
1145 /* Ignore management session in session count value */
1146 if (session->session_id != 0)
1147 atomic_inc(&l2tp_session_count);
1148 }
1149
1150 return session;
1151}
1152EXPORT_SYMBOL_GPL(l2tp_session_create);
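/* Illustrative usage (a sketch; struct my_priv, my_recv and my_close are
 * hypothetical names belonging to a protocol driver, not to this file):
 *
 *	session = l2tp_session_create(sizeof(struct my_priv), tunnel,
 *				      session_id, peer_session_id, &cfg);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	session->recv_skb = my_recv;
 *	session->session_close = my_close;
 *
 * priv_size bytes are allocated immediately after the session struct for
 * the caller's private state. recv_skb and session_close are the
 * callbacks invoked from the receive path and from l2tp_tunnel_closeall()
 * respectively.
 */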
1153
1154/*****************************************************************************
1155 * Init and cleanup
1156 *****************************************************************************/
1157
1158static __net_init int l2tp_init_net(struct net *net)
1159{
1160 struct l2tp_net *pn;
1161 int err;
1162
1163 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1164 if (!pn)
1165 return -ENOMEM;
1166
1167 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1168 rwlock_init(&pn->l2tp_tunnel_list_lock);
1169
1170 err = net_assign_generic(net, l2tp_net_id, pn);
1171 if (err)
1172 goto out;
1173
1174 return 0;
1175
1176out:
1177 kfree(pn);
1178 return err;
1179}
1180
1181static __net_exit void l2tp_exit_net(struct net *net)
1182{
1183 struct l2tp_net *pn;
1184
1185 pn = net_generic(net, l2tp_net_id);
1186 /*
 1187	 * If someone has cached our net, a further net_generic()
 1188	 * call will return NULL.
1189 */
1190 net_assign_generic(net, l2tp_net_id, NULL);
1191 kfree(pn);
1192}
1193
1194static struct pernet_operations l2tp_net_ops = {
1195 .init = l2tp_init_net,
1196 .exit = l2tp_exit_net,
1197 .id = &l2tp_net_id,
1198 .size = sizeof(struct l2tp_net),
1199};
1200
1201static int __init l2tp_init(void)
1202{
1203 int rc = 0;
1204
1205 rc = register_pernet_device(&l2tp_net_ops);
1206 if (rc)
1207 goto out;
1208
1209 printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
1210
1211out:
1212 return rc;
1213}
1214
1215static void __exit l2tp_exit(void)
1216{
1217 unregister_pernet_device(&l2tp_net_ops);
1218}
1219
1220module_init(l2tp_init);
1221module_exit(l2tp_exit);
1222
1223MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1224MODULE_DESCRIPTION("L2TP core");
1225MODULE_LICENSE("GPL");
1226MODULE_VERSION(L2TP_DRV_VERSION);
1227