/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		u64		tid;
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		sgsn_addr_ip4;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};
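
/* Note on the key union above: a GTPv0 tunnel is identified by a 64-bit
 * tunnel id (TID) plus a 16-bit flow label, while GTPv1-U uses separate
 * 32-bit ingress (i_tei) and egress (o_tei) tunnel endpoint identifiers.
 */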

/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct socket		*sock0;
	struct socket		*sock1u;

	struct net_device	*dev;

	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

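/* All hash helpers below are seeded with the same random gtp_h_initval;
 * tunnel ids index the tid_hash table and mobile subscriber (MS)
 * addresses index the addr_hash table of a gtp_dev.
 */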
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
				  unsigned int hdrlen)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP source address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			     unsigned int hdrlen)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
	}
	return false;
}

/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
			       bool xnet)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
				bool xnet)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the 4-byte optional field area
	 * (sequence number, N-PDU number, next extension header type) is
	 * present.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	/* pskb_may_pull() may have reallocated; reload the header pointer. */
	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	if (gtp->sock0 && gtp->sock0->sk) {
		udp_sk(gtp->sock0->sk)->encap_type = 0;
		rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
	}
	if (gtp->sock1u && gtp->sock1u->sk) {
		udp_sk(gtp->sock1u->sk)->encap_type = 0;
		rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
	}

	gtp->sock0 = NULL;
	gtp->sock1u = NULL;
}

static void gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	gtp = rcu_dereference_sk_user_data(sk);
	if (gtp)
		gtp_encap_disable(gtp);
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct gtp_dev *gtp;
	bool xnet;
	int ret;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb, xnet);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		return 1;
	case 0:
		netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		return 0;
	}

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = gtp->dev;

	stats = this_cpu_ptr(gtp->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
}

static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}

static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
					   const struct sock *sk, __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(net, fl4);
}

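/* For reference: to our understanding of TS 09.60, the GTPv0 flags octet
 * carries version (3 bits), PT (1 bit), three spare bits (all ones) and
 * the SNN bit, so the 0x1e below encodes v0, GTP, spare=111, SNN unset.
 */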
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));

	gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
	gtp0->type = GTP_TPDU;
	gtp0->length = htons(payload_len);
	gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow = htons(pctx->u.v0.flow);
	gtp0->number = 0xff;
	gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1  0  0  0  0
	 */
	gtp1->flags = 0x30; /* v1, GTP-non-prime. */
	gtp1->type = GTP_TPDU;
	gtp1->length = htons(payload_len);
	gtp1->tid = htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	struct sock *sk;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	pctx = ipv4_pdp_find(gtp, iph->daddr);
	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (gtp->sock0)
			sk = gtp->sock0->sk;
		else
			sk = NULL;
		break;
	case GTP_V1:
		if (gtp->sock1u)
			sk = gtp->sock1u->sk;
		else
			sk = NULL;
		break;
	default:
		return -ENOENT;
	}

	if (!sk) {
		netdev_dbg(dev, "no userspace socket is available, skip\n");
		return -ENOENT;
	}

	/* Route towards the SGSN using the socket matching the GTP version. */
	rt = ip4_route_output_gtp(sock_net(sk), &fl4, sk,
				  pctx->sgsn_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->sgsn_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->sgsn_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	skb_dst_drop(skb);

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);

	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    true, false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};

static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->destructor		= free_netdev;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume largest header, i.e. GTPv0. */
	dev->needed_headroom = LL_MAX_HEADER +
			       sizeof(struct iphdr) +
			       sizeof(struct udphdr) +
			       sizeof(struct gtp0_header);
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
			    int fd_gtp0, int fd_gtp1);

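/* Both encap sockets are created by userspace and passed in as file
 * descriptors. A hypothetical setup sketch (not part of this driver):
 * bind one UDP socket to the GTPv0 port (3386) and one to the GTPv1-U
 * port (2152), then hand their fds to RTM_NEWLINK as the IFLA_GTP_FD0
 * and IFLA_GTP_FD1 attributes, e.g. with libgtpnl's gtp-link tool.
 */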
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	int hashsize, err, fd0, fd1;
	struct gtp_dev *gtp;
	struct gtp_net *gn;

	if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

	err = gtp_encap_enable(dev, gtp, fd0, fd1);
	if (err < 0)
		goto out_err;

	if (!data[IFLA_GTP_PDP_HASHSIZE])
		hashsize = 1024;
	else
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		goto out_encap;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_hashtable;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_hashtable:
	gtp_hashtable_free(gtp);
out_encap:
	gtp_encap_disable(gtp);
out_err:
	return err;
}

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	gtp_hashtable_free(gtp);
	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (tb[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	return net;
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}

static void gtp_hashtable_free(struct gtp_dev *gtp)
{
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++) {
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
			hlist_del_rcu(&pctx->hlist_tid);
			hlist_del_rcu(&pctx->hlist_addr);
			kfree_rcu(pctx, rcu_head);
		}
	}
	/* Wait for readers that may still walk the tables before freeing. */
	synchronize_rcu();
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
			    int fd_gtp0, int fd_gtp1)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock0, *sock1u;
	int err;

	netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);

	sock0 = sockfd_lookup(fd_gtp0, &err);
	if (sock0 == NULL) {
		netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
		return -ENOENT;
	}

	if (sock0->sk->sk_protocol != IPPROTO_UDP) {
		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
		err = -EINVAL;
		goto err1;
	}

	sock1u = sockfd_lookup(fd_gtp1, &err);
	if (sock1u == NULL) {
		netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
		err = -ENOENT;
		goto err1;
	}

	if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
		err = -EINVAL;
		goto err2;
	}

	netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);

	gtp->sock0 = sock0;
	gtp->sock1u = sock1u;

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	tuncfg.encap_type = UDP_ENCAP_GTP0;
	setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);

	tuncfg.encap_type = UDP_ENCAP_GTP1U;
	setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);

	err = 0;
err2:
	sockfd_put(sock1u);
err1:
	sockfd_put(sock0);
	return err;
}

static struct net_device *gtp_find_dev(struct net *net, int ifindex)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (ifindex == gtp->dev->ifindex)
			return gtp->dev;
	}
	return NULL;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->sgsn_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}

static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	u32 hash_ms, hash_tid = 0;
	struct pdp_ctx *pctx;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;

	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return 0;
	}

	pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
	if (pctx == NULL)
		return -ENOMEM;

	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return 0;
}

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct net *net;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_SGSN_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	return ipv4_pdp_add(dev, info);
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct net *net;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK])
		return -EINVAL;

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	gtp = netdev_priv(dev);

	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID])
			return -EINVAL;
		pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI])
			return -EINVAL;
		/* GTPA_I_TEI is a 32-bit attribute (see gtp_genl_policy). */
		pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
		break;

	default:
		return -EINVAL;
	}

	if (pctx == NULL)
		return -ENOENT;

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	kfree_rcu(pctx, rcu_head);

	return 0;
}

static struct genl_family gtp_genl_family;

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct net_device *dev;
	struct sk_buff *skb2;
	struct gtp_dev *gtp;
	u32 gtp_version;
	struct net *net;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK])
		return -EINVAL;

	gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	switch (gtp_version) {
	case GTP_V0:
	case GTP_V1:
		break;
	default:
		return -EINVAL;
	}

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	gtp = netdev_priv(dev);

	rcu_read_lock();
	if (gtp_version == GTP_V0 &&
	    info->attrs[GTPA_TID]) {
		u64 tid = nla_get_u64(info->attrs[GTPA_TID]);

		pctx = gtp0_pdp_find(gtp, tid);
	} else if (gtp_version == GTP_V1 &&
		   info->attrs[GTPA_I_TEI]) {
		u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);

		pctx = gtp1_pdp_find(gtp, tid);
	} else if (info->attrs[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

		pctx = ipv4_pdp_find(gtp, ip);
	}

	if (pctx == NULL) {
		err = -ENOENT;
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}

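/* A dump may span several netlink messages; the state needed to resume is
 * parked in the callback args: args[0] holds the hash bucket, args[1] the
 * last dumped tunnel id, args[2] the device, and args[4] flags completion.
 */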
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	struct net *net = sock_net(skb->sk);
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	unsigned long tid = cb->args[1];
	int i, k = cb->args[0], ret;
	struct pdp_ctx *pctx;

	if (cb->args[4])
		return 0;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = k; i < gtp->hash_size; i++) {
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
				if (tid && tid != pctx->u.tid)
					continue;
				else
					tid = 0;

				ret = gtp_genl_fill_info(skb,
							 NETLINK_CB(cb->skb).portid,
							 cb->nlh->nlmsg_seq,
							 cb->nlh->nlmsg_type, pctx);
				if (ret < 0) {
					cb->args[0] = i;
					cb->args[1] = pctx->u.tid;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
			}
		}
	}
	cb->args[4] = 1;
out:
	return skb->len;
}

static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_SGSN_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};

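/* PDP contexts are managed from userspace through the three commands
 * below (for example via the libgtpnl library); GENL_ADMIN_PERM restricts
 * them to CAP_NET_ADMIN.
 */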
static const struct genl_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.doit = gtp_genl_new_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.doit = gtp_genl_del_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= gtp_genl_ops,
	.n_ops		= ARRAY_SIZE(gtp_genl_ops),
};

static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
	unregister_pernet_subsys(&gtp_net_ops);
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");