veth: Account for packet drops in ndo_xdp_xmit
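This is the veth driver source as of the change named above: veth_xdp_xmit() now accounts for the frames it drops. As the listing below shows, the early error paths (invalid flags, a detached peer, or a peer without an XDP program loaded) charge the whole batch of n frames to the sender's dropped counter, while the send loop counts only the frames that are oversized or fail to fit in the peer's ring; the total surfaces as tx_dropped via veth_get_stats64().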
/*
 * drivers/net/veth.c
 *
 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

/* Separating two types of XDP xmit */
#define VETH_XDP_TX		BIT(0)
#define VETH_XDP_REDIR		BIT(1)

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

/*
 * ethtool interface
 */

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys);
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	data[0] = peer ? peer->ifindex : 0;
}

static int veth_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;

	return 0;
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= veth_get_ts_info,
};

/* general routines */

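/* The xdp_ring of a veth_rq carries two kinds of pointers: sk_buffs queued
 * by veth_xmit() and xdp_frames queued by veth_xdp_xmit(). The low bit of
 * the pointer (VETH_XDP_FLAG) tags the latter so the consumer side can pick
 * the right destructor; the helpers below set, test and clear that tag.
 */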
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

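/* Kick the consumer's NAPI handler after producing to its xdp_ring. The
 * smp_mb() below pairs with the smp_store_mb() in veth_poll(): either the
 * producer sees rx_notify_masked cleared and schedules NAPI itself, or the
 * poller catches the new ring entry on its final re-check, so no packet is
 * left stranded in the ring.
 */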
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);

		u64_stats_update_begin(&stats->syncp);
		stats->bytes += length;
		stats->packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

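/* Sum the per-cpu tx counters of @dev into @result; the return value is the
 * atomic dropped counter, which the caller reports as the dropped stat.
 */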
static u64 veth_stats_one(struct pcpu_lstats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int cpu;

	result->packets = 0;
	result->bytes = 0;
	for_each_possible_cpu(cpu) {
		struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->packets += packets;
		result->bytes += bytes;
	}
	return atomic64_read(&priv->dropped);
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct pcpu_lstats one;

	tot->tx_dropped = veth_stats_one(&one, dev);
	tot->tx_bytes = one.bytes;
	tot->tx_packets = one.packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		tot->rx_dropped = veth_stats_one(&one, peer);
		tot->rx_bytes = one.bytes;
		tot->rx_packets = one.packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	if (!buflen) {
		buflen = SKB_DATA_ALIGN(headroom + len) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}
	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

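/* ndo_xdp_xmit implementation. Note the drop accounting this change adds:
 * "drops" starts out as the whole batch so that the early error paths (bad
 * flags, no peer, peer not running XDP) charge all @n frames to
 * priv->dropped, and is reset to zero once frames are actually offered to
 * the peer's ring, after which only the frames that are oversized or fail
 * __ptr_ring_produce() are counted.
 */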
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	int i, ret, drops = n;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto drop;
	}

	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		ret = -ENXIO;
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog)) {
		ret = -ENXIO;
		goto drop;
	}

	drops = 0;
	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	if (likely(!drops))
		return n;

	ret = n - drops;
drop:
	atomic64_add(drops, &priv->dropped);

	return ret;
}

static void veth_xdp_flush(struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *rcv;
	struct veth_rq *rq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return veth_xdp_xmit(dev, 1, &frame, 0);
}

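/* Receive one xdp_frame from the ring. The struct xdp_frame itself sits at
 * the start of the buffer's headroom, which is why "head" backs up
 * sizeof(struct xdp_frame) from hard_start. On XDP_TX/XDP_REDIRECT the
 * buffer is handed away, so orig_frame keeps a stack copy that the error
 * paths can still return with xdp_return_frame().
 */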
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					unsigned int *xdp_xmit)
{
	void *hard_start = frame->data - frame->headroom;
	void *head = hard_start - sizeof(struct xdp_frame);
	int len = frame->len, delta = 0;
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = hard_start;
		xdp.data = frame->data;
		xdp.data_end = frame->data + frame->len;
		xdp.data_meta = frame->data - frame->metasize;
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			delta = frame->data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.data_hard_start = head;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				goto err_xdp;
			}
			*xdp_xmit |= VETH_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
	skb = veth_build_skb(head, headroom, len, 0);
	if (!skb) {
		xdp_return_frame(frame);
		goto err;
	}

	xdp_scrub_frame(frame);
	skb->protocol = eth_type_trans(skb, rq->dev);
err:
	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
					unsigned int *xdp_xmit)
{
	u32 pktlen, headroom, act, metalen;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head,
				      VETH_XDP_HEADROOM + mac_len, skb->len,
				      PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	xdp.data_hard_start = skb->head;
	xdp.data = skb_mac_header(skb);
	xdp.data_end = xdp.data + pktlen;
	xdp.data_meta = xdp.data;
	xdp.rxq = &rq->xdp_rxq;
	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq->dev, &xdp) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			goto err_xdp;
		}
		*xdp_xmit |= VETH_XDP_TX;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
			goto err_xdp;
		*xdp_xmit |= VETH_XDP_REDIR;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		goto drop;
	}
	rcu_read_unlock();

	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off);
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit)
{
	int i, done = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			skb = veth_xdp_rcv_one(rq, veth_ptr_to_xdp(ptr),
					       xdp_xmit);
		} else {
			skb = veth_xdp_rcv_skb(rq, ptr, xdp_xmit);
		}

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);

		done++;
	}

	return done;
}

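/* NAPI handler. veth has no real interrupt to unmask; rx_notify_masked
 * plays that role. After napi_complete_done() the handler clears the mask
 * with smp_store_mb() and re-checks the ring, so a producer racing with
 * completion either sees the cleared mask (and schedules NAPI itself) or
 * its entry is caught by the re-check here.
 */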
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	unsigned int xdp_xmit = 0;
	int done;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &xdp_xmit);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (xdp_xmit & VETH_XDP_TX)
		veth_xdp_flush(rq->dev);
	if (xdp_xmit & VETH_XDP_REDIR)
		xdp_do_flush_map();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		napi_hash_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_del(&rq->xdp_napi);
		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--)
		xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++)
		priv->rq[i].dev = dev;

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

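/* Install or remove an XDP program. The max_mtu bound below mirrors the
 * single-page copy in veth_xdp_rcv_skb(): XDP headroom + hard header + MTU
 * plus the trailing skb_shared_info must fit in one page, or skbs arriving
 * from the peer could not be linearized for the program.
 */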
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
			  peer->hard_header_len -
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}
	}

	if (old_prog) {
		if (!prog) {
			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}

static u32 veth_xdp_query(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = priv->_xdp_prog;
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = veth_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init		= veth_dev_init,
	.ndo_open		= veth_open,
	.ndo_stop		= veth_close,
	.ndo_start_xmit		= veth_xmit,
	.ndo_get_stats64	= veth_get_stats64,
	.ndo_set_rx_mode	= veth_set_multicast_list,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_xdp_xmit,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

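/* A veth pair is typically created from userspace with iproute2, e.g.:
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * which reaches veth_newlink() below with VETH_INFO_PEER describing the
 * second device of the pair.
 */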
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);