// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME        "veth"
#define DRV_VERSION     "1.0"

#define VETH_XDP_FLAG           BIT(0)
#define VETH_RING_SIZE          256
#define VETH_XDP_HEADROOM       (XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE   16

struct veth_stats {
        u64     rx_drops;
        /* xdp */
        u64     xdp_packets;
        u64     xdp_bytes;
        u64     xdp_redirect;
        u64     xdp_drops;
        u64     xdp_tx;
        u64     xdp_tx_err;
        u64     peer_tq_xdp_xmit;
        u64     peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
        struct veth_stats       vs;
        struct u64_stats_sync   syncp;
};

struct veth_rq {
        struct napi_struct      xdp_napi;
        struct net_device       *dev;
        struct bpf_prog __rcu   *xdp_prog;
        struct xdp_mem_info     xdp_mem;
        struct veth_rq_stats    stats;
        bool                    rx_notify_masked;
        struct ptr_ring         xdp_ring;
        struct xdp_rxq_info     xdp_rxq;
};

struct veth_priv {
        struct net_device __rcu *peer;
        atomic64_t              dropped;
        struct bpf_prog         *_xdp_prog;
        struct veth_rq          *rq;
        unsigned int            requested_headroom;
};

struct veth_xdp_tx_bq {
        struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
        unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
        char    desc[ETH_GSTRING_LEN];
        size_t  offset;
};

#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
        { "xdp_packets",        VETH_RQ_STAT(xdp_packets) },
        { "xdp_bytes",          VETH_RQ_STAT(xdp_bytes) },
        { "drops",              VETH_RQ_STAT(rx_drops) },
        { "xdp_redirect",       VETH_RQ_STAT(xdp_redirect) },
        { "xdp_drops",          VETH_RQ_STAT(xdp_drops) },
        { "xdp_tx",             VETH_RQ_STAT(xdp_tx) },
        { "xdp_tx_errors",      VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN       ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
        { "xdp_xmit",           VETH_RQ_STAT(peer_tq_xdp_xmit) },
        { "xdp_xmit_errors",    VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN       ARRAY_SIZE(veth_tq_stats_desc)

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};

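/* veth has no physical link, so report a fixed, synthetic 10Gb/s
 * full-duplex link so userspace tools have something sensible to show.
 */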
static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed         = SPEED_10000;
        cmd->base.duplex        = DUPLEX_FULL;
        cmd->base.port          = PORT_TP;
        cmd->base.autoneg       = AUTONEG_DISABLE;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        char *p = (char *)buf;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                p += sizeof(ethtool_stats_keys);
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                         "rx_queue_%u_%.18s",
                                         i, veth_rq_stats_desc[j].desc);
                                p += ETH_GSTRING_LEN;
                        }
                }
                for (i = 0; i < dev->real_num_tx_queues; i++) {
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                snprintf(p, ETH_GSTRING_LEN,
                                         "tx_queue_%u_%.18s",
                                         i, veth_tq_stats_desc[j].desc);
                                p += ETH_GSTRING_LEN;
                        }
                }
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys) +
                       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
                       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int i, j, idx;

        data[0] = peer ? peer->ifindex : 0;
        idx = 1;
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
                const void *stats_base = (void *)&rq_stats->vs;
                unsigned int start;
                size_t offset;

                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                offset = veth_rq_stats_desc[j].offset;
                                data[idx + j] = *(u64 *)(stats_base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
                idx += VETH_RQ_STATS_LEN;
        }

        if (!peer)
                return;

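        /* The tx_queue_* counters are derived from the peer's rx queues:
         * ndo_xdp_xmit lands on a peer rq chosen by veth_select_rxq(), so
         * fold each peer rq back onto this device's tx queues modulo
         * real_num_tx_queues.
         */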
        rcv_priv = netdev_priv(peer);
        for (i = 0; i < peer->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
                const void *base = (void *)&rq_stats->vs;
                unsigned int start, tx_idx = idx;
                size_t offset;

                tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                offset = veth_tq_stats_desc[j].offset;
                                data[tx_idx + j] += *(u64 *)(base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
        }
}

static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
        .get_link_ksettings     = veth_get_link_ksettings,
        .get_ts_info            = ethtool_op_get_ts_info,
};

/* general routines */

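/* sk_buffs and xdp_frames travel through the same per-queue ptr_ring.
 * Both pointer types are at least word-aligned, so bit 0 is free to tag
 * ring entries that carry an xdp_frame.
 */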
static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static void *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(void *ptr)
{
        return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}

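/* Kick the peer's NAPI context. rx_notify_masked suppresses redundant
 * napi_schedule() calls while a poll is already pending; the barrier
 * below pairs with the smp_store_mb() in veth_poll().
 */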
static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
        if (!rq->rx_notify_masked) {
                rq->rx_notify_masked = true;
                napi_schedule(&rq->xdp_napi);
        }
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

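/* If the peer runs an XDP program, queue the skb onto its per-queue
 * ptr_ring for NAPI processing; otherwise hand it straight to netif_rx().
 * __dev_forward_skb() returns nonzero when it drops the packet.
 */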
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                netif_rx(skb);
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool rcv_xdp = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
                if (rcv_xdp)
                        skb_record_rx_queue(skb, rxq);
        }

        skb_tx_timestamp(skb);
        if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
                if (!rcv_xdp)
                        dev_lstats_add(dev, length);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (rcv_xdp)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}

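/* On the tx side, packet and byte counts come from the pcpu lstats that
 * the non-XDP fast path updates; the return value is the drop count.
 */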
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
        struct veth_priv *priv = netdev_priv(dev);

        dev_lstats_read(dev, packets, bytes);
        return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        result->peer_tq_xdp_xmit_err = 0;
        result->xdp_packets = 0;
        result->xdp_tx_err = 0;
        result->xdp_bytes = 0;
        result->rx_drops = 0;
        for (i = 0; i < dev->num_rx_queues; i++) {
                u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
                struct veth_rq_stats *stats = &priv->rq[i].stats;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
                        xdp_tx_err = stats->vs.xdp_tx_err;
                        packets = stats->vs.xdp_packets;
                        bytes = stats->vs.xdp_bytes;
                        drops = stats->vs.rx_drops;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
                result->xdp_tx_err += xdp_tx_err;
                result->xdp_packets += packets;
                result->xdp_bytes += bytes;
                result->rx_drops += drops;
        }
}

static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct veth_stats rx;
        u64 packets, bytes;

        tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
        tot->tx_bytes = bytes;
        tot->tx_packets = packets;

        veth_stats_rx(&rx, dev);
        tot->tx_dropped += rx.xdp_tx_err;
        tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
        tot->rx_bytes = rx.xdp_bytes;
        tot->rx_packets = rx.xdp_packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;

                veth_stats_rx(&rx, peer);
                tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
                tot->rx_dropped += rx.xdp_tx_err;
                tot->tx_bytes += rx.xdp_bytes;
                tot->tx_packets += rx.xdp_packets;
        }
        rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
                                      int buflen)
{
        struct sk_buff *skb;

        skb = build_skb(head, buflen);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

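/* Map the transmitting CPU onto one of the peer's rx queues for the XDP
 * transmit paths.
 */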
static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}

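/* Bulk-enqueue xdp_frames onto the peer's ring under the producer lock.
 * Frames that exceed the peer's MTU budget or don't fit in the ring are
 * returned to their memory pool and counted as drops.
 */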
static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames,
                         u32 flags, bool ndo_xmit)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        int i, ret = -ENXIO, drops = 0;
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
         * side. This means an XDP program is loaded on the peer and the peer
         * device is up.
         */
        if (!rcu_access_pointer(rq->xdp_prog))
                goto out;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(frame->len > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr))) {
                        xdp_return_frame_rx_napi(frame);
                        drops++;
                }
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        ret = n - drops;
        if (ndo_xmit) {
                u64_stats_update_begin(&rq->stats.syncp);
                rq->stats.vs.peer_tq_xdp_xmit += n - drops;
                rq->stats.vs.peer_tq_xdp_xmit_err += drops;
                u64_stats_update_end(&rq->stats.syncp);
        }

out:
        rcu_read_unlock();

        return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
                             struct xdp_frame **frames, u32 flags)
{
        int err;

        err = veth_xdp_xmit(dev, n, frames, flags, true);
        if (err < 0) {
                struct veth_priv *priv = netdev_priv(dev);

                atomic64_add(n, &priv->dropped);
        }

        return err;
}

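/* Flush the XDP_TX bulk queue accumulated during one NAPI cycle. */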
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        int sent, i, err = 0;

        sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
        if (sent < 0) {
                err = sent;
                sent = 0;
                for (i = 0; i < bq->count; i++)
                        xdp_return_frame(bq->q[i]);
        }
        trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_tx += sent;
        rq->stats.vs.xdp_tx_err += bq->count - sent;
        u64_stats_update_end(&rq->stats.syncp);

        bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
        struct net_device *rcv;
        struct veth_rq *rcv_rq;

        rcu_read_lock();
        veth_xdp_flush_bq(rq, bq);
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Only kick the peer if its xdp_ring is initialized, i.e. an XDP
         * program is attached on the receive side.
         */
        if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
                goto out;

        __veth_xdp_flush(rcv_rq);
out:
        rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
                       struct veth_xdp_tx_bq *bq)
{
        struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

        if (unlikely(!frame))
                return -EOVERFLOW;

        if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
                veth_xdp_flush_bq(rq, bq);

        bq->q[bq->count++] = frame;

        return 0;
}

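/* Run the local XDP program on an xdp_frame consumed from the ring and,
 * on XDP_PASS, rebuild an skb around the frame's buffer for the stack.
 */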
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
                                        struct xdp_frame *frame,
                                        struct veth_xdp_tx_bq *bq,
                                        struct veth_stats *stats)
{
        void *hard_start = frame->data - frame->headroom;
        int len = frame->len, delta = 0;
        struct xdp_frame orig_frame;
        struct bpf_prog *xdp_prog;
        unsigned int headroom;
        struct sk_buff *skb;

        /* bpf_xdp_adjust_head() assures BPF cannot access xdp_frame area */
        hard_start -= sizeof(struct xdp_frame);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (likely(xdp_prog)) {
                struct xdp_buff xdp;
                u32 act;

                xdp_convert_frame_to_buff(frame, &xdp);
                xdp.rxq = &rq->xdp_rxq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        delta = frame->data - xdp.data;
                        len = xdp.data_end - xdp.data;
                        break;
                case XDP_TX:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                                trace_xdp_exception(rq->dev, xdp_prog, act);
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_tx++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_redirect++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        stats->xdp_drops++;
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        headroom = sizeof(struct xdp_frame) + frame->headroom - delta;
        skb = veth_build_skb(hard_start, headroom, len, frame->frame_sz);
        if (!skb) {
                xdp_return_frame(frame);
                stats->rx_drops++;
                goto err;
        }

        xdp_release_frame(frame);
        xdp_scrub_frame(frame);
        skb->protocol = eth_type_trans(skb, rq->dev);
err:
        return skb;
err_xdp:
        rcu_read_unlock();
        xdp_return_frame(frame);
xdp_xmit:
        return NULL;
}

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
                                        struct sk_buff *skb,
                                        struct veth_xdp_tx_bq *bq,
                                        struct veth_stats *stats)
{
        u32 pktlen, headroom, act, metalen;
        void *orig_data, *orig_data_end;
        struct bpf_prog *xdp_prog;
        int mac_len, delta, off;
        struct xdp_buff xdp;

        skb_orphan(skb);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
                rcu_read_unlock();
                goto out;
        }

        mac_len = skb->data - skb_mac_header(skb);
        pktlen = skb->len + mac_len;
        headroom = skb_headroom(skb) - mac_len;

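        /* XDP needs an exclusive, linear head with enough headroom. If this
         * skb doesn't satisfy that, copy the packet into a fresh page.
         */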
        if (skb_shared(skb) || skb_head_is_locked(skb) ||
            skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
                struct sk_buff *nskb;
                int size, head_off;
                void *head, *start;
                struct page *page;

                size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (size > PAGE_SIZE)
                        goto drop;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page)
                        goto drop;

                head = page_address(page);
                start = head + VETH_XDP_HEADROOM;
                if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
                        page_frag_free(head);
                        goto drop;
                }

                nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
                                      skb->len, PAGE_SIZE);
                if (!nskb) {
                        page_frag_free(head);
                        goto drop;
                }

                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
                consume_skb(skb);
                skb = nskb;
        }

        xdp.data_hard_start = skb->head;
        xdp.data = skb_mac_header(skb);
        xdp.data_end = xdp.data + pktlen;
        xdp.data_meta = xdp.data;
        xdp.rxq = &rq->xdp_rxq;

        /* The SKB "head" area always has tailroom for skb_shared_info */
        xdp.frame_sz = (void *)skb_end_pointer(skb) - xdp.data_hard_start;
        xdp.frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        orig_data = xdp.data;
        orig_data_end = xdp.data_end;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_tx++;
                rcu_read_unlock();
                goto xdp_xmit;
        case XDP_REDIRECT:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_redirect++;
                rcu_read_unlock();
                goto xdp_xmit;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rq->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                stats->xdp_drops++;
                goto xdp_drop;
        }
        rcu_read_unlock();

        /* check if bpf_xdp_adjust_head was used */
        delta = orig_data - xdp.data;
        off = mac_len + delta;
        if (off > 0)
                __skb_push(skb, off);
        else if (off < 0)
                __skb_pull(skb, -off);
        skb->mac_header -= delta;

        /* check if bpf_xdp_adjust_tail was used */
        off = xdp.data_end - orig_data_end;
        if (off != 0)
                __skb_put(skb, off); /* positive on grow, negative on shrink */
        skb->protocol = eth_type_trans(skb, rq->dev);

        metalen = xdp.data - xdp.data_meta;
        if (metalen)
                skb_metadata_set(skb, metalen);
out:
        return skb;
drop:
        stats->rx_drops++;
xdp_drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NULL;
err_xdp:
        rcu_read_unlock();
        page_frag_free(xdp.data);
xdp_xmit:
        return NULL;
}

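/* NAPI receive: consume up to @budget ring entries, dispatching each to
 * veth_xdp_rcv_one() (xdp_frame) or veth_xdp_rcv_skb() (sk_buff), then
 * fold the collected counters into the per-queue stats.
 */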
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
                        struct veth_xdp_tx_bq *bq,
                        struct veth_stats *stats)
{
        int i, done = 0;

        for (i = 0; i < budget; i++) {
                void *ptr = __ptr_ring_consume(&rq->xdp_ring);
                struct sk_buff *skb;

                if (!ptr)
                        break;

                if (veth_is_xdp_frame(ptr)) {
                        struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

                        stats->xdp_bytes += frame->len;
                        skb = veth_xdp_rcv_one(rq, frame, bq, stats);
                } else {
                        skb = ptr;
                        stats->xdp_bytes += skb->len;
                        skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
                }

                if (skb)
                        napi_gro_receive(&rq->xdp_napi, skb);

                done++;
        }

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_redirect += stats->xdp_redirect;
        rq->stats.vs.xdp_bytes += stats->xdp_bytes;
        rq->stats.vs.xdp_drops += stats->xdp_drops;
        rq->stats.vs.rx_drops += stats->rx_drops;
        rq->stats.vs.xdp_packets += done;
        u64_stats_update_end(&rq->stats.syncp);

        return done;
}

static int veth_poll(struct napi_struct *napi, int budget)
{
        struct veth_rq *rq =
                container_of(napi, struct veth_rq, xdp_napi);
        struct veth_stats stats = {};
        struct veth_xdp_tx_bq bq;
        int done;

        bq.count = 0;

        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &bq, &stats);

        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
                        rq->rx_notify_masked = true;
                        napi_schedule(&rq->xdp_napi);
                }
        }

        if (stats.xdp_tx > 0)
                veth_xdp_flush(rq, &bq);
        if (stats.xdp_redirect > 0)
                xdp_do_flush();
        xdp_clear_return_frame_no_direct();

        return done;
}

static int veth_napi_add(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
                if (err)
                        goto err_xdp_ring;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                napi_enable(&rq->xdp_napi);
        }

        return 0;
err_xdp_ring:
        for (i--; i >= 0; i--)
                ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

        return err;
}

static void veth_napi_del(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_disable(&rq->xdp_napi);
                napi_hash_del(&rq->xdp_napi);
        }
        synchronize_net();

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_del(&rq->xdp_napi);
                rq->rx_notify_masked = false;
                ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
        }
}

static int veth_enable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

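        /* rxq 0's registration state tracks whether rings and NAPI have
         * been set up: enable/disable always act on all rx queues at once.
         */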
        if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
                for (i = 0; i < dev->real_num_rx_queues; i++) {
                        struct veth_rq *rq = &priv->rq[i];

                        err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
                        if (err < 0)
                                goto err_rxq_reg;

                        err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
                                                         NULL);
                        if (err < 0)
                                goto err_reg_mem;

                        /* Save original mem info as it can be overwritten */
                        rq->xdp_mem = rq->xdp_rxq.mem;
                }

                err = veth_napi_add(dev);
                if (err)
                        goto err_rxq_reg;
        }

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

        return 0;
err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
        for (i--; i >= 0; i--)
                xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);

        return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
        veth_napi_del(dev);
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->xdp_rxq.mem = rq->xdp_mem;
                xdp_rxq_info_unreg(&rq->xdp_rxq);
        }
}

static int veth_open(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int err;

        if (!peer)
                return -ENOTCONN;

        if (priv->_xdp_prog) {
                err = veth_enable_xdp(dev);
                if (err)
                        return err;
        }

        if (peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(peer);
        }

        return 0;
}

static int veth_close(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        netif_carrier_off(dev);
        if (peer)
                netif_carrier_off(peer);

        if (priv->_xdp_prog)
                veth_disable_xdp(dev);

        return 0;
}

static int is_valid_veth_mtu(int mtu)
{
        return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
        if (!priv->rq)
                return -ENOMEM;

        for (i = 0; i < dev->num_rx_queues; i++) {
                priv->rq[i].dev = dev;
                u64_stats_init(&priv->rq[i].stats.syncp);
        }

        return 0;
}

static void veth_free_queues(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
        int err;

        dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
        if (!dev->lstats)
                return -ENOMEM;

        err = veth_alloc_queues(dev);
        if (err) {
                free_percpu(dev->lstats);
                return err;
        }

        return 0;
}

static void veth_dev_free(struct net_device *dev)
{
        veth_free_queues(dev);
        free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
        /* veth only receives frames when its peer sends one.
         * Since it has nothing to do with disabling irqs, we are guaranteed
         * never to have pending data when we poll for it, so there is
         * nothing to do here.
         *
         * We need this though so netpoll recognizes us as an interface that
         * supports polling, which enables bridge devices in virt setups to
         * still use netconsole.
         */
}
#endif  /* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        int iflink;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        iflink = peer ? peer->ifindex : 0;
        rcu_read_unlock();

        return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;

        peer = rtnl_dereference(priv->peer);
        if (peer) {
                struct veth_priv *peer_priv = netdev_priv(peer);

                if (peer_priv->_xdp_prog)
                        features &= ~NETIF_F_GSO_SOFTWARE;
        }

        return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
        struct veth_priv *peer_priv, *priv = netdev_priv(dev);
        struct net_device *peer;

        if (new_hr < 0)
                new_hr = 0;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (unlikely(!peer))
                goto out;

        peer_priv = netdev_priv(peer);
        priv->requested_headroom = new_hr;
        new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
        dev->needed_headroom = new_hr;
        peer->needed_headroom = new_hr;

out:
        rcu_read_unlock();
}

static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                        struct netlink_ext_ack *extack)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct bpf_prog *old_prog;
        struct net_device *peer;
        unsigned int max_mtu;
        int err;

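        /* Install the new program up front so veth_enable_xdp() sees it;
         * any failure below restores old_prog at the err label.
         */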
        old_prog = priv->_xdp_prog;
        priv->_xdp_prog = prog;
        peer = rtnl_dereference(priv->peer);

        if (prog) {
                if (!peer) {
                        NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
                        err = -ENOTCONN;
                        goto err;
                }

                max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
                          peer->hard_header_len -
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (peer->mtu > max_mtu) {
                        NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
                        err = -ERANGE;
                        goto err;
                }

                if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
                        NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
                        err = -ENOSPC;
                        goto err;
                }

                if (dev->flags & IFF_UP) {
                        err = veth_enable_xdp(dev);
                        if (err) {
                                NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
                                goto err;
                        }
                }

                if (!old_prog) {
                        peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
                        peer->max_mtu = max_mtu;
                }
        }

        if (old_prog) {
                if (!prog) {
                        if (dev->flags & IFF_UP)
                                veth_disable_xdp(dev);

                        if (peer) {
                                peer->hw_features |= NETIF_F_GSO_SOFTWARE;
                                peer->max_mtu = ETH_MAX_MTU;
                        }
                }
                bpf_prog_put(old_prog);
        }

        if ((!!old_prog ^ !!prog) && peer)
                netdev_update_features(peer);

        return 0;
err:
        priv->_xdp_prog = old_prog;

        return err;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return veth_xdp_set(dev, xdp->prog, xdp->extack);
        default:
                return -EINVAL;
        }
}

static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
        .ndo_stop            = veth_close,
        .ndo_start_xmit      = veth_xmit,
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = veth_poll_controller,
#endif
        .ndo_get_iflink         = veth_get_iflink,
        .ndo_fix_features       = veth_fix_features,
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = veth_set_rx_headroom,
        .ndo_bpf                = veth_xdp,
        .ndo_xdp_xmit           = veth_ndo_xdp_xmit,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
                       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
                       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
                       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
                       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->priv_flags |= IFF_PHONY_HEADROOM;

        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
        dev->vlan_features = dev->features &
                             ~(NETIF_F_HW_VLAN_CTAG_TX |
                               NETIF_F_HW_VLAN_STAG_TX |
                               NETIF_F_HW_VLAN_CTAG_RX |
                               NETIF_F_HW_VLAN_STAG_RX);
        dev->needs_free_netdev = true;
        dev->priv_destructor = veth_dev_free;
        dev->max_mtu = ETH_MAX_MTU;

        dev->hw_features = VETH_FEATURES;
        dev->hw_enc_features = VETH_FEATURES;
        dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
                         struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        if (tb[IFLA_MTU]) {
                if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
                        return -EINVAL;
        }
        return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        int err;
        struct net_device *peer;
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
        unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;

        /*
         * create and register peer first
         */
        if (data != NULL && data[VETH_INFO_PEER] != NULL) {
                struct nlattr *nla_peer;

                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
                err = rtnl_nla_parse_ifla(peer_tb,
                                          nla_data(nla_peer) + sizeof(struct ifinfomsg),
                                          nla_len(nla_peer) - sizeof(struct ifinfomsg),
                                          NULL);
                if (err < 0)
                        return err;

                err = veth_validate(peer_tb, NULL, extack);
                if (err < 0)
                        return err;

                tbp = peer_tb;
        } else {
                ifmp = NULL;
                tbp = tb;
        }

        if (ifmp && tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
                name_assign_type = NET_NAME_USER;
        } else {
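                /* "%%d" leaves a literal "%d" in the name template, which
                 * dev_alloc_name() expands to a unique index at
                 * registration time.
                 */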
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
                name_assign_type = NET_NAME_ENUM;
        }

        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);

        peer = rtnl_create_link(net, ifname, name_assign_type,
                                &veth_link_ops, tbp, extack);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
        }

        if (!ifmp || !tbp[IFLA_ADDRESS])
                eth_hw_addr_random(peer);

        if (ifmp && (dev->ifindex != 0))
                peer->ifindex = ifmp->ifi_index;

        peer->gso_max_size = dev->gso_max_size;
        peer->gso_max_segs = dev->gso_max_segs;

        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
        if (err < 0)
                goto err_register_peer;

        netif_carrier_off(peer);

        err = rtnl_configure_link(peer, ifmp);
        if (err < 0)
                goto err_configure_peer;

        /*
         * register dev last
         *
         * note that, since we've just registered a new device, the dev's
         * name should be re-allocated
         */

        if (tb[IFLA_ADDRESS] == NULL)
                eth_hw_addr_random(dev);

        if (tb[IFLA_IFNAME])
                nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;

        netif_carrier_off(dev);

        /*
         * tie the devices together
         */

        priv = netdev_priv(dev);
        rcu_assign_pointer(priv->peer, peer);

        priv = netdev_priv(peer);
        rcu_assign_pointer(priv->peer, dev);

        return 0;

err_register_dev:
        /* nothing to do */
err_configure_peer:
        unregister_netdevice(peer);
        return err;

err_register_peer:
        free_netdev(peer);
        return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
        struct veth_priv *priv;
        struct net_device *peer;

        priv = netdev_priv(dev);
        peer = rtnl_dereference(priv->peer);

        /* Note: dellink() is called from default_device_exit_batch(),
         * before an RCU synchronization point. The devices are guaranteed
         * not to be freed before one RCU grace period.
         */
        RCU_INIT_POINTER(priv->peer, NULL);
        unregister_netdevice_queue(dev, head);

        if (peer) {
                priv = netdev_priv(peer);
                RCU_INIT_POINTER(priv->peer, NULL);
                unregister_netdevice_queue(peer, head);
        }
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
        [VETH_INFO_PEER]        = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);

        return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct veth_priv),
        .setup          = veth_setup,
        .validate       = veth_validate,
        .newlink        = veth_newlink,
        .dellink        = veth_dellink,
        .policy         = veth_policy,
        .maxtype        = VETH_INFO_MAX,
        .get_link_net   = veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
        return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
        rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);