// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#include <net/page_pool/helpers.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
	struct page_pool	*page_pool;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

struct veth_xdp_buff {
	struct xdp_buff xdp;
	struct sk_buff *skb;
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	u8 *p = buf;
	int i, j;

	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++)
			for (j = 0; j < VETH_RQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "rx_queue_%u_%.18s",
						i, veth_rq_stats_desc[j].desc);

		for (i = 0; i < dev->real_num_tx_queues; i++)
			for (j = 0; j < VETH_TQ_STATS_LEN; j++)
				ethtool_sprintf(&p, "tx_queue_%u_%.18s",
						i, veth_tq_stats_desc[j].desc);

		page_pool_ethtool_stats_get_strings(p);
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues +
		       page_pool_ethtool_stats_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_page_pool_stats(struct net_device *dev, u64 *data)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct veth_priv *priv = netdev_priv(dev);
	struct page_pool_stats pp_stats = {};
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		if (!priv->rq[i].page_pool)
			continue;
		page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
	}
	page_pool_ethtool_stats_get(data, &pp_stats);
#endif /* CONFIG_PAGE_POOL_STATS */
}

static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx, pp_idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}
	pp_idx = idx;

	if (!peer)
		goto page_pool_stats;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
	}
	pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;

page_pool_stats:
	veth_get_page_pool_stats(dev, &data[pp_idx]);
}

static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->num_tx_queues;
	channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
	.set_channels		= veth_set_channels,
};

/* general routines */

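/* Both sk_buffs and xdp_frames travel through the same per-queue ptr_ring.
 * xdp_frame pointers are tagged with VETH_XDP_FLAG (bit 0) so the consumer
 * can tell the two apart; the helpers below encode, decode and free them.
 */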
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!READ_ONCE(rq->rx_notify_masked) &&
	    napi_schedule_prep(&rq->xdp_napi)) {
		WRITE_ONCE(rq->rx_notify_masked, true);
		__napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		__netif_rx(skb);
}

/* Return true if the specified skb has chances of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check whether the skb is suspected to
 * belong to locally generated UDP traffic (the sock_wfree destructor is
 * used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}

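/* Transmit path: a frame sent on one peer is delivered directly to the
 * other. When the receiver runs NAPI (GRO enabled or XDP attached) the
 * skb is queued on its xdp_ring and the peer's NAPI context is kicked;
 * otherwise it is handed straight to __netif_rx() in this context.
 */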
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	int ret = NETDEV_TX_OK;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_sw_netstats_tx_add(dev, 1, length);
		else
			__veth_xdp_flush(rq);
	} else {
drop:
		atomic64_inc(&priv->dropped);
		ret = NET_XMIT_DROP;
	}

	rcu_read_unlock();

	return ret;
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

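/* RX totals for this device are built from its own per-queue XDP counters
 * plus the peer's software TX counters; symmetrically, frames the peer
 * consumed through NAPI/XDP are accounted here as TX, since the NAPI xmit
 * path skips the local tstats update.
 */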
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;

	tot->tx_dropped = atomic64_read(&priv->dropped);
	dev_fetch_sw_netstats(tot, dev->tstats);

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes += rx.xdp_bytes;
	tot->rx_packets += rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		struct rtnl_link_stats64 tot_peer = {};

		dev_fetch_sw_netstats(&tot_peer, peer->tstats);
		tot->rx_bytes += tot_peer.tx_bytes;
		tot->rx_packets += tot_peer.tx_packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

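/* Core of .ndo_xdp_xmit: bulk-produce up to @n xdp_frames onto one of the
 * peer's receive rings (selected by current CPU). Frames larger than the
 * peer's MTU budget or that don't fit in the ring are dropped; the return
 * value is the number of frames actually queued, or a negative errno.
 */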
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(xdp_get_frame_len(frame) > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

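/* XDP_TX frames are not sent one by one: they are staged in a small
 * per-NAPI-poll bulk queue (veth_xdp_tx_bq) and flushed to the peer in
 * batches of up to VETH_XDP_TX_BULK_SIZE to amortize ring locking.
 */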
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

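/* Run the attached XDP program on one xdp_frame pulled from the ring
 * (the ndo_xdp_xmit path). Returns the frame on XDP_PASS so the caller
 * can batch it into an skb, or NULL when the frame was consumed by
 * XDP_TX/XDP_REDIRECT or dropped.
 */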
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct veth_xdp_buff vxbuf;
		struct xdp_buff *xdp = &vxbuf.xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, xdp);
		xdp->rxq = &rq->xdp_rxq;
		vxbuf.skb = NULL;

		act = bpf_prog_run_xdp(xdp_prog, xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp->rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp->rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}

static void veth_xdp_get(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i;

	get_page(virt_to_page(xdp->data));
	if (likely(!xdp_buff_has_frags(xdp)))
		return;

	for (i = 0; i < sinfo->nr_frags; i++)
		__skb_frag_ref(&sinfo->frags[i]);
}

static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
					struct xdp_buff *xdp,
					struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	u32 frame_sz;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_shinfo(skb)->nr_frags ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		u32 size, len, max_head_size, off, truesize, page_offset;
		struct sk_buff *nskb;
		struct page *page;
		int i, head_off;
		void *va;

		/* We need a private copy of the skb and data buffers since
		 * the ebpf program can modify it. We segment the original skb
		 * into order-0 pages without linearizing it.
		 *
		 * Make sure we have enough space for linear and paged area
		 */
		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
						  VETH_XDP_HEADROOM);
		if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
			goto drop;

		size = min_t(u32, skb->len, max_head_size);
		truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;

		/* Allocate skb head */
		va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
		if (!va)
			goto drop;

		nskb = napi_build_skb(va, truesize);
		if (!nskb) {
			page_pool_free_va(rq->page_pool, va, true);
			goto drop;
		}

		skb_reserve(nskb, VETH_XDP_HEADROOM);
		skb_copy_header(nskb, skb);
		skb_mark_for_recycle(nskb);

		if (skb_copy_bits(skb, 0, nskb->data, size)) {
			consume_skb(nskb);
			goto drop;
		}
		skb_put(nskb, size);

		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);

		/* Allocate paged area of new skb */
		off = size;
		len = skb->len - off;

		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
			size = min_t(u32, len, PAGE_SIZE);
			truesize = size;

			page = page_pool_dev_alloc(rq->page_pool, &page_offset,
						   &truesize);
			if (!page) {
				consume_skb(nskb);
				goto drop;
			}

			skb_add_rx_frag(nskb, i, page, page_offset, size,
					truesize);
			if (skb_copy_bits(skb, off,
					  page_address(page) + page_offset,
					  size)) {
				consume_skb(nskb);
				goto drop;
			}

			len -= size;
			off += size;
		}

		consume_skb(skb);
		skb = nskb;
	}

	/* SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
			 skb_headlen(skb), true);

	if (skb_is_nonlinear(skb)) {
		skb_shinfo(skb)->xdp_frags_size = skb->data_len;
		xdp_buff_set_frags_flag(xdp);
	} else {
		xdp_buff_clear_frags_flag(xdp);
	}
	*pskb = skb;

	return 0;
drop:
	consume_skb(skb);
	*pskb = NULL;

	return -ENOMEM;
}

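/* Run the attached XDP program on an skb that arrived via ndo_start_xmit.
 * The skb is first converted (copying it if necessary) into an xdp_buff;
 * on XDP_PASS the head/tail adjustments made by the program are folded
 * back into the skb before it continues up the stack.
 */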
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	struct veth_xdp_buff vxbuf;
	struct xdp_buff *xdp = &vxbuf.xdp;
	u32 act, metalen;
	int off;

	skb_prepare_for_gro(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	__skb_push(skb, skb->data - skb_mac_header(skb));
	if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
		goto drop;
	vxbuf.skb = skb;

	orig_data = xdp->data;
	orig_data_end = xdp->data_end;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		veth_xdp_get(xdp);
		consume_skb(skb);
		xdp->rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		veth_xdp_get(xdp);
		consume_skb(skb);
		xdp->rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	off = orig_data - xdp->data;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);

	skb_reset_mac_header(skb);

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp->data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */

	/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
	 * (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
	 */
	if (xdp_buff_has_frags(xdp))
		skb->data_len = skb_shinfo(skb)->xdp_frags_size;
	else
		skb->data_len = 0;

	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp->data - xdp->data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	xdp_return_buff(xdp);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += xdp_get_frame_len(frame);
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb) {
				if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
					netif_receive_skb(skb);
				else
					napi_gro_receive(&rq->xdp_napi, skb);
			}
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

999
1000static int veth_poll(struct napi_struct *napi, int budget)
1001{
638264dc
TM
1002 struct veth_rq *rq =
1003 container_of(napi, struct veth_rq, xdp_napi);
1c5b82e5 1004 struct veth_stats stats = {};
9cda7807 1005 struct veth_xdp_tx_bq bq;
948d4f21
TM
1006 int done;
1007
9cda7807
TM
1008 bq.count = 0;
1009
d1396004 1010 xdp_set_return_frame_no_direct();
1c5b82e5 1011 done = veth_xdp_rcv(rq, budget, &bq, &stats);
948d4f21 1012
fa349e39
SB
1013 if (stats.xdp_redirect > 0)
1014 xdp_do_flush();
1015
948d4f21
TM
1016 if (done < budget && napi_complete_done(napi, done)) {
1017 /* Write rx_notify_masked before reading ptr_ring */
638264dc
TM
1018 smp_store_mb(rq->rx_notify_masked, false);
1019 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
68468d8c
ED
1020 if (napi_schedule_prep(&rq->xdp_napi)) {
1021 WRITE_ONCE(rq->rx_notify_masked, true);
1022 __napi_schedule(&rq->xdp_napi);
1023 }
948d4f21
TM
1024 }
1025 }
1026
1c5b82e5 1027 if (stats.xdp_tx > 0)
bd32aa1f 1028 veth_xdp_flush(rq, &bq);
d1396004
TM
1029 xdp_clear_return_frame_no_direct();
1030
948d4f21
TM
1031 return done;
1032}
1033
0ebab78c
LB
1034static int veth_create_page_pool(struct veth_rq *rq)
1035{
1036 struct page_pool_params pp_params = {
1037 .order = 0,
1038 .pool_size = VETH_RING_SIZE,
1039 .nid = NUMA_NO_NODE,
1040 .dev = &rq->dev->dev,
1041 };
1042
1043 rq->page_pool = page_pool_create(&pp_params);
1044 if (IS_ERR(rq->page_pool)) {
1045 int err = PTR_ERR(rq->page_pool);
1046
1047 rq->page_pool = NULL;
1048 return err;
1049 }
1050
1051 return 0;
1052}
1053
dedd53c5 1054static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
948d4f21
TM
1055{
1056 struct veth_priv *priv = netdev_priv(dev);
638264dc 1057 int err, i;
948d4f21 1058
0ebab78c
LB
1059 for (i = start; i < end; i++) {
1060 err = veth_create_page_pool(&priv->rq[i]);
1061 if (err)
1062 goto err_page_pool;
1063 }
1064
dedd53c5 1065 for (i = start; i < end; i++) {
638264dc
TM
1066 struct veth_rq *rq = &priv->rq[i];
1067
1068 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
1069 if (err)
1070 goto err_xdp_ring;
1071 }
948d4f21 1072
dedd53c5 1073 for (i = start; i < end; i++) {
638264dc
TM
1074 struct veth_rq *rq = &priv->rq[i];
1075
638264dc 1076 napi_enable(&rq->xdp_napi);
d3256efd 1077 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
638264dc 1078 }
948d4f21
TM
1079
1080 return 0;
dedd53c5 1081
638264dc 1082err_xdp_ring:
dedd53c5 1083 for (i--; i >= start; i--)
638264dc 1084 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
8a519a57 1085 i = end;
0ebab78c 1086err_page_pool:
8a519a57 1087 for (i--; i >= start; i--) {
0ebab78c
LB
1088 page_pool_destroy(priv->rq[i].page_pool);
1089 priv->rq[i].page_pool = NULL;
1090 }
638264dc
TM
1091
1092 return err;
948d4f21
TM
1093}
1094
dedd53c5
PA
1095static int __veth_napi_enable(struct net_device *dev)
1096{
1097 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1098}
1099
1100static void veth_napi_del_range(struct net_device *dev, int start, int end)
948d4f21
TM
1101{
1102 struct veth_priv *priv = netdev_priv(dev);
638264dc 1103 int i;
948d4f21 1104
dedd53c5 1105 for (i = start; i < end; i++) {
638264dc
TM
1106 struct veth_rq *rq = &priv->rq[i];
1107
d3256efd 1108 rcu_assign_pointer(priv->rq[i].napi, NULL);
638264dc 1109 napi_disable(&rq->xdp_napi);
5198d545 1110 __netif_napi_del(&rq->xdp_napi);
638264dc
TM
1111 }
1112 synchronize_net();
1113
dedd53c5 1114 for (i = start; i < end; i++) {
638264dc
TM
1115 struct veth_rq *rq = &priv->rq[i];
1116
638264dc
TM
1117 rq->rx_notify_masked = false;
1118 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
1119 }
0ebab78c
LB
1120
1121 for (i = start; i < end; i++) {
1122 page_pool_destroy(priv->rq[i].page_pool);
1123 priv->rq[i].page_pool = NULL;
1124 }
948d4f21
TM
1125}
1126
dedd53c5
PA
1127static void veth_napi_del(struct net_device *dev)
1128{
1129 veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
1130}
1131
d3256efd
PA
1132static bool veth_gro_requested(const struct net_device *dev)
1133{
1134 return !!(dev->wanted_features & NETIF_F_GRO);
1135}
1136
dedd53c5
PA
1137static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
1138 bool napi_already_on)
948d4f21
TM
1139{
1140 struct veth_priv *priv = netdev_priv(dev);
638264dc 1141 int err, i;
948d4f21 1142
dedd53c5
PA
1143 for (i = start; i < end; i++) {
1144 struct veth_rq *rq = &priv->rq[i];
948d4f21 1145
dedd53c5 1146 if (!napi_already_on)
b48b89f9 1147 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
dedd53c5
PA
1148 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1149 if (err < 0)
1150 goto err_rxq_reg;
1151
1152 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1153 MEM_TYPE_PAGE_SHARED,
1154 NULL);
1155 if (err < 0)
1156 goto err_reg_mem;
1157
1158 /* Save original mem info as it can be overwritten */
1159 rq->xdp_mem = rq->xdp_rxq.mem;
1160 }
1161 return 0;
638264dc 1162
dedd53c5
PA
1163err_reg_mem:
1164 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1165err_rxq_reg:
1166 for (i--; i >= start; i--) {
1167 struct veth_rq *rq = &priv->rq[i];
638264dc 1168
dedd53c5
PA
1169 xdp_rxq_info_unreg(&rq->xdp_rxq);
1170 if (!napi_already_on)
1171 netif_napi_del(&rq->xdp_napi);
1172 }
1173
1174 return err;
1175}
1176
1177static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1178 bool delete_napi)
1179{
1180 struct veth_priv *priv = netdev_priv(dev);
1181 int i;
1182
1183 for (i = start; i < end; i++) {
1184 struct veth_rq *rq = &priv->rq[i];
1185
1186 rq->xdp_rxq.mem = rq->xdp_mem;
1187 xdp_rxq_info_unreg(&rq->xdp_rxq);
1188
1189 if (delete_napi)
1190 netif_napi_del(&rq->xdp_napi);
1191 }
1192}
1193
1194static int veth_enable_xdp(struct net_device *dev)
1195{
5e8d3dc7 1196 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
dedd53c5
PA
1197 struct veth_priv *priv = netdev_priv(dev);
1198 int err, i;
1199
1200 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1201 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1202 if (err)
1203 return err;
948d4f21 1204
d3256efd
PA
1205 if (!napi_already_on) {
1206 err = __veth_napi_enable(dev);
dedd53c5
PA
1207 if (err) {
1208 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1209 return err;
1210 }
d3256efd
PA
1211
1212 if (!veth_gro_requested(dev)) {
1213 /* user-space did not require GRO, but adding XDP
1214 * is supposed to get GRO working
1215 */
1216 dev->features |= NETIF_F_GRO;
1217 netdev_features_change(dev);
1218 }
1219 }
948d4f21
TM
1220 }
1221
d3256efd 1222 for (i = 0; i < dev->real_num_rx_queues; i++) {
638264dc 1223 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
d3256efd
PA
1224 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1225 }
948d4f21
TM
1226
1227 return 0;
948d4f21
TM
1228}
1229
1230static void veth_disable_xdp(struct net_device *dev)
1231{
1232 struct veth_priv *priv = netdev_priv(dev);
638264dc 1233 int i;
948d4f21 1234
638264dc
TM
1235 for (i = 0; i < dev->real_num_rx_queues; i++)
1236 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
d3256efd
PA
1237
1238 if (!netif_running(dev) || !veth_gro_requested(dev)) {
1239 veth_napi_del(dev);
1240
1241 /* if user-space did not require GRO, since adding XDP
1242 * enabled it, clear it now
1243 */
1244 if (!veth_gro_requested(dev) && netif_running(dev)) {
1245 dev->features &= ~NETIF_F_GRO;
1246 netdev_features_change(dev);
1247 }
1248 }
1249
dedd53c5 1250 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
948d4f21
TM
1251}
1252
dedd53c5 1253static int veth_napi_enable_range(struct net_device *dev, int start, int end)
d3256efd
PA
1254{
1255 struct veth_priv *priv = netdev_priv(dev);
1256 int err, i;
1257
dedd53c5 1258 for (i = start; i < end; i++) {
d3256efd
PA
1259 struct veth_rq *rq = &priv->rq[i];
1260
b48b89f9 1261 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
d3256efd
PA
1262 }
1263
dedd53c5 1264 err = __veth_napi_enable_range(dev, start, end);
d3256efd 1265 if (err) {
dedd53c5 1266 for (i = start; i < end; i++) {
d3256efd
PA
1267 struct veth_rq *rq = &priv->rq[i];
1268
1269 netif_napi_del(&rq->xdp_napi);
1270 }
1271 return err;
1272 }
1273 return err;
1274}
1275
dedd53c5
PA
1276static int veth_napi_enable(struct net_device *dev)
1277{
1278 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1279}
1280
4752eeb3
PA
1281static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1282{
1283 struct veth_priv *priv = netdev_priv(dev);
1284
1285 if (start >= end)
1286 return;
1287
1288 if (priv->_xdp_prog) {
1289 veth_napi_del_range(dev, start, end);
1290 veth_disable_xdp_range(dev, start, end, false);
1291 } else if (veth_gro_requested(dev)) {
1292 veth_napi_del_range(dev, start, end);
1293 }
1294}
1295
1296static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1297{
1298 struct veth_priv *priv = netdev_priv(dev);
1299 int err;
1300
1301 if (start >= end)
1302 return 0;
1303
1304 if (priv->_xdp_prog) {
1305 /* these channels are freshly initialized, napi is not on there even
1306 * when GRO is requeste
1307 */
1308 err = veth_enable_xdp_range(dev, start, end, false);
1309 if (err)
1310 return err;
1311
1312 err = __veth_napi_enable_range(dev, start, end);
1313 if (err) {
1314 /* on error always delete the newly added napis */
1315 veth_disable_xdp_range(dev, start, end, true);
1316 return err;
1317 }
1318 } else if (veth_gro_requested(dev)) {
1319 return veth_napi_enable_range(dev, start, end);
1320 }
1321 return 0;
1322}
1323
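/* Advertise which XDP operations this device currently supports; the set
 * depends on the peer's queue layout and on whether the peer can consume
 * redirected frames (XDP attached or GRO/NAPI enabled on it).
 */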
static void veth_set_xdp_features(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
		struct veth_priv *priv_peer = netdev_priv(peer);
		xdp_features_t val = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT |
				     NETDEV_XDP_ACT_RX_SG;

		if (priv_peer->_xdp_prog || veth_gro_requested(peer))
			val |= NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;
		xdp_set_features_flag(dev, val);
	} else {
		xdp_clear_features_flag(dev);
	}
}

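/* Changing the channel count must not break a running XDP setup: new RX
 * queues are brought up before the queue counts change, and the
 * now-unused range is torn down afterwards (on success or on revert),
 * with the carrier kept off for the duration of the resize.
 */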
static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct veth_priv *priv = netdev_priv(dev);
	unsigned int old_rx_count, new_rx_count;
	struct veth_priv *peer_priv;
	struct net_device *peer;
	int err;

	/* sanity check. Upper bounds are already enforced by the caller */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	/* avoid breaking XDP, if that is enabled */
	peer = rtnl_dereference(priv->peer);
	peer_priv = peer ? netdev_priv(peer) : NULL;
	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
		return -EINVAL;

	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
		return -EINVAL;

	old_rx_count = dev->real_num_rx_queues;
	new_rx_count = ch->rx_count;
	if (netif_running(dev)) {
		/* turn device off */
		netif_carrier_off(dev);
		if (peer)
			netif_carrier_off(peer);

		/* try to allocate new resources, as needed */
		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
		if (err)
			goto out;
	}

	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
	if (err)
		goto revert;

	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
	if (err) {
		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

		/* this error condition could happen only if rx and tx change
		 * in opposite directions (e.g. tx nr raises, rx nr decreases)
		 * and we can't do anything to fully restore the original
		 * status
		 */
		if (err2)
			pr_warn("Can't restore rx queues config %d -> %d %d",
				new_rx_count, old_rx_count, err2);
		else
			goto revert;
	}

out:
	if (netif_running(dev)) {
		/* note that we need to swap the arguments WRT the enable part
		 * to identify the range we have to disable
		 */
		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
		netif_carrier_on(dev);
		if (peer)
			netif_carrier_on(peer);
	}

	/* update XDP supported features */
	veth_set_xdp_features(dev);
	if (peer)
		veth_set_xdp_features(peer);

	return err;

revert:
	new_rx_count = old_rx_count;
	old_rx_count = ch->rx_count;
	goto out;
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	veth_set_xdp_features(dev);

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	return veth_alloc_queues(dev);
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}
	if (priv->_xdp_prog)
		features |= NETIF_F_GRO;

	return features;
}

static int veth_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int err;

	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
		return 0;

	peer = rtnl_dereference(priv->peer);
	if (features & NETIF_F_GRO) {
		err = veth_napi_enable(dev);
		if (err)
			return err;

		if (peer)
			xdp_features_set_redirect_target(peer, true);
	} else {
		if (peer)
			xdp_features_clear_redirect_target(peer);
		veth_napi_del(dev);
	}
	return 0;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

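/* Attach or detach an XDP program. The peer's MTU must fit in a single
 * page of headroom + data (more with a frags-aware program), and this
 * device needs at least as many RX queues as the peer has TX queues so
 * every peer queue maps onto a dedicated ring.
 */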
static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;
	struct net_device *peer;
	unsigned int max_mtu;
	int err;

	old_prog = priv->_xdp_prog;
	priv->_xdp_prog = prog;
	peer = rtnl_dereference(priv->peer);

	if (prog) {
		if (!peer) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
			err = -ENOTCONN;
			goto err;
		}

		max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
			  peer->hard_header_len;
		/* Allow increasing the max_mtu if the program supports
		 * XDP fragments.
		 */
		if (prog->aux->xdp_has_frags)
			max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;

		if (peer->mtu > max_mtu) {
			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
			err = -ERANGE;
			goto err;
		}

		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
			NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
			err = -ENOSPC;
			goto err;
		}

		if (dev->flags & IFF_UP) {
			err = veth_enable_xdp(dev);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
				goto err;
			}
		}

		if (!old_prog) {
			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
			peer->max_mtu = max_mtu;
		}

		xdp_features_set_redirect_target(peer, true);
	}

	if (old_prog) {
		if (!prog) {
			if (peer && !veth_gro_requested(dev))
				xdp_features_clear_redirect_target(peer);

			if (dev->flags & IFF_UP)
				veth_disable_xdp(dev);

			if (peer) {
				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
				peer->max_mtu = ETH_MAX_MTU;
			}
		}
		bpf_prog_put(old_prog);
	}

	if ((!!old_prog ^ !!prog) && peer)
		netdev_update_features(peer);

	return 0;
err:
	priv->_xdp_prog = old_prog;

	return err;
}

static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return veth_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	struct veth_xdp_buff *_ctx = (void *)ctx;

	if (!_ctx->skb)
		return -ENODATA;

	*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
	return 0;
}

static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			    enum xdp_rss_hash_type *rss_type)
{
	struct veth_xdp_buff *_ctx = (void *)ctx;
	struct sk_buff *skb = _ctx->skb;

	if (!skb)
		return -ENODATA;

	*hash = skb_get_hash(skb);
	*rss_type = skb->l4_hash ? XDP_RSS_TYPE_L4_ANY : XDP_RSS_TYPE_NONE;

	return 0;
}

static int veth_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
				u16 *vlan_tci)
{
	const struct veth_xdp_buff *_ctx = (void *)ctx;
	const struct sk_buff *skb = _ctx->skb;
	int err;

	if (!skb)
		return -ENODATA;

	err = __vlan_hwaccel_get_tag(skb, vlan_tci);
	if (err)
		return err;

	*vlan_proto = skb->vlan_proto;
	return err;
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_get_stats64     = veth_get_stats64,
	.ndo_set_rx_mode     = veth_set_multicast_list,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= veth_poll_controller,
#endif
	.ndo_get_iflink		= veth_get_iflink,
	.ndo_fix_features	= veth_fix_features,
	.ndo_set_features	= veth_set_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= veth_set_rx_headroom,
	.ndo_bpf		= veth_xdp,
	.ndo_xdp_xmit		= veth_ndo_xdp_xmit,
	.ndo_get_peer_dev	= veth_peer_dev,
};

static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
	.xmo_rx_timestamp		= veth_xdp_rx_timestamp,
	.xmo_rx_hash			= veth_xdp_rx_hash,
	.xmo_rx_vlan_tag		= veth_xdp_rx_vlan_tag,
};

#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
		       NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
		       NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
		       NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
		       NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)

static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static void veth_disable_gro(struct net_device *dev)
{
	dev->features &= ~NETIF_F_GRO;
	dev->wanted_features &= ~NETIF_F_GRO;
	netdev_update_features(dev);
}

static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
{
	int err;

	if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
		err = netif_set_real_num_tx_queues(dev, 1);
		if (err)
			return err;
	}
	if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
		err = netif_set_real_num_rx_queues(dev, 1);
		if (err)
			return err;
	}
	return 0;
}

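/* rtnl newlink handler: a veth pair is created as two devices. The peer is
 * built and registered first (possibly in a different netns), then this
 * device, and finally the two priv->peer pointers are tied together.
 */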
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	netif_inherit_tso_max(peer, dev);

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	/* keep GRO disabled by default to be consistent with the established
	 * veth behavior
	 */
	veth_disable_gro(peer);
	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp, 0, NULL);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);
	err = veth_init_queues(dev, tb);
	if (err)
		goto err_queues;

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);
	err = veth_init_queues(peer, tb);
	if (err)
		goto err_queues;

	veth_disable_gro(dev);
	/* update XDP supported features */
	veth_set_xdp_features(dev);
	veth_set_xdp_features(peer);

	return 0;

err_queues:
	unregister_netdevice(dev);
err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static unsigned int veth_get_num_queues(void)
{
	/* enforce the same queue limit as rtnl_create_link */
	int queues = num_possible_cpus();

	if (queues > 4096)
		queues = 4096;
	return queues;
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
	.get_num_tx_queues	= veth_get_num_queues,
	.get_num_rx_queues	= veth_get_num_queues,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);