// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX	0xFFFE
#define MEM_ID_MIN	1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_free(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);
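
/* Usage sketch (illustrative, not part of this file): a hypothetical
 * driver "foo" registering an RX queue at ndo_open time. Most drivers
 * use the xdp_rxq_info_reg() wrapper, which passes frag_size == 0.
 *
 *	static int foo_setup_rxq(struct foo_ring *ring)
 *	{
 *		int err;
 *
 *		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *					 ring->q_index, ring->napi.napi_id, 0);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 * Teardown mirrors this with xdp_rxq_info_unreg(&ring->xdp_rxq).
 */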

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_free(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);
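
/* Usage sketch (illustrative): a driver that owns a page_pool usually
 * registers it per RX queue via xdp_rxq_info_reg_mem_model() below;
 * xdp_reg_mem_model() is the variant for callers managing a bare
 * struct xdp_mem_info. The pp_params values here are placeholders.
 *
 *	struct page_pool_params pp_params = { 0 };
 *	struct page_pool *pool;
 *	struct xdp_mem_info mem;
 *	int err;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
 *	if (err)
 *		page_pool_destroy(pool);
 */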

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}
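
/* Callers use the wrappers below rather than __xdp_return() directly.
 * Illustrative rule of thumb for the napi_direct distinction:
 *
 *	if (running_in_own_rx_napi)		// pseudo-condition
 *		xdp_return_frame_rx_napi(xdpf);	// may recycle directly
 *	else
 *		xdp_return_frame(xdpf);		// safe from any context
 */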

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
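
/* Usage sketch (illustrative): a TX completion path returning a batch
 * of frames via the bulk API. The queue lives on the stack, and the
 * final flush plus the RCU read-side section are both required.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (i = 0; i < budget; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */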

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
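
/* Usage sketch (illustrative): passing a redirected frame up the stack
 * as an skb, falling back to returning the frame on allocation failure.
 *
 *	struct sk_buff *skb;
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (!skb) {
 *		xdp_return_frame(xdpf);
 *		return;
 *	}
 *	netif_receive_skb(skb);
 */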

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}

__bpf_kfunc_start_defs();

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA``    : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value. The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA``    : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
 * @ctx: XDP context pointer.
 * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
 * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
 *
 * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
 * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
 * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
 * and should be used as follows:
 * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
 *
 * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
 * Driver is expected to provide those in **host byte order (usually LE)**,
 * so the bpf program should not perform byte conversion.
 * According to 802.1Q standard, *VLAN TCI (Tag control information)*
 * is a bit field that contains:
 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
 * *Drop eligible indicator (DEI)* - 1 bit,
 * *Priority code point (PCP)* - 3 bits.
 * For detailed meaning of DEI and PCP, please refer to other sources.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
 * * ``-ENODATA``    : VLAN tag was not stripped or is not available
 */
__bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
					     __be16 *vlan_proto, u16 *vlan_tci)
{
	return -EOPNOTSUPP;
}

__bpf_kfunc_end_defs();
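
/* BPF-side usage sketch (illustrative): an XDP program declares a
 * metadata kfunc with __ksym and calls it; at attach time on a device
 * whose driver implements the kfunc, the stub above is replaced.
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx timestamp: %llu", ts);
 *		return XDP_PASS;
 *	}
 */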

BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_KFUNCS_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);