// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0 /* Initial state */
#define REG_STATE_REGISTERED	0x1 /* Completed xdp_rxq_info_reg() */
#define REG_STATE_UNREGISTERED	0x2 /* Completed xdp_rxq_info_unreg() */
#define REG_STATE_UNUSED	0x3 /* Driver promised not to use this RXQ */

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
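
/* Usage sketch (illustrative, not part of this file): how a driver is
 * expected to combine the registration API in this file. "mydrv_rxq" and
 * the page_pool argument are hypothetical; only the xdp_rxq_info_* and
 * mem-model calls are real. Register the rxq first, then its memory
 * model; teardown is the single xdp_rxq_info_unreg() call, which also
 * unregisters the memory model.
 *
 *	static int mydrv_setup_rxq(struct net_device *dev,
 *				   struct mydrv_rxq *rxq, u32 queue_index,
 *				   struct page_pool *pp)
 *	{
 *		int err;
 *
 *		// napi_id = 0: no busy-poll wiring; frag_size = 0: no frags
 *		err = __xdp_rxq_info_reg(&rxq->xdp_rxq, dev, queue_index,
 *					 0, 0);
 *		if (err)
 *			return err;
 *
 *		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *						 MEM_TYPE_PAGE_POOL, pp);
 *		if (err)
 *			xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *		return err;
 *	}
 */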

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);
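
/* Usage sketch (illustrative): for MEM_TYPE_PAGE_SHARED (page_frag based
 * drivers) no allocator object is needed, so registration reduces to a
 * single call; only MEM_TYPE_PAGE_POOL requires an allocator, as enforced
 * in __xdp_reg_mem_model() above. The xdp_mem_info would typically be
 * embedded in a driver or map structure.
 *
 *	struct xdp_mem_info mem = {};
 *	int err;
 *
 *	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_SHARED, NULL);
 *	if (err)
 *		return err;
 *	// ... use mem ...
 *	xdp_unreg_mem_model(&mem);
 */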

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
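
/* Usage sketch (illustrative): the defer/flush pattern as seen from a
 * caller completing many frames, e.g. a TX completion loop.
 * xdp_frame_bulk_init() comes from include/net/xdp.h; the bulk queue
 * lives on the stack, and the sequence runs under rcu_read_lock() as
 * required above.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */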

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
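
/* Usage sketch (illustrative): a driver's ndo_bpf() XDP_SETUP_PROG branch,
 * with a hypothetical "priv->xdp_info" of type struct xdp_attachment_info.
 * xdp_attachment_setup() takes over the prog reference held in bpf->prog
 * and drops the previously attached one.
 *
 *	case XDP_SETUP_PROG:
 *		// ... swap bpf->prog into the datapath ...
 *		xdp_attachment_setup(&priv->xdp_info, bpf);
 *		return 0;
 */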

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);
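
/* The XDP_WARN() macro in include/net/xdp.h supplies __func__ and
 * __LINE__, so a fast-path caller just writes (illustrative):
 *
 *	if (unlikely(err))
 *		XDP_WARN("Driver BUG: missing reserved tailroom");
 */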

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	/* Until page_pool get SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
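
/* Usage sketch (illustrative): the bulk-allocation path, mirroring what
 * the cpumap redirect code does: grab a batch of skb heads with
 * xdp_alloc_skb_bulk() and attach one xdp_frame to each via
 * __xdp_build_skb_from_frame(). __GFP_ZERO matters here, since
 * build_skb_around() assumes a memset-cleared skb. Error handling is
 * abbreviated.
 *
 *	void *skbs[BATCH];	// BATCH is a caller-chosen bound, n <= BATCH
 *	int i;
 *
 *	if (xdp_alloc_skb_bulk(skbs, n, __GFP_ZERO | GFP_ATOMIC))
 *		return;
 *	for (i = 0; i < n; i++) {
 *		struct sk_buff *skb;
 *
 *		skb = __xdp_build_skb_from_frame(frames[i], skbs[i], dev);
 *		if (skb)
 *			netif_receive_skb(skb);
 *	}
 */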

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
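
/* Usage sketch (illustrative): the single-frame wrapper, e.g. on a
 * redirect target that falls back to the regular netstack. On failure the
 * frame is still owned by the caller and must be returned.
 *
 *	struct sk_buff *skb;
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (!skb) {
 *		xdp_return_frame(xdpf);
 *		return -ENOMEM;
 *	}
 *	netif_rx(skb);
 */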

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}