// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

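/* Add a Tx socket to the pool's xsk_tx_list. list_add_rcu() lets the
 * Tx path traverse the list without taking xsk_tx_list_lock; only
 * updaters serialize on the lock.
 */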
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

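/* Free the pool and everything it owns. Tolerates a NULL or partially
 * constructed pool, so the error path of xp_create_and_assign_umem()
 * can call it unconditionally.
 */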
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
				  GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}

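/* Create a pool sized after the umem it is bound to. The free_heads
 * array is only populated in unaligned chunk mode; with aligned chunks
 * every chunk maps 1:1 onto a head, so each head's address can be
 * precomputed instead.
 */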
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->free_list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

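/* Ask the driver to tear down its zero-copy state for this queue by
 * resubmitting XDP_SETUP_XSK_POOL with a NULL pool. A no-op if
 * zero-copy was never enabled on the umem.
 */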
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

#define NETDEV_XDP_ACT_ZC	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

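/* Bind the pool to a device and queue id. Copy mode always works;
 * zero-copy additionally requires the driver to advertise the full
 * NETDEV_XDP_ACT_ZC feature set and to DMA map the buffers from its
 * ndo_bpf() handler. Unless a mode was forced via XDP_ZEROCOPY or
 * XDP_COPY, a failed zero-copy setup falls back to copy mode instead
 * of returning an error.
 */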
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

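/* Variant of xp_assign_dev() used when a umem is shared between
 * sockets: the binding mode is inherited from the socket that first
 * registered the umem, and the new pool must bring its own fill and
 * completion rings.
 */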
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

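/* Final pool teardown, deferred to a workqueue so that rtnl_lock()
 * can be taken to unbind from the device. The fill and completion
 * rings belong to the pool and are destroyed here; the umem reference
 * is dropped last since the umem may be shared with other pools.
 */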
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

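/* Drop a pool reference. The last put schedules the teardown work
 * instead of destroying the pool inline. Returns true if this call
 * released the final reference.
 */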
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

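/* DMA mappings are kept in xsk_dma_map structures on the umem, keyed
 * by netdev, so that pools sharing the same umem and device reuse one
 * refcounted mapping instead of mapping the pages again.
 */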
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

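/* Tag each page whose DMA address is physically contiguous with the
 * next page by setting XSK_NEXT_PG_CONTIG_MASK in the stored address.
 * Unaligned chunks may then safely span such page boundaries.
 */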
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];

			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
		}
	}

	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

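/* DMA map the umem pages for a device, reusing an existing mapping of
 * the same umem on the same netdev when one exists. On success the
 * pool holds its own copy of the mapped addresses in dma_pages.
 */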
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

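/* Slow-path allocation: pop addresses off the fill ring until a valid
 * one is found and bind it to a buffer head. Invalid descriptors are
 * counted and skipped instead of failing the allocation.
 */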
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages_cnt)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	xskq_cons_release(pool->fq);
	return xskb;
}

struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del_init(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

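/* Batched fill-ring allocation: consume up to max descriptors in one
 * pass. Note that xskq_cons_release_n() releases all max peeked
 * entries, including invalid ones that produced no buffer.
 */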
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		if (pool->unaligned) {
			xskb = pool->free_heads[--pool->free_heads_cnt];
			xp_init_xskb_addr(xskb, pool, addr);
			if (pool->dma_pages_cnt)
				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
		} else {
			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
		}

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
		list_del_init(&xskb->free_list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dma_need_sync)) {
		struct xdp_buff *buff;

		/* Slow path */
		buff = xp_alloc(pool);
		if (buff)
			*xdp = buff;
		return !!buff;
	}

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

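/* Return a buffer to the pool's free list. The list_empty() check
 * makes this idempotent: freeing a buffer that is already on the
 * free list is a no-op.
 */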
void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->free_list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

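/* Translate a raw ring address into a kernel virtual address. In
 * unaligned mode the offset travels in the upper bits of the address
 * and has to be folded back in first; xp_raw_get_dma() below does the
 * same for DMA addresses.
 */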
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

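/* Slow-path DMA syncs for devices that need explicit cache
 * maintenance; callers are expected to check pool->dma_need_sync
 * before taking these paths.
 */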
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);