// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

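/* Add an AF_XDP socket to the pool's Tx list. The list is walked under RCU
 * by the Tx path, so insertion only needs the spinlock to serialize against
 * concurrent writers. Rx-only sockets are not tracked here.
 */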
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

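/* Free the buffer metadata array and the pool itself. Safe to call with a
 * NULL pool, which the error path in xp_create_and_assign_umem() relies on.
 */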
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->heads);
	kvfree(pool);
}

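/* Allocate a buffer pool on top of a registered umem and seed it from the
 * umem's geometry: one xdp_buff_xsk head per chunk, all of them initially on
 * the free_heads stack. The fill and completion rings staged in the socket
 * (fq_tmp/cq_tmp) are adopted by the pool here.
 */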
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
			GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		pool->free_heads[i] = xskb;
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

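/* Attach the driver's xdp_rxq_info to every preallocated xdp_buff in the
 * pool, so that frames received into these buffers carry valid rxq metadata.
 */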
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

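/* Tear down zero-copy mode in the driver by issuing an XDP_SETUP_XSK_POOL
 * command with a NULL pool for this queue id. Requires the rtnl lock, as it
 * calls into ndo_bpf().
 */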
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

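/* Bind the pool to a netdev queue. Copy mode only needs the pool registered
 * at the queue id; zero-copy additionally requires ndo_bpf and
 * ndo_xsk_wakeup support plus a successful XDP_SETUP_XSK_POOL call into the
 * driver. Unless XDP_ZEROCOPY was explicitly requested, a zero-copy failure
 * falls back to copy mode instead of returning an error.
 */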
static int __xp_assign_dev(struct xsk_buff_pool *pool,
			   struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. This also
	 * supports drivers that do not implement the need_wakeup feature;
	 * they will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags)
{
	return __xp_assign_dev(pool, dev, queue_id, flags);
}

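/* Bind a pool that is shared between sockets to a second queue id. The
 * caller must have supplied dedicated fill and completion rings, and the
 * flags are derived from how the original binding ended up (zero-copy or
 * copy mode, with or without need_wakeup).
 */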
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return __xp_assign_dev(pool, dev, queue_id, flags);
}

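/* Undo xp_assign_dev(): disable zero-copy in the driver if it was enabled,
 * unregister the pool from the queue id, and drop the netdev reference.
 */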
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

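/* Final teardown, run from a workqueue so the rtnl lock can be taken even
 * though the last pool reference may be dropped from atomic context. Tears
 * down the device binding, destroys the fill and completion rings, and
 * releases the umem and the pool itself.
 */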
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

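/* Drop a pool reference. When the last reference goes away, the actual
 * teardown is deferred to a workqueue; returns true in that case so the
 * caller knows the pool is on its way out.
 */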
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

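/* Look up an existing DMA mapping of this umem for the pool's netdev. The
 * umem keeps one xsk_dma_map per device so that pools sharing a umem on the
 * same device can reuse the mapping instead of mapping the pages again.
 */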
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

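/* Allocate a new per-device DMA mapping for nr_pages umem pages and link it
 * into the umem's list of mappings, with an initial reference for the caller.
 */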
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

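/* Unmap all pages of a DMA mapping and free the mapping structure. Called
 * once the last pool using the mapping is gone, or on mapping setup errors.
 */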
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

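/* Drop this pool's reference on the shared DMA mapping. Only when the last
 * reference goes away are the pages actually unmapped and the pool's private
 * copy of the address array freed.
 */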
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

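/* Mark pages whose DMA addresses are physically adjacent by setting
 * XSK_NEXT_PG_CONTIG_MASK in the low bits of the address, so that buffers in
 * unaligned mode can span page boundaries without extra lookups.
 */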
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

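/* Give the pool its own copy of the mapping's DMA address array so the hot
 * allocation path can translate addresses without touching the shared
 * xsk_dma_map.
 */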
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

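/* DMA map the umem pages for a device, reusing an existing mapping if this
 * umem has already been mapped for the same netdev. On a fresh mapping,
 * every page is mapped bidirectionally, sync requirements are recorded, and
 * contiguity bits are computed for unaligned mode.
 */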
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

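/* Validate a fill ring address. In unaligned mode the chunk must lie fully
 * inside the umem and must not straddle two non-contiguous pages; in aligned
 * mode the address is simply truncated to its chunk boundary and bounds
 * checked.
 */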
static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

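/* Slow path of buffer allocation: take a free head, then consume fill ring
 * entries until a valid address is found, counting empty-ring and
 * invalid-descriptor events in the queue stats. The buffer's virtual and DMA
 * addresses are derived from the chosen umem address.
 */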
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

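/* Fast path of buffer allocation: prefer recycled buffers on the free list
 * and fall back to the fill ring via __xp_alloc(). Resets the data pointers
 * and syncs the buffer for the device when the mapping requires it.
 */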
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

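/* Report whether count buffers can be allocated without failing: enough
 * recycled buffers on the free list, or fill ring entries to cover the rest.
 */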
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

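/* Return a buffer to the pool's free list for reuse by xp_alloc(). */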
void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

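/* Translate a raw umem address straight to a kernel virtual or DMA address,
 * bypassing the xdp_buff_xsk bookkeeping. In unaligned mode the offset
 * encoded in the upper bits is folded into the address first.
 */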
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

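/* Out-of-line DMA sync helpers. The _slow suffix reflects that callers are
 * expected to check pool->dma_need_sync first and only take this path when
 * the mapping actually requires synchronization.
 */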
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);