// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

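/* Register an XDP socket on the pool's Tx list. Sockets not bound for
 * Tx are skipped. The list is traversed under RCU by the zero-copy Tx
 * path, so additions use list_add_rcu() under the pool's Tx list lock.
 */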
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

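/* Remove an XDP socket from the pool's Tx list; the counterpart of
 * xp_add_xsk() on socket teardown.
 */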
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_del_rcu(&xs->tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

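/* Free the pool and its buffer metadata array. Safe to call on a NULL
 * or partially constructed pool, which lets the error path of
 * xp_create_and_assign_umem() use it directly.
 */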
void xp_destroy(struct xsk_buff_pool *pool)
{
        if (!pool)
                return;

        kvfree(pool->heads);
        kvfree(pool);
}

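/* Allocate a buffer pool sized for the umem's chunk count and derive
 * the pool parameters (chunk mask, headroom, frame length) from the
 * umem. The fill and completion rings staged on the socket are moved
 * into the pool, and every xdp_buff_xsk head starts out on the
 * free-heads stack.
 */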
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem)
{
        struct xsk_buff_pool *pool;
        struct xdp_buff_xsk *xskb;
        u32 i;

        pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
                        GFP_KERNEL);
        if (!pool)
                goto out;

        pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
        if (!pool->heads)
                goto out;

        pool->chunk_mask = ~((u64)umem->chunk_size - 1);
        pool->addrs_cnt = umem->size;
        pool->heads_cnt = umem->chunks;
        pool->free_heads_cnt = umem->chunks;
        pool->headroom = umem->headroom;
        pool->chunk_size = umem->chunk_size;
        pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
        pool->frame_len = umem->chunk_size - umem->headroom -
                          XDP_PACKET_HEADROOM;
        pool->umem = umem;
        pool->addrs = umem->addrs;
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
        refcount_set(&pool->users, 1);

        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;
        xs->fq_tmp = NULL;
        xs->cq_tmp = NULL;

        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
                xskb->pool = pool;
                xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
                pool->free_heads[i] = xskb;
        }

        return pool;

out:
        xp_destroy(pool);
        return NULL;
}

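/* Point every buffer's xdp_buff at the driver's Rx queue info, so each
 * allocated buffer carries the correct rxq backpointer.
 */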
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
        u32 i;

        for (i = 0; i < pool->heads_cnt; i++)
                pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

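/* Tell the driver to tear down the zero-copy pool for this queue by
 * issuing XDP_SETUP_XSK_POOL with a NULL pool. A no-op when the umem
 * never entered zero-copy mode. Must run under rtnl_lock.
 */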
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
        struct netdev_bpf bpf;
        int err;

        ASSERT_RTNL();

        if (pool->umem->zc) {
                bpf.command = XDP_SETUP_XSK_POOL;
                bpf.xsk.pool = NULL;
                bpf.xsk.queue_id = pool->queue_id;

                err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

                if (err)
                        WARN(1, "Failed to disable zero-copy!\n");
        }
}

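/* Bind the pool to a netdev queue. Copy mode succeeds as soon as the
 * pool is registered at the queue id; zero-copy additionally requires
 * ndo_bpf/ndo_xsk_wakeup support and a successful XDP_SETUP_XSK_POOL
 * call into the driver. Unless XDP_ZEROCOPY was forced, a zero-copy
 * failure falls back to copy mode instead of returning an error.
 */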
static int __xp_assign_dev(struct xsk_buff_pool *pool,
                           struct net_device *netdev, u16 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err = 0;

        ASSERT_RTNL();

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (xsk_get_pool_from_qid(netdev, queue_id))
                return -EBUSY;

        pool->netdev = netdev;
        pool->queue_id = queue_id;
        err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
        if (err)
                return err;

        if (flags & XDP_USE_NEED_WAKEUP) {
                pool->uses_need_wakeup = true;
                /* Tx needs to be explicitly woken up the first time.
                 * Also for supporting drivers that do not implement this
                 * feature. They will always have to call sendto().
                 */
                pool->cached_need_wakeup = XDP_WAKEUP_TX;
        }

        dev_hold(netdev);

        if (force_copy)
                /* For copy-mode, we are done. */
                return 0;

        if (!netdev->netdev_ops->ndo_bpf ||
            !netdev->netdev_ops->ndo_xsk_wakeup) {
                err = -EOPNOTSUPP;
                goto err_unreg_pool;
        }

        bpf.command = XDP_SETUP_XSK_POOL;
        bpf.xsk.pool = pool;
        bpf.xsk.queue_id = queue_id;

        err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
        if (err)
                goto err_unreg_pool;

        if (!pool->dma_pages) {
                WARN(1, "Driver did not DMA map zero-copy buffers");
                err = -EINVAL; /* without this, a forced-zc failure would be reported as success */
                goto err_unreg_xsk;
        }
        pool->umem->zc = true;
        return 0;

err_unreg_xsk:
        xp_disable_drv_zc(pool);
err_unreg_pool:
        if (!force_zc)
                err = 0; /* fallback to copy mode */
        if (err)
                xsk_clear_pool_at_qid(netdev, queue_id);
        return err;
}

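/* Bind a pool that owns its own fill and completion rings to a queue. */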
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags)
{
        return __xp_assign_dev(pool, dev, queue_id, flags);
}

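/* Bind a pool sharing an already-bound umem to another queue. The new
 * binding reuses the mode (zero-copy vs. copy) and the need_wakeup
 * setting that the original binding negotiated.
 */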
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
                         struct net_device *dev, u16 queue_id)
{
        u16 flags;

        /* One fill and completion ring required for each queue id. */
        if (!pool->fq || !pool->cq)
                return -EINVAL;

        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
        if (pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;

        return __xp_assign_dev(pool, dev, queue_id, flags);
}

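/* Undo xp_assign_dev(): disable zero-copy in the driver, unregister
 * the pool from the queue id and drop the netdev reference.
 */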
void xp_clear_dev(struct xsk_buff_pool *pool)
{
        if (!pool->netdev)
                return;

        xp_disable_drv_zc(pool);
        xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
        dev_put(pool->netdev);
        pool->netdev = NULL;
}

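/* Final teardown, run from a workqueue so the last xp_put_pool()
 * caller does not have to take rtnl_lock itself: unbind from the
 * device, destroy the fill and completion rings, release the umem and
 * free the pool.
 */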
static void xp_release_deferred(struct work_struct *work)
{
        struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
                                                  work);

        rtnl_lock();
        xp_clear_dev(pool);
        rtnl_unlock();

        if (pool->fq) {
                xskq_destroy(pool->fq);
                pool->fq = NULL;
        }

        if (pool->cq) {
                xskq_destroy(pool->cq);
                pool->cq = NULL;
        }

        xdp_put_umem(pool->umem);
        xp_destroy(pool);
}

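/* Pool lifetime is refcounted: xp_get_pool() takes a reference and
 * xp_put_pool() drops one, scheduling the deferred teardown when the
 * last user goes away. The return value tells the caller whether it
 * dropped the final reference.
 */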
void xp_get_pool(struct xsk_buff_pool *pool)
{
        refcount_inc(&pool->users);
}

bool xp_put_pool(struct xsk_buff_pool *pool)
{
        if (!pool)
                return false;

        if (refcount_dec_and_test(&pool->users)) {
                INIT_WORK(&pool->work, xp_release_deferred);
                schedule_work(&pool->work);
                return true;
        }

        return false;
}

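/* A umem may be DMA mapped once per device; the mappings hang off the
 * umem in xsk_dma_list. Look up the mapping that matches this pool's
 * netdev, if one already exists.
 */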
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL;
}

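/* Allocate a per-device DMA mapping descriptor with room for one
 * dma_addr_t per umem page, insert it on the umem's list and give it
 * an initial reference.
 */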
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
        if (!dma_map)
                return NULL;

        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL;
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
        list_del(&dma_map->list);
        kvfree(dma_map->dma_pages);
        kfree(dma_map);
}

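/* Unmap every page of a DMA mapping and free the descriptor. The
 * exported xp_dma_unmap() only does this once the last pool sharing
 * the mapping has dropped its reference.
 */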
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        if (pool->dma_pages_cnt == 0)
                return;

        dma_map = xp_find_dma_map(pool);
        if (!dma_map) {
                WARN(1, "Could not find dma_map for device");
                return;
        }

        if (!refcount_dec_and_test(&dma_map->users))
                return;

        __xp_dma_unmap(dma_map, attrs);
        kvfree(pool->dma_pages);
        pool->dma_pages_cnt = 0;
        pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

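/* Mark each DMA page whose successor is physically contiguous by
 * setting XSK_NEXT_PG_CONTIG_MASK in its entry. Unaligned chunks may
 * straddle a page boundary, and this flag lets the hot path decide
 * cheaply whether such a chunk is usable.
 */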
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}

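/* Copy the shared mapping's page table and parameters into the pool so
 * the fast path can translate addresses without touching the
 * refcounted xsk_dma_map.
 */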
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM;

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));

        return 0;
}

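/* DMA map the umem's pages for a device, or reuse an existing mapping
 * when another pool has already mapped this umem for the same netdev.
 * On success the pool holds a reference on the mapping and a private
 * copy of its page table.
 */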
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        int err;
        u32 i;

        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                err = xp_init_dma_info(pool, dma_map);
                if (err)
                        return err;

                refcount_inc(&dma_map->users);
                return 0;
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma)) {
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM;
                }
                if (dma_need_sync(dev, dma))
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        if (pool->unaligned)
                xp_check_dma_contiguity(dma_map);

        err = xp_init_dma_info(pool, dma_map);
        if (err) {
                __xp_dma_unmap(dma_map, attrs);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(xp_dma_map);

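/* Descriptor address validation. An aligned-mode address is masked
 * down to its chunk start and only has to fall inside the umem; an
 * unaligned-mode address must additionally leave room for a whole
 * chunk and must not cross two non-contiguous pages.
 */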
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                          u64 addr)
{
        return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_unaligned_extract_addr(*addr);
        if (*addr >= pool->addrs_cnt ||
            *addr + pool->chunk_size > pool->addrs_cnt ||
            xp_addr_crosses_non_contig_pg(pool, *addr))
                return false;
        return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_aligned_extract_addr(pool, *addr);
        return *addr < pool->addrs_cnt;
}

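/* Slow path of xp_alloc(): take a buffer head from the free-heads
 * stack, then pop fill-ring entries until a valid address is found,
 * counting empty-ring and invalid-descriptor events as statistics.
 * The buffer's virtual and DMA addresses are derived from the chunk
 * address before it is handed out.
 */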
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;
        u64 addr;
        bool ok;

        if (pool->free_heads_cnt == 0)
                return NULL;

        xskb = pool->free_heads[--pool->free_heads_cnt];

        for (;;) {
                if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
                        pool->fq->queue_empty_descs++;
                        xp_release(xskb);
                        return NULL;
                }

                ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
                     xp_check_aligned(pool, &addr);
                if (!ok) {
                        pool->fq->invalid_descs++;
                        xskq_cons_release(pool->fq);
                        continue;
                }
                break;
        }
        xskq_cons_release(pool->fq);

        xskb->orig_addr = addr;
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
        if (pool->dma_pages_cnt) {
                xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
                                   ~XSK_NEXT_PG_CONTIG_MASK) +
                                  (addr & ~PAGE_MASK);
                xskb->dma = xskb->frame_dma + pool->headroom +
                            XDP_PACKET_HEADROOM;
        }
        return xskb;
}

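/* Allocate one buffer for the driver Rx path, preferring the pool's
 * free list of recycled buffers over the fill-ring slow path. The
 * data pointers are reset for a fresh frame and the buffer is synced
 * for device access when the platform requires it.
 */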
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;

        if (!pool->free_list_cnt) {
                xskb = __xp_alloc(pool);
                if (!xskb)
                        return NULL;
        } else {
                pool->free_list_cnt--;
                xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
                                        free_list_node);
                list_del(&xskb->free_list_node);
        }

        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;

        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
        }
        return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

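/* Report whether a subsequent allocation of count buffers can succeed,
 * based on the free list plus the entries currently on the fill ring.
 */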
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
        if (pool->free_list_cnt >= count)
                return true;
        return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

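/* Recycle a buffer onto the pool's free list so the next xp_alloc()
 * can reuse it without consulting the fill ring.
 */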
void xp_free(struct xdp_buff_xsk *xskb)
{
        xskb->pool->free_list_cnt++;
        list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

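/* Translate a raw umem address (e.g. from a Tx descriptor) into its
 * kernel virtual or DMA address. In unaligned mode the offset is
 * first folded into the base address.
 */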
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return (pool->dma_pages[addr >> PAGE_SHIFT] &
                ~XSK_NEXT_PG_CONTIG_MASK) +
                (addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

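/* Out-of-line halves of the DMA sync helpers, reached only when the
 * pool's dma_need_sync flag says the platform actually requires cache
 * maintenance for this mapping.
 */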
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
        dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
                                      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size)
{
        dma_sync_single_range_for_device(pool->dev, dma, 0,
                                         size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);