// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>

#include "xsk_queue.h"

static void xp_addr_unmap(struct xsk_buff_pool *pool)
{
	vunmap(pool->addrs);
}

static int xp_addr_map(struct xsk_buff_pool *pool,
		       struct page **pages, u32 nr_pages)
{
	pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pool->addrs)
		return -ENOMEM;

	return 0;
}

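/*
 * The umem pages are vmap()ed into one virtually contiguous kernel
 * range, so translating a chunk address into a pointer is plain
 * arithmetic: pool->addrs + addr.
 */
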
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	xp_addr_unmap(pool);
	kvfree(pool->heads);
	kvfree(pool);
}

struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	int err;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)chunk_size - 1);
	pool->addrs_cnt = size;
	pool->heads_cnt = chunks;
	pool->free_heads_cnt = chunks;
	pool->headroom = headroom;
	pool->chunk_size = chunk_size;
	pool->unaligned = unaligned;
	pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = chunk_size - headroom;
		pool->free_heads[i] = xskb;
	}

	err = xp_addr_map(pool, pages, nr_pages);
	if (!err)
		return pool;

out:
	xp_destroy(pool);
	return NULL;
}

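/*
 * Resulting layout of each chunk, as implied by the frame_len
 * calculation above:
 *
 *	[ pool->headroom | XDP_PACKET_HEADROOM | frame_len data bytes ]
 *
 * data_hard_start is set past the pool headroom on allocation, and
 * XDP_PACKET_HEADROOM is then reserved in front of the packet data.
 */
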
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq)
{
	pool->fq = fq;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	if (pool->dma_pages_cnt == 0)
		return;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = &pool->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

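/*
 * DMA addresses of whole pages are page aligned, so the low bits of
 * each dma_pages[] entry are free to carry metadata:
 * XSK_NEXT_PG_CONTIG_MASK marks that the next page is also the next
 * page in DMA address space. The unaligned path uses this to reject
 * chunks that would span a physical discontinuity.
 */
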
static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
{
	u32 i;

	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	dma_addr_t dma;
	u32 i;

	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
				   GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dev;
	pool->dma_pages_cnt = nr_pages;
	pool->dma_need_sync = false;

	for (i = 0; i < pool->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			xp_dma_unmap(pool, attrs);
			return -ENOMEM;
		}

		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(pool);
	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

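/*
 * Sketch of the expected calling convention (driver specifics are
 * illustrative, not taken from this file): a zero-copy driver maps the
 * umem once when the XDP socket is bound and must unmap it with the
 * same attrs on teardown:
 *
 *	err = xp_dma_map(pool, dev, attrs, pages, nr_pages);
 *	if (err)
 *		goto err_out;
 *	...
 *	xp_dma_unmap(pool, attrs);
 */
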
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;

	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

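/*
 * Aligned mode masks the handle down to its chunk start, so a single
 * upper-bounds check suffices. Unaligned mode permits an arbitrary
 * offset within the umem, so the full chunk must fit below addrs_cnt
 * and must not straddle two non-contiguous pages.
 */
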
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

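/*
 * xp_alloc() below hands out recycled buffers from the pool's free
 * list first and falls back on __xp_alloc(), i.e. the fill ring, only
 * when the free list is empty.
 */
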
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;

	/* Slow path: check the fill ring for the remainder */
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

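/*
 * Note that xp_free() only returns the buffer to the pool's software
 * free list; the fill and completion rings are not touched. Recycled
 * buffers are handed out again by xp_alloc() before the fill ring is
 * consulted.
 */
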
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

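/*
 * The raw accessors above take addresses straight from ring
 * descriptors that have already been validated (e.g. on the Tx path),
 * so they translate without re-checking bounds.
 */
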
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len,
				      DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);