/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct device;
struct page;

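/* Per-buffer state wrapped around the xdp_buff handed to drivers and XDP
 * programs. Informal field notes (inferred from how the XDP core uses the
 * struct, not normative documentation):
 * @xdp:	the buffer as seen by drivers and XDP programs
 * @dma:	DMA address where packet data starts, inside the frame
 * @frame_dma:	DMA address of the start of the whole frame/chunk
 * @pool:	owning buffer pool
 * @unaligned:	set when the pool runs in unaligned-chunk mode
 * @orig_addr:	untranslated umem address this buffer was allocated at
 * @free_list_node: linkage on pool->free_list when the buffer is recycled
 */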
struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	bool unaligned;
	u64 orig_addr;
	struct list_head free_list_node;
};

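/* One pool per umem: the fill queue to allocate from, a free list of
 * recycled buffers, the DMA mapping of the umem pages, and per-chunk sizing.
 * Informal field notes (inferred from usage, not normative):
 * @dma_pages:	one DMA address per umem page; bit 0 marks the following page
 *		as physically contiguous (see XSK_NEXT_PG_CONTIG_MASK below)
 * @chunk_mask:	rounds an aligned-mode address down to its chunk start
 * @frame_len:	usable bytes per buffer within a chunk
 * @dma_need_sync: false on DMA-coherent setups, letting the inline sync
 *		helpers below skip the slow path entirely
 * @free_heads:	flexible array of pointers to free buffer heads, sized at
 *		pool creation
 */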
struct xsk_buff_pool {
	struct xsk_queue *fq;
	struct list_head free_list;
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 heads_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 frame_len;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	struct device *dev;
	struct xdp_buff_xsk *free_heads[];
};

/* AF_XDP core. */
struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
				u32 chunk_size, u32 headroom, u64 size,
				bool unaligned);
void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);

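/* Illustrative setup/teardown order (a sketch with hypothetical variables,
 * not a prescribed sequence): the core creates a pool over already-pinned
 * umem pages, attaches the fill queue, and tears the pool down last:
 *
 *	struct xsk_buff_pool *pool;
 *
 *	pool = xp_create(pages, nr_pages, chunks, chunk_size, headroom,
 *			 size, unaligned);
 *	if (!pool)
 *		return -ENOMEM;
 *	xp_set_fq(pool, fq);
 *	...
 *	xp_destroy(pool);
 */
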
/* AF_XDP and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
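
/* The two DMA addresses differ: @frame_dma covers the whole frame, while
 * @dma points at the packet data further into the same frame, past any
 * configured headroom (informal note, inferred from usage).
 */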
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	if (!xskb->pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}

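/* Illustrative driver-side use of the sync helpers (a sketch, not a mandated
 * pattern): sync for the device before handing a buffer to the HW ring, and
 * back for the CPU once the HW has written into it:
 *
 *	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *
 *	xp_dma_sync_for_device(pool, xp_get_dma(xskb), pool->frame_len);
 *	... hardware DMA happens ...
 *	xp_dma_sync_for_cpu(xskb);
 *
 * On DMA-coherent systems dma_need_sync is false and both calls return
 * immediately.
 */
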
/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

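/* Returns true if a descriptor of @len bytes at @addr would spill across a
 * page boundary into a page that is not DMA-contiguous with the previous
 * one; only relevant once the pool has a DMA mapping (dma_pages_cnt != 0).
 */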
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (pool->dma_pages_cnt && cross_pg) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}
	return false;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
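
/* Worked example, assuming the if_xdp.h encoding where the upper 16 bits of
 * an unaligned-mode address carry the offset (XSK_UNALIGNED_BUF_OFFSET_SHIFT
 * is 48):
 *
 *	u64 addr = (3ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x1000;
 *
 *	xp_unaligned_extract_addr(addr);	// 0x1000
 *	xp_unaligned_extract_offset(addr);	// 3
 *	xp_unaligned_add_offset_to_addr(addr);	// 0x1003
 */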

#endif /* XSK_BUFF_POOL_H_ */