/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
        struct xdp_buff xdp;
        u8 cb[XSK_PRIV_MAX];
        dma_addr_t dma;
        dma_addr_t frame_dma;
        struct xsk_buff_pool *pool;
        u64 orig_addr;
        struct list_head free_list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))

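/* XSK_CHECK_PRIV_TYPE() guards the cb[] scratch area above: a zero-copy
 * driver may wrap struct xdp_buff with private per-frame state, which then
 * overlays cb[] inside struct xdp_buff_xsk. A minimal sketch, with a
 * hypothetical driver type (not part of this header):
 *
 *	struct my_drv_xdp_buff {
 *		struct xdp_buff xdp;	// must be first; overlays xdp_buff_xsk.xdp
 *		u32 rx_ring_idx;	// lands in xdp_buff_xsk.cb[]
 *	};
 *
 *	XSK_CHECK_PRIV_TYPE(struct my_drv_xdp_buff); // build error if cb[] overflows
 */
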
struct xsk_dma_map {
        dma_addr_t *dma_pages;
        struct device *dev;
        struct net_device *netdev;
        refcount_t users;
        struct list_head list; /* Protected by the RTNL_LOCK */
        u32 dma_pages_cnt;
        bool dma_need_sync;
};

struct xsk_buff_pool {
        /* Members only used in the control path first. */
        struct device *dev;
        struct net_device *netdev;
        struct list_head xsk_tx_list;
        /* Protects modifications to the xsk_tx_list */
        spinlock_t xsk_tx_list_lock;
        refcount_t users;
        struct xdp_umem *umem;
        struct work_struct work;
        struct list_head free_list;
        u32 heads_cnt;
        u16 queue_id;

        /* Data path members as close to free_heads at the end as possible. */
        struct xsk_queue *fq ____cacheline_aligned_in_smp;
        struct xsk_queue *cq;
        /* For performance reasons, each buff pool has its own array of dma_pages
         * even when they are identical.
         */
        dma_addr_t *dma_pages;
        struct xdp_buff_xsk *heads;
        struct xdp_desc *tx_descs;
        u64 chunk_mask;
        u64 addrs_cnt;
        u32 free_list_cnt;
        u32 dma_pages_cnt;
        u32 free_heads_cnt;
        u32 headroom;
        u32 chunk_size;
        u32 chunk_shift;
        u32 frame_len;
        u8 cached_need_wakeup;
        bool uses_need_wakeup;
        bool dma_need_sync;
        bool unaligned;
        void *addrs;
        /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
         * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
         * sockets share a single cq when the same netdev and queue id is shared.
         */
        spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

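/* Because each dma_pages[] entry is page-aligned, bit 0 doubles as a flag
 * saying that the *next* page is contiguous in DMA space. A sketch of how
 * the mapping code can derive the flag (illustrative; see xp_dma_map() for
 * the real logic):
 *
 *	if (dma_pages[i] + PAGE_SIZE == dma_pages[i + 1])
 *		dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
 */
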
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
                         struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                     u64 addr)
{
        xskb->orig_addr = addr;
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
                                    dma_addr_t *dma_pages, u64 addr)
{
        xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
                          (addr & ~PAGE_MASK);
        xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

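/* Worked example for xp_init_xskb_dma(), with illustrative numbers: for
 * PAGE_SIZE = 4096 and addr = 0x1800, the frame lives in page 1 at offset
 * 0x800, so frame_dma = (dma_pages[1] & ~XSK_NEXT_PG_CONTIG_MASK) + 0x800;
 * the masking strips the contiguity flag from bit 0. dma then points past
 * the pool headroom and XDP_PACKET_HEADROOM, i.e. at the packet data itself.
 */
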
/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
        return xskb->frame_dma;
}

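/* A minimal sketch of the zero-copy Rx refill pattern built on the batch
 * API above; the Rx descriptor ring (my_rx_ring) is hypothetical:
 *
 *	struct xdp_buff *bufs[64];
 *	u32 i, n;
 *
 *	n = xp_alloc_batch(pool, bufs, 64);
 *	for (i = 0; i < n; i++) {
 *		struct xdp_buff_xsk *xskb;
 *
 *		xskb = container_of(bufs[i], struct xdp_buff_xsk, xdp);
 *		my_rx_ring[i].dma = xp_get_dma(xskb);
 *	}
 */
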
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
        xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
{
        if (!pool->dma_need_sync)
                return;

        xp_dma_sync_for_device_slow(pool, dma, size);
}

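/* Sketch of the Tx-side pairing: the umem frame is transmitted in place, so
 * a driver resolves the descriptor address to a DMA address and hands the
 * bytes to the device before programming its Tx ring (ring details are
 * driver-specific and omitted):
 *
 *	dma_addr_t dma = xp_raw_get_dma(pool, desc->addr);
 *
 *	xp_dma_sync_for_device(pool, dma, desc->len);
 */
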
static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                                 u64 addr, u32 len)
{
        bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

        if (likely(!cross_pg))
                return false;

        return pool->dma_pages_cnt &&
               !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

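/* Example: with PAGE_SIZE = 4096, a descriptor at addr = 0xff0 with
 * len = 0x40 spills 0x30 bytes into the following page, so it is only
 * usable if bit 0 of dma_pages[addr >> PAGE_SHIFT] marks that next page
 * as DMA-contiguous.
 */
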
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
        return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
        return xp_unaligned_extract_addr(addr) +
               xp_unaligned_extract_offset(addr);
}

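/* In unaligned mode, a 64-bit ring address packs the frame's base address
 * into the low 48 bits (XSK_UNALIGNED_BUF_ADDR_MASK) and the offset into
 * the upper 16 bits. Worked example: addr = 0x0010000000001000 decodes to
 * base 0x1000 plus offset 0x10, i.e. an absolute address of 0x1010.
 */
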
static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
        return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
        if (xskb->pool->unaligned)
                xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
        u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

        offset += xskb->pool->headroom;
        if (!xskb->pool->unaligned)
                return xskb->orig_addr + offset;
        return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

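/* xp_get_handle() is the inverse of the unaligned extract helpers above:
 * offset records how far xdp.data has advanced past data_hard_start (for
 * example XDP_PACKET_HEADROOM right after an allocation) plus the pool
 * headroom. Aligned pools return a plain absolute address; unaligned pools
 * pack the offset into the upper 16 bits, matching
 * xp_unaligned_extract_offset().
 */
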
#endif /* XSK_BUFF_POOL_H_ */