Commit | Line | Data |
---|---|---|
a71506a4 MK |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Interface for implementing AF_XDP zero-copy support in drivers. | |
3 | * Copyright(c) 2020 Intel Corporation. | |
4 | */ | |
5 | ||
6 | #ifndef _LINUX_XDP_SOCK_DRV_H | |
7 | #define _LINUX_XDP_SOCK_DRV_H | |
8 | ||
9 | #include <net/xdp_sock.h> | |
2b43470a | 10 | #include <net/xsk_buff_pool.h> |
a71506a4 | 11 | |
9ca66afe MM |
12 | #define XDP_UMEM_MIN_CHUNK_SHIFT 11 |
13 | #define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT) | |
14 | ||
b4e352ff MF |
/* Describes a chunk of driver-private data (@bytes bytes starting at @src,
 * placed at offset @off) that is handed to xp_fill_cb() through
 * xsk_pool_fill_cb() below.
 */
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};
20 | ||
8dc4c410 VO |
21 | #ifdef CONFIG_XDP_SOCKETS |
22 | ||
c4655761 MK |
/* Driver-facing AF_XDP entry points; no-op stub versions for the
 * !CONFIG_XDP_SOCKETS case are defined at the bottom of this file.
 */
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
a71506a4 | 34 | |
/* Total headroom reserved in front of each RX buffer: the mandatory
 * XDP_PACKET_HEADROOM plus the headroom the pool was configured with.
 */
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}
39 | ||
/* Size in bytes of one umem chunk (i.e. one buffer) in the pool. */
static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}
44 | ||
c4655761 | 45 | static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool) |
2b43470a | 46 | { |
c4655761 | 47 | return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool); |
2b43470a BT |
48 | } |
49 | ||
/* Propagate the driver's xdp_rxq_info to every buffer in the pool. */
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}
55 | ||
b4e352ff MF |
/* Copy the driver data described by @desc into the pool's buffers via
 * xp_fill_cb().
 */
static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}
61 | ||
ca2e1a62 MF |
/* NAPI id of the RX queue backing this pool, or 0 when busy polling is
 * not compiled in.
 */
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* NOTE(review): reads the first head's rxq info; presumably all
	 * buffers of a pool share the same RX queue -- confirm at pool setup.
	 */
	return pool->heads[0].xdp.rxq->napi_id;
#else
	return 0;
#endif
}
70 | ||
/* Tear down the DMA mapping previously set up by xsk_pool_dma_map(). */
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}
76 | ||
c4655761 MK |
77 | static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool, |
78 | struct device *dev, unsigned long attrs) | |
2b43470a | 79 | { |
c4655761 MK |
80 | struct xdp_umem *umem = pool->umem; |
81 | ||
82 | return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs); | |
2b43470a BT |
83 | } |
84 | ||
/* DMA address of the packet data of an xdp_buff owned by an xsk pool. */
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}
91 | ||
/* DMA address of the start of the frame (including headroom) backing an
 * xdp_buff owned by an xsk pool.
 */
static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}
98 | ||
/* Allocate one RX buffer from the pool; NULL when none are available. */
static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}
103 | ||
1b725b0c MF |
/* True when @desc is the last (or only) descriptor of a packet, i.e. it
 * does not carry the multi-buffer continuation bit.
 */
static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}
108 | ||
47e4075d MK |
/* Batched buffer allocation: fills @xdp with up to @max buffers.
 * Returns as many entries as possible up to max. 0 <= N <= max.
 */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}
114 | ||
/* Cheap check whether @count buffers could currently be allocated. */
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}
119 | ||
/* Return an xdp_buff (and, for a multi-buffer packet, all fragments queued
 * on the pool's xskb_list) to its pool.
 */
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	/* Fast path: single-buffer packet, free just the head. */
	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	/* Free every queued fragment before the head; _safe variant because
	 * entries are deleted while walking.
	 */
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		list_del(&pos->xskb_list_node);
		xp_free(pos);
	}

	/* Clear the frag count so the head can be reused as a plain buffer. */
	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}
138 | ||
24ea5012 MF |
/* Queue @xdp as a fragment of a multi-buffer packet on its pool's
 * xskb_list (consumed later by xsk_buff_get_frag()/xsk_buff_free()).
 */
static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}
145 | ||
146 | static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) | |
147 | { | |
148 | struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); | |
149 | struct xdp_buff *ret = NULL; | |
150 | struct xdp_buff_xsk *frag; | |
151 | ||
152 | frag = list_first_entry_or_null(&xskb->pool->xskb_list, | |
153 | struct xdp_buff_xsk, xskb_list_node); | |
154 | if (frag) { | |
155 | list_del(&frag->xskb_list_node); | |
156 | ret = &frag->xdp; | |
157 | } | |
158 | ||
159 | return ret; | |
160 | } | |
161 | ||
c5114710 MF |
/* Unlink @tail from its pool's xskb_list without freeing it. */
static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->xskb_list_node);
}
168 | ||
/* Return the last fragment queued on the pool's xskb_list.
 * NOTE(review): uses list_last_entry(), so the caller must guarantee the
 * list is non-empty (i.e. @first is part of a multi-buffer packet with at
 * least one queued fragment).
 */
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       xskb_list_node);
	return &frag->xdp;
}
178 | ||
47e4075d MK |
/* (Re)initialize an xdp_buff's data pointers for a frame of @size bytes,
 * leaving XDP_PACKET_HEADROOM in front of the data and clearing any flags
 * (e.g. frags) left over from a previous use of the buffer.
 */
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}
186 | ||
c4655761 MK |
/* DMA address for a raw umem address @addr (TX path, no xdp_buff). */
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}
192 | ||
/* Kernel virtual address for a raw umem address @addr. */
static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}
197 | ||
ce59f968 SF |
/* Mask of all currently defined XDP_TXMD_FLAGS_* bits. */
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		0)

/* A TX metadata block is valid iff no unknown flag bits are set. */
static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}
207 | ||
48eb03dd SF |
208 | static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) |
209 | { | |
ce59f968 SF |
210 | struct xsk_tx_metadata *meta; |
211 | ||
48eb03dd SF |
212 | if (!pool->tx_metadata_len) |
213 | return NULL; | |
214 | ||
ce59f968 SF |
215 | meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len; |
216 | if (unlikely(!xsk_buff_valid_tx_metadata(meta))) | |
217 | return NULL; /* no way to signal the error to the user */ | |
218 | ||
219 | return meta; | |
48eb03dd SF |
220 | } |
221 | ||
9647c57b | 222 | static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool) |
2b43470a BT |
223 | { |
224 | struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); | |
225 | ||
9647c57b MK |
226 | if (!pool->dma_need_sync) |
227 | return; | |
228 | ||
2b43470a BT |
229 | xp_dma_sync_for_cpu(xskb); |
230 | } | |
231 | ||
/* Sync @size bytes at DMA address @dma for device access before TX. */
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
238 | ||
a71506a4 MK |
239 | #else |
240 | ||
/* !CONFIG_XDP_SOCKETS: no-op / failure stubs so drivers build without
 * AF_XDP support.  TX ring operations report nothing available and the
 * wakeup helpers do nothing.
 */
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}
286 | ||
/* !CONFIG_XDP_SOCKETS stubs: pool geometry reads as zero, DMA mapping and
 * rxq-info plumbing are no-ops, and buffer allocation always fails.
 */
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}
342 | ||
1b725b0c MF |
/* !CONFIG_XDP_SOCKETS stubs: multi-buffer and batch-allocation helpers
 * all report "nothing to do".
 */
static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}
383 | ||
c4655761 MK |
/* !CONFIG_XDP_SOCKETS stubs: raw address translation, TX metadata and DMA
 * sync helpers degrade to NULL/zero/no-op.
 */
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}
414 | ||
a71506a4 MK |
415 | #endif /* CONFIG_XDP_SOCKETS */ |
416 | ||
417 | #endif /* _LINUX_XDP_SOCK_DRV_H */ |