xsk: Rearrange internal structs for better performance
[linux-block.git] / include/net/xdp_sock_drv.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

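/* Example (illustrative sketch only, not part of this header): a typical
 * zero-copy Tx path peeks descriptors off the pool, posts them to the
 * hardware ring, and reports completions back with xsk_tx_completed().
 * The mydrv_*() helpers and struct mydrv_ring are hypothetical driver
 * internals used purely for illustration.
 *
 *	static void mydrv_xsk_xmit(struct mydrv_ring *ring,
 *				   struct xsk_buff_pool *pool, int budget)
 *	{
 *		struct xdp_desc desc;
 *
 *		while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *			dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *			xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *			mydrv_post_tx_desc(ring, dma, desc.len);
 *		}
 *		xsk_tx_release(pool);
 *
 *		if (xsk_uses_need_wakeup(pool))
 *			xsk_set_tx_need_wakeup(pool); // wakeup policy is driver-specific
 *	}
 *
 *	static void mydrv_tx_clean(struct mydrv_ring *ring,
 *				   struct xsk_buff_pool *pool)
 *	{
 *		u32 done = mydrv_reap_tx_completions(ring);
 *
 *		xsk_tx_completed(pool, done);
 *	}
 */
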
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

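/* Example (illustrative sketch only): enabling zero-copy on a queue usually
 * means looking up the pool registered for that queue id, DMA-mapping it,
 * passing the queue's xdp_rxq_info to the pool, and sizing the hardware Rx
 * buffers from the pool's frame size. struct mydrv_priv and MYDRV_DMA_ATTRS
 * are hypothetical driver details.
 *
 *	static int mydrv_enable_xsk(struct mydrv_priv *priv, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool;
 *		int err;
 *
 *		pool = xsk_get_pool_from_qid(priv->netdev, qid);
 *		if (!pool)
 *			return -EINVAL;
 *
 *		err = xsk_pool_dma_map(pool, priv->dev, MYDRV_DMA_ATTRS);
 *		if (err)
 *			return err;
 *
 *		xsk_pool_set_rxq_info(pool, &priv->rxq[qid].xdp_rxq);
 *		priv->rxq[qid].rx_buf_len = xsk_pool_get_rx_frame_size(pool);
 *		return 0;
 *	}
 */
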
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

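/* Example (illustrative sketch only): refilling a hardware Rx ring from the
 * pool. Each allocated xdp_buff is backed by a chunk of the umem, and
 * xsk_buff_xdp_get_dma() yields the DMA address to program into the Rx
 * descriptor. The mydrv_*() helpers and ring layout are hypothetical.
 *
 *	static u32 mydrv_refill_rx(struct mydrv_ring *ring,
 *				   struct xsk_buff_pool *pool, u32 count)
 *	{
 *		u32 i;
 *
 *		if (!xsk_buff_can_alloc(pool, count))
 *			return 0;
 *
 *		for (i = 0; i < count; i++) {
 *			struct xdp_buff *xdp = xsk_buff_alloc(pool);
 *
 *			if (!xdp)
 *				break;
 *			ring->xdp[i] = xdp;
 *			mydrv_post_rx_desc(ring, xsk_buff_xdp_get_dma(xdp));
 *		}
 *		return i;
 *	}
 */
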
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

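/* Example (illustrative sketch only): on Rx completion, sync the buffer for
 * CPU access before running the XDP program; buffers that are not redirected
 * to the socket go back to the pool via xsk_buff_free(). The mydrv_*() ring
 * fields are hypothetical; error handling is omitted for brevity.
 *
 *	static void mydrv_rx_one(struct mydrv_ring *ring, struct xdp_buff *xdp,
 *				 u32 len)
 *	{
 *		u32 act;
 *
 *		xdp->data_end = xdp->data + len;
 *		xsk_buff_dma_sync_for_cpu(xdp);
 *
 *		act = bpf_prog_run_xdp(ring->xdp_prog, xdp);
 *		switch (act) {
 *		case XDP_REDIRECT:
 *			xdp_do_redirect(ring->netdev, xdp, ring->xdp_prog);
 *			break;
 *		default:
 *			xsk_buff_free(xdp);
 *			break;
 *		}
 *	}
 */
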
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

/* Stub versions used when AF_XDP sockets (CONFIG_XDP_SOCKETS) are compiled out. */

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */