/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
	struct list_head xskb_list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
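
/* Example (illustrative sketch, not part of the original header): a zero-copy
 * driver can keep per-frame private state in the cb[] area that follows the
 * embedded xdp_buff, provided its container struct passes the build-time check
 * above. The xdp_buff must stay the first member so the extra fields overlay
 * cb[]. The struct and field names below are hypothetical.
 *
 *	struct my_drv_xdp_buff {
 *		struct xdp_buff xdp;
 *		u16 rx_ring_idx;
 *	};
 *	XSK_CHECK_PRIV_TYPE(struct my_drv_xdp_buff);
 */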

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
	 * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
	 * sockets share a single cq when the same netdev and queue id is shared.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
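
/* Example (illustrative, not part of the original header): each dma_pages[]
 * entry stores the page's DMA address with bit 0 reused as the "next page is
 * physically contiguous" flag, so the flag has to be masked off before the
 * entry is used as an address, as xp_init_xskb_dma() does further down.
 *
 *	dma_addr_t entry = pool->dma_pages[addr >> PAGE_SHIFT];
 *	bool next_pg_contig = entry & XSK_NEXT_PG_CONTIG_MASK;
 *	dma_addr_t page_dma = entry & ~XSK_NEXT_PG_CONTIG_MASK;
 */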

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			(addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
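
/* Example (illustrative sketch, not part of the original header): a typical
 * zero-copy RX flow over the allocation and sync helpers above. Real drivers
 * usually call these through thin xsk_buff_* wrappers rather than the xp_*
 * functions directly; the flow below is a hypothetical outline.
 *
 *	struct xdp_buff *xdp = xp_alloc(pool);
 *	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *
 * Post xp_get_dma(xskb) to the HW RX ring; once the frame has landed, call
 * xp_dma_sync_for_cpu(xskb), run the XDP program on xdp, and either
 * xp_free(xskb) the buffer or hand it up the stack.
 */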

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
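
/* Worked example (not part of the original header): in unaligned mode a
 * descriptor address carries the base address in the low
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT bits and an offset in the bits above them.
 *
 *	addr = (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x3000;
 *	xp_unaligned_extract_addr(addr)       == 0x3000
 *	xp_unaligned_extract_offset(addr)     == 0x100
 *	xp_unaligned_add_offset_to_addr(addr) == 0x3100
 */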

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
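
/* Example (illustrative, not part of the original header): xp_get_handle()
 * converts a filled buffer back into the address format user space sees on the
 * RX ring. With, say, orig_addr == 0x4000, pool->headroom == 256 and the XDP
 * program leaving data 64 bytes past data_hard_start, offset becomes 320:
 * an aligned pool returns 0x4000 + 320, while an unaligned pool keeps the base
 * address intact and encodes 320 in the upper offset bits instead.
 */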

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */