/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer if the producer pointer is touched, and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                                      consumer
 *
 * if (LOAD ->consumer) {  (A)                   LOAD.acq ->producer  (C)
 *    STORE $data                                LOAD $data
 *    STORE.rel ->producer  (B)                  STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that the data is written before the
 * producer pointer is updated. If this barrier were missing, the
 * consumer could observe the producer pointer being set and thus load
 * the data before the producer has written the new data. The consumer
 * would in this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, as speculative loads are not
 * discarded and the CPU does not know there is a dependency between
 * ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, the stores are not performed. The
 * dependency orders both of the stores after the load of ->consumer,
 * so no explicit barrier is needed.
 *
 * (D) ensures that the load of the data cannot be observed to happen
 * after the store of the consumer pointer. Without this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
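
/* Condensed illustration of the pairing above (not part of the original
 * header): a hypothetical single-slot ring built directly on struct
 * xdp_ring, where the slot is empty when producer == consumer. The real
 * helpers below operate on the cached copies kept in struct xsk_queue
 * instead.
 */
static inline bool xsk_barrier_example_produce(struct xdp_ring *r, u64 *slot,
					       u64 data)
{
	/* (A): the ->consumer check and the data store form a control
	 * dependency, so no explicit barrier is needed here.
	 */
	if (READ_ONCE(r->consumer) != r->producer)
		return false;				/* no room */

	*slot = data;					/* STORE $data */
	smp_store_release(&r->producer, r->producer + 1);	/* (B) */
	return true;
}

static inline bool xsk_barrier_example_consume(struct xdp_ring *r, u64 *slot,
					       u64 *data)
{
	if (smp_load_acquire(&r->producer) == r->consumer)	/* (C) */
		return false;				/* nothing new */

	*data = *slot;					/* LOAD $data */
	smp_store_release(&r->consumer, r->consumer + 1);	/* (D) */
	return true;
}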

/* The operations on the rings are the following:
 *
 *                producer                       consumer
 *
 * RESERVE entries                               PEEK in the ring for entries
 * WRITE data into the ring                      READ data from the ring
 * SUBMIT entries                                RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations. A usage sketch for
 * the producer side follows the producer functions further down.
 */

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		/* Skip invalid descriptors */
		q->cached_cons++;
	}

	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
					    u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}
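
/* Hypothetical usage sketch, not part of the original header: drain up to
 * @max addresses from a fill or completion ring, releasing each entry
 * locally as described above and publishing the new consumer pointer to
 * the producer only once at the end. The function name and the lack of
 * error handling are illustrative assumptions.
 */
static inline u32 xskq_example_drain_addrs(struct xsk_queue *q, u64 *addrs, u32 max)
{
	u32 n = 0;

	while (n < max && xskq_cons_peek_addr_unchecked(q, &addrs[n])) {
		xskq_cons_release(q);	/* local state only */
		n++;
	}

	if (n)
		__xskq_cons_release(q);	/* publish ->consumer, (D) */

	return n;
}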

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	/* Undo the most recent, not yet submitted, reservation */
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
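
/* Hypothetical usage sketch, not part of the original header: the
 * RESERVE/WRITE/SUBMIT sequence described at the top of this file, for an
 * Rx-style descriptor ring where the kernel is the producer. The function
 * name is an illustrative assumption; error handling is reduced to
 * reporting a full ring.
 */
static inline bool xskq_example_produce_rx_desc(struct xsk_queue *q, u64 addr, u32 len)
{
	/* RESERVE + WRITE: copy addr/len into the next free slot */
	if (xskq_prod_reserve_desc(q, addr, len))
		return false;	/* ring full */

	/* SUBMIT: publish ->producer with a release store, (B) */
	xskq_prod_submit(q);
	return true;
}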

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */