/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
	struct list_head xskb_list_node;
};

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
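
/* Usage sketch (hypothetical driver type, not part of this header): a driver
 * that keeps per-buffer state in the cb[] area can assert at build time that
 * its type still fits within XSK_PRIV_MAX bytes:
 *
 *	struct foo_xsk_priv {
 *		u32 tag;
 *	};
 *	XSK_CHECK_PRIV_TYPE(struct foo_xsk_priv);
 *
 * The BUILD_BUG_ON() fires at compile time if the type outgrows cb[].
 */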

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in SKB mode. Two cases to
	 * protect: the NAPI TX thread and sendmsg error paths in the SKB
	 * destructor callback, and sockets sharing a single cq because they
	 * share the same netdev and queue id.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
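
/* Example of how the flag is consumed (mirrors xp_init_xskb_dma() and
 * xp_desc_crosses_non_contig_pg() below): bit 0 of a dma_pages[] entry says
 * the next page is physically contiguous, and must be masked off before the
 * entry is used as a DMA address:
 *
 *	dma_addr_t entry = pool->dma_pages[addr >> PAGE_SHIFT];
 *	bool next_pg_contig = entry & XSK_NEXT_PG_CONTIG_MASK;
 *	dma_addr_t pg_dma = entry & ~XSK_NEXT_PG_CONTIG_MASK;
 */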

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
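
/* Worked example for xp_init_xskb_dma(), assuming PAGE_SIZE == 4096,
 * pool->headroom == 256 and XDP_PACKET_HEADROOM == 256: for addr 0x1800,
 * frame_dma is the DMA address of umem page 1 plus 0x800, and xskb->dma
 * (where packet data lands) is frame_dma + 512.
 */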

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
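
/* RX-side flow sketch for a zero-copy driver (hedged: real drivers go through
 * the xsk_buff_*() wrappers in net/xdp_sock_drv.h rather than calling xp_*()
 * directly). The shape of the calls is:
 *
 *	struct xdp_buff *xdp = xp_alloc(pool);
 *	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *
 * Post xp_get_dma(xskb) to the RX ring; on completion, xp_dma_sync_for_cpu(xskb)
 * and hand &xskb->xdp to the XDP program, or xp_free(xskb) on error.
 */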

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline bool xp_mb_desc(struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}
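
/* Worked example of the unaligned encoding: XSK_UNALIGNED_BUF_OFFSET_SHIFT is
 * 48, so for addr == (2ULL << 48) | 0x1000 the base is 0x1000, the offset is
 * 2, and xp_unaligned_add_offset_to_addr() returns 0x1002.
 */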

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	/* Unaligned pools recycle released buffers through the free_heads stack. */
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
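
/* Example: with pool->headroom == 256 and xdp.data sitting 64 bytes past
 * data_hard_start, offset is 320. An aligned pool returns orig_addr + 320;
 * an unaligned pool packs the offset into the upper bits instead:
 * orig_addr + (320ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT).
 */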

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */