/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies to driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

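/* A minimal sketch of the register/unregister flow described above, as
 * a driver might use it around RX-ring setup/teardown. The mydrv_*
 * names and the ring struct layout are hypothetical, not part of this
 * header.
 *
 *	static int mydrv_setup_rx_ring(struct mydrv_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->queue_index, 0);
 *		if (err)
 *			return err;
 *
 *		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *						 MEM_TYPE_PAGE_SHARED, NULL);
 *		if (err)
 *			xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 *
 *	static void mydrv_free_rx_ring(struct mydrv_ring *ring)
 *	{
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		// ... purge and free ring memory ...
 *	}
 */
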
enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,	  /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
};

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}

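/* A minimal sketch of the two helpers above on an RX path. The page,
 * len and PAGE_SIZE frame size are illustrative assumptions; drivers
 * derive these from their own ring/buffer layout.
 *
 *	struct xdp_buff xdp;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
 *			 len, false);	// meta_valid=false: no metadata yet
 */
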
/* Reserve memory area at end of data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice the same area (and size)
 * is used for XDP_PASS, when constructing the SKB via build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
};

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zero'ed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
					  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
}

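/* A minimal sketch of going back from an xdp_frame to an xdp_buff,
 * e.g. before running an XDP program on a redirected frame (as cpumap
 * does); rxq handling is omitted and the bpf_prog_run_xdp() usage is
 * only illustrative.
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_convert_frame_to_buff(xdpf, &xdp);
 *	act = bpf_prog_run_xdp(prog, &xdp);
 */
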
static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}

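/* A minimal sketch of converting a buff to a frame when the packet
 * leaves NAPI context (e.g. queued for transmission on another CPU);
 * mydrv_enqueue() is a hypothetical consumer.
 *
 *	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);
 *
 *	if (unlikely(!xdpf))
 *		return -EOVERFLOW;	// not enough head/tailroom
 *	mydrv_enqueue(xdpf);
 */
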
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);

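/* A minimal sketch of bulk-freeing frames, e.g. on a TX completion
 * path; frames[] and n are illustrative, and the RCU read lock shown
 * here reflects how existing callers wrap the bulk API.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */
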
/* When sending an xdp_frame into the network stack, there is no
 * return point callback, which is needed to release e.g. DMA-mapping
 * resources with page_pool. Thus, have an explicit function to release
 * frame resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;

	/* Currently only page_pool needs this */
	if (mem->type == MEM_TYPE_PAGE_POOL)
		__xdp_release_frame(xdpf->data, mem);
}

int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev,
		     u32 queue_index, unsigned int napi_id);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE	XDP_BULK_QUEUE_SIZE

#endif /* __LINUX_NET_XDP_H__ */