Commit | Line | Data |
---|---|---|
dac09149 BT |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* AF_XDP internal functions | |
c0c77d8f | 3 | * Copyright(c) 2018 Intel Corporation. |
c0c77d8f BT |
4 | */ |
5 | ||
6 | #ifndef _LINUX_XDP_SOCK_H | |
7 | #define _LINUX_XDP_SOCK_H | |
8 | ||
b6459415 | 9 | #include <linux/bpf.h> |
e61e62b9 BT |
10 | #include <linux/workqueue.h> |
11 | #include <linux/if_xdp.h> | |
c0c77d8f | 12 | #include <linux/mutex.h> |
ac98d8aa | 13 | #include <linux/spinlock.h> |
e61e62b9 | 14 | #include <linux/mm.h> |
c0c77d8f BT |
15 | #include <net/sock.h> |
16 | ||
b9b6b68e BT |
17 | struct net_device; |
18 | struct xsk_queue; | |
a71506a4 | 19 | struct xdp_buff; |
e61e62b9 | 20 | |
/* A registered UMEM: the user-space memory region that backs the packet
 * buffers shared between the kernel and one or more AF_XDP sockets.
 * Reference-counted (users) so several sockets can share one UMEM.
 */
struct xdp_umem {
	void *addrs;			/* kernel mapping of the umem area — presumably vmap'd user pages; see pgs */
	u64 size;			/* total size of the umem region in bytes */
	u32 headroom;			/* user-requested headroom before packet data in each chunk */
	u32 chunk_size;			/* size of one frame/chunk in bytes */
	u32 chunks;			/* number of chunks the region is divided into */
	u32 npgs;			/* number of pinned pages in pgs[] */
	struct user_struct *user;	/* owning user, for locked-memory (RLIMIT_MEMLOCK) accounting */
	refcount_t users;		/* number of sockets/pools holding a reference to this umem */
	u8 flags;			/* XDP_UMEM_* flags supplied at registration time */
	bool zc;			/* true when bound in zero-copy mode */
	struct page **pgs;		/* pinned user pages backing the umem */
	int id;				/* umem identifier — NOTE(review): presumably an IDR handle; confirm at allocation site */
	struct list_head xsk_dma_list;	/* DMA mappings of this umem, one per device — TODO confirm element type */
	struct work_struct work;	/* deferred teardown work */
};
c0c77d8f | 37 | |
d817991c BT |
38 | struct xsk_map { |
39 | struct bpf_map map; | |
d817991c | 40 | spinlock_t lock; /* Synchronize map updates */ |
b4fd0d67 | 41 | atomic_t count; |
782347b6 | 42 | struct xdp_sock __rcu *xsk_map[]; |
d817991c BT |
43 | }; |
44 | ||
c0c77d8f BT |
45 | struct xdp_sock { |
46 | /* struct sock must be the first member of struct xdp_sock */ | |
47 | struct sock sk; | |
8ef4e27e | 48 | struct xsk_queue *rx ____cacheline_aligned_in_smp; |
b9b6b68e | 49 | struct net_device *dev; |
c0c77d8f | 50 | struct xdp_umem *umem; |
fbfc504a | 51 | struct list_head flush_node; |
c4655761 | 52 | struct xsk_buff_pool *pool; |
965a9909 | 53 | u16 queue_id; |
ac98d8aa | 54 | bool zc; |
81470b5c | 55 | bool sg; |
455302d1 IM |
56 | enum { |
57 | XSK_READY = 0, | |
58 | XSK_BOUND, | |
59 | XSK_UNBOUND, | |
60 | } state; | |
8ef4e27e | 61 | |
fada7fdc | 62 | struct xsk_queue *tx ____cacheline_aligned_in_smp; |
a5aa8e52 | 63 | struct list_head tx_list; |
bf0bdd13 IM |
64 | /* Protects generic receive. */ |
65 | spinlock_t rx_lock; | |
8aa5a335 CL |
66 | |
67 | /* Statistics */ | |
c497176c | 68 | u64 rx_dropped; |
8aa5a335 CL |
69 | u64 rx_queue_full; |
70 | ||
0402acd6 BT |
71 | struct list_head map_list; |
72 | /* Protects map_list */ | |
73 | spinlock_t map_list_lock; | |
8ef4e27e MK |
74 | /* Protects multiple processes in the control path */ |
75 | struct mutex mutex; | |
7361f9c3 MK |
76 | struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */ |
77 | struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */ | |
c0c77d8f BT |
78 | }; |
79 | ||
#ifdef CONFIG_XDP_SOCKETS

/* Deliver an XDP buffer to an AF_XDP socket via the generic (copy) path. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Queue an XDP buffer to a socket looked up from an XSKMAP redirect. */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Flush all sockets on this CPU's pending-redirect list. */
void __xsk_map_flush(void);

#else

/* Stubs for kernels built without AF_XDP socket support: receive paths
 * report "not supported" and the flush hook is a no-op.
 *
 * NOTE(review): xsk_generic_rcv returns -ENOTSUPP while __xsk_map_redirect
 * returns -EOPNOTSUPP; the asymmetry looks intentional (both values are
 * user-visible) — confirm against callers before unifying.
 */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */
103 | ||
c0c77d8f | 104 | #endif /* _LINUX_XDP_SOCK_H */ |