Commit | Line | Data |
---|---|---|
dac09149 BT |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* AF_XDP internal functions | |
c0c77d8f | 3 | * Copyright(c) 2018 Intel Corporation. |
c0c77d8f BT |
4 | */ |
5 | ||
6 | #ifndef _LINUX_XDP_SOCK_H | |
7 | #define _LINUX_XDP_SOCK_H | |
8 | ||
b6459415 | 9 | #include <linux/bpf.h> |
e61e62b9 BT |
10 | #include <linux/workqueue.h> |
11 | #include <linux/if_xdp.h> | |
c0c77d8f | 12 | #include <linux/mutex.h> |
ac98d8aa | 13 | #include <linux/spinlock.h> |
e61e62b9 | 14 | #include <linux/mm.h> |
c0c77d8f BT |
15 | #include <net/sock.h> |
16 | ||
b9b6b68e BT |
17 | struct net_device; |
18 | struct xsk_queue; | |
a71506a4 | 19 | struct xdp_buff; |
e61e62b9 | 20 | |
/* A registered userspace memory area (umem) backing AF_XDP sockets.
 * Reference-counted via @users; final teardown is deferred through @work.
 */
struct xdp_umem {
	void *addrs;			/* kernel-side mapping of the area */
	u64 size;			/* total size of the umem in bytes */
	u32 headroom;			/* per-chunk headroom requested at setup */
	u32 chunk_size;			/* size of each frame/chunk */
	u32 chunks;			/* number of chunks in the area */
	u32 npgs;			/* number of backing pages */
	struct user_struct *user;	/* presumably for locked-memory accounting — confirm at pin site */
	refcount_t users;		/* holders of this umem */
	u8 flags;			/* registration flags (XDP_UMEM_*) */
	bool zc;			/* zero-copy mode enabled */
	struct page **pgs;		/* backing page array (npgs entries) */
	int id;				/* umem identifier; allocation scheme not visible here */
	struct list_head xsk_dma_list;	/* DMA mappings attached to this umem */
	struct work_struct work;	/* deferred destruction work */
};
c0c77d8f | 37 | |
d817991c BT |
38 | struct xsk_map { |
39 | struct bpf_map map; | |
d817991c | 40 | spinlock_t lock; /* Synchronize map updates */ |
782347b6 | 41 | struct xdp_sock __rcu *xsk_map[]; |
d817991c BT |
42 | }; |
43 | ||
c0c77d8f BT |
44 | struct xdp_sock { |
45 | /* struct sock must be the first member of struct xdp_sock */ | |
46 | struct sock sk; | |
8ef4e27e | 47 | struct xsk_queue *rx ____cacheline_aligned_in_smp; |
b9b6b68e | 48 | struct net_device *dev; |
c0c77d8f | 49 | struct xdp_umem *umem; |
fbfc504a | 50 | struct list_head flush_node; |
c4655761 | 51 | struct xsk_buff_pool *pool; |
965a9909 | 52 | u16 queue_id; |
ac98d8aa | 53 | bool zc; |
455302d1 IM |
54 | enum { |
55 | XSK_READY = 0, | |
56 | XSK_BOUND, | |
57 | XSK_UNBOUND, | |
58 | } state; | |
8ef4e27e | 59 | |
fada7fdc | 60 | struct xsk_queue *tx ____cacheline_aligned_in_smp; |
a5aa8e52 | 61 | struct list_head tx_list; |
bf0bdd13 IM |
62 | /* Protects generic receive. */ |
63 | spinlock_t rx_lock; | |
8aa5a335 CL |
64 | |
65 | /* Statistics */ | |
c497176c | 66 | u64 rx_dropped; |
8aa5a335 CL |
67 | u64 rx_queue_full; |
68 | ||
0402acd6 BT |
69 | struct list_head map_list; |
70 | /* Protects map_list */ | |
71 | spinlock_t map_list_lock; | |
8ef4e27e MK |
72 | /* Protects multiple processes in the control path */ |
73 | struct mutex mutex; | |
7361f9c3 MK |
74 | struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */ |
75 | struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */ | |
c0c77d8f BT |
76 | }; |
77 | ||
#ifdef CONFIG_XDP_SOCKETS

/* Receive @xdp on socket @xs via the generic (copy) receive path. */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
/* Enqueue @xdp toward @xs as part of an XDP_REDIRECT; completed by a
 * later __xsk_map_flush().
 */
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

#else

/* Stubs for kernels built without AF_XDP socket support. */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	/* NOTE(review): this stub returns -ENOTSUPP while the redirect stub
	 * below returns -EOPNOTSUPP; the inconsistency looks unintentional,
	 * but callers may depend on it — confirm before unifying.
	 */
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */
101 | ||
c0c77d8f | 102 | #endif /* _LINUX_XDP_SOCK_H */ |