// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
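
/* Illustrative userspace view (a sketch, not code in this file; error
 * handling omitted, and "addr" is assumed to be an already-populated
 * sockaddr): every listener opts in with SO_REUSEPORT before bind() and
 * binds the same address/port, which places it in the same group.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 *
 * A selection program can then be attached to one member via the
 * SO_ATTACH_REUSEPORT_CBPF/EBPF socket options, which reach
 * reuseport_attach_prog() below.
 */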

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;
	RCU_INIT_POINTER(reuse->prog, NULL);

	return reuse;
}

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);
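
/* Resize helper: allocate an array twice as large, copy the membership
 * across, and repoint every member's sk_reuseport_cb at the new instance.
 * Lookups may still be using the old instance under rcu_read_lock(), so
 * it is only reclaimed after a grace period.
 */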
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "socket already in reuseport group");

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
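
/* Callers sit on the bind()/hash paths of the protocol lookup tables,
 * e.g. inet_reuseport_add_sock() in inet_hashtables.c for TCP and the
 * UDP port hashing code, typically with the hash bucket already locked.
 */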
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
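
/* Run the group's BPF program against @skb. The program's return value is
 * used as an index into the group's socket array; any index >= @socks is
 * treated as "no selection", making the caller fall back to hash choice.
 */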
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data. If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);

		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
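
/* reuseport_attach_prog() below swaps in a new selection program for the
 * whole group and returns the old one for the caller to release. It is
 * reached from the SO_ATTACH_REUSEPORT_CBPF/EBPF setsockopt() handlers
 * (sk_reuseport_attach_filter() and sk_reuseport_attach_bpf() in
 * net/core/filter.c). As an illustrative sketch (in the spirit of the
 * kernel's reuseport selftests, not code in this file), a classic BPF
 * program that spreads packets by receiving CPU could be attached with:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD  | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 *
 * The program's return value is taken as the socket index (see run_bpf()).
 */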
struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);