// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

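/* The map is reference-counted: each tracking node created for a socket
 * entry takes a reference via xsk_map_inc() and drops it via
 * xsk_map_put(), so the map cannot disappear while sockets are still
 * stored in it.
 */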
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

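/* A map node ties one socket to one slot (map_entry) in the map and
 * holds a reference on the map for as long as the socket occupies that
 * slot.
 */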
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

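/* Each socket keeps its own list of the map slots it occupies,
 * protected by xs->map_list_lock. xsk_map_sock_delete() drops every
 * node that refers to the given slot.
 */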
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

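/* Creating an XSKMAP requires CAP_NET_ADMIN. Keys and values are both
 * 32 bits: the key is an index into the array, the value (on update) is
 * an AF_XDP socket fd. The memory charge covers the entry array plus
 * one per-CPU flush list, used when flushing XDP_REDIRECT traffic.
 */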
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int cpu, err, numa_node;
	struct xsk_map *m;
	u64 cost, size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);
	cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());

	err = bpf_map_charge_init(&mem, cost);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	m->flush_list = alloc_percpu(struct list_head);
	if (!m->flush_list) {
		bpf_map_charge_finish(&m->map.memory);
		bpf_map_area_free(m);
		return ERR_PTR(-ENOMEM);
	}

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

	return &m->map;
}

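/* Clear the map from the redirect paths and wait a grace period via
 * synchronize_net() before tearing it down, so no in-flight XDP
 * program can still reference it.
 */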
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	free_percpu(m->flush_list);
	bpf_map_area_free(m);
}

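/* Standard array-map iteration: a NULL or out-of-range key restarts
 * from index 0, and the last index returns -ENOENT to end iteration.
 */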
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

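/* Inline the lookup at verifier time instead of calling a helper. The
 * emitted sequence bounds-checks the key, scales it by the size of an
 * entry pointer, adds the offset of the xsk_map[] array, and loads the
 * entry; an out-of-range index returns NULL (0).
 */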
static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

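/* Lookup from a BPF program must run under RCU. Lookup from the bpf(2)
 * syscall side is not supported and reports -EOPNOTSUPP.
 */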
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

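/* Update takes an AF_XDP socket fd as the value, resolves it to the
 * socket, and publishes it in the chosen slot under m->lock. A tracking
 * node is allocated up front so the socket can find (and clear) this
 * slot again when it is torn down.
 */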
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	if (!xsk_is_setup_for_bpf_map(xs)) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

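/* Clear a slot and unhook the old socket's tracking node, if any. */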
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

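/* Called from the socket side on teardown: clear the slot only if it
 * still points at this socket.
 */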
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

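/* A minimal sketch of how a BPF program would typically use this map
 * (assumed user-side declaration, not part of this file; names are
 * illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		// Redirect the frame to the AF_XDP socket bound to the
 *		// receiving queue, if one is stored at that index.
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
 *	}
 */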
const struct bpf_map_ops xsk_map_ops = {
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};