// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths to get into the devmap control plane: bpf
 * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
 * will invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use call_rcu()
 * to wait for an rcu grace period before freeing the old data structures. This
 * ensures the datapath always has a valid copy. However, the datapath does a
 * "flush" operation that pushes any pending packets in the driver outside the
 * RCU critical section. Each bpf_dtab_netdev tracks these pending operations
 * using a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed
 * until this list is empty, indicating outstanding flush operations have
 * completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search for net devices in the map structure that
 * contain a reference to the net device and remove them. This is a two step
 * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
 * check to see if the ifindex is the same as the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. As the
 * notifier hook walks the map we know that new dev references cannot be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the two
 * types of devmap; only the lookup and insertion code differs.
 */
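
/* Illustrative example (not part of this file): a minimal XDP program that
 * redirects packets through a devmap with bpf_redirect_map(), as described
 * above. The map name "tx_ports", the sizes and the section names are
 * assumptions made for this sketch only.
 *
 *	struct bpf_map_def SEC("maps") tx_ports = {
 *		.type        = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u32),	// value holds an ifindex
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		// redirect every packet to the device stored at index 0
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 */
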
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct bpf_dtab_netdev;

struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev_rx;
	struct bpf_dtab_netdev *obj;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_dtab *dtab;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
	unsigned int idx; /* keep track of map index for tracepoint */
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	struct list_head __percpu *flush_list;
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	int err, cpu;
	u64 cost;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += sizeof(struct list_head) * num_possible_cpus();

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);

		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += sizeof(struct hlist_head) * dtab->n_buckets;
	}

	/* if map size is larger than memlock limit, reject it */
	err = bpf_map_charge_init(&dtab->map.memory, cost);
	if (err)
		return -EINVAL;

	dtab->flush_list = alloc_percpu(struct list_head);
	if (!dtab->flush_list)
		goto free_charge;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_percpu;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
			goto free_map_area;

		spin_lock_init(&dtab->index_lock);
	}

	return 0;

free_map_area:
	bpf_map_area_free(dtab->netdev_map);
free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
	bpf_map_charge_finish(&dtab->map.memory);
	return -ENOMEM;
}
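
/* Illustrative user space sketch (not part of this file): creating a devmap
 * with the attribute layout the sanity checks above expect (4 byte key, 4 byte
 * value, where the value is an ifindex). The libbpf bpf_create_map() call and
 * the concrete values are assumptions for the example.
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP,
 *				    sizeof(__u32),	// key: array index
 *				    sizeof(__u32),	// value: ifindex
 *				    64, 0);
 *	__u32 key = 0, ifindex = 3;			// hypothetical values
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 */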

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		kfree(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush list to empty on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new items will be added.
	 */
	for_each_online_cpu(cpu) {
		struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);

		while (!list_empty(flush_list))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		free_percpu(dev->bulkq);
		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_list);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab->dev_index_head);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				     void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
		       bool in_napi_ctx)
{
	struct bpf_dtab_netdev *obj = bq->obj;
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	__list_del_clearprev(&bq->flush_node);
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		if (likely(in_napi_ctx))
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called by
 * the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the flush list
 * is empty before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	rcu_read_lock();
	list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
		bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
	rcu_read_unlock();
}
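
/* Illustrative driver-side sketch (not part of this file): the shape of a NAPI
 * poll routine that honours the requirement above, i.e. the flush runs before
 * poll() returns. The helpers around the RX loop are assumptions for the
 * sketch; only xdp_do_redirect() and xdp_do_flush_map() are real kernel APIs.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = 0;
 *
 *		while (done < budget && example_fetch_rx_buff(napi, &xdp)) {
 *			if (example_run_xdp(napi, &xdp) == XDP_REDIRECT)
 *				xdp_do_redirect(napi->dev, &xdp, xdp_prog);
 *			done++;
 *		}
 *
 *		// flush frames queued on the per-cpu bulk queues
 *		xdp_do_flush_map();
 *		return done;
 *	}
 */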

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until after
 * reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0, true);

	/* Ingress dev_rx will be the same for all xdp_frame's in the
	 * bulk_queue, because bq is stored per-CPU and must be flushed
	 * at the end of the net_device driver's NAPI function.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);

	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}
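
/* Illustrative BPF-side sketch (not part of this file): because lookups return
 * a read-only pointer to dev->ifindex (BPF_F_RDONLY_PROG is forced in
 * dev_map_init_map()), a program can inspect an entry before redirecting. The
 * map name "tx_ports" and the key value are assumptions for the example.
 *
 *	__u32 key = 0;
 *	__u32 *ifindex = bpf_map_lookup_elem(&tx_ports, &key);
 *
 *	if (ifindex && *ifindex)
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	return XDP_PASS;
 */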

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								 *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
		}
		rcu_read_unlock();
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because the driver side rcu_read_lock/unlock only protects the
	 * running XDP program. For pending flush operations the dev and ctx
	 * are stored in another per cpu map. Additionally, the driver tear
	 * down ensures all soft irqs are complete before removing the net
	 * device in the case of dev_put equals zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    u32 ifindex,
						    unsigned int idx)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev;
	struct xdp_bulk_queue *bq;
	int cpu;

	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
					sizeof(void *), gfp);
	if (!dev->bulkq) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	for_each_possible_cpu(cpu) {
		bq = per_cpu_ptr(dev->bulkq, cpu);
		bq->obj = dev;
	}

	dev->dev = dev_get_by_index(net, ifindex);
	if (!dev->dev) {
		free_percpu(dev->bulkq);
		kfree(dev);
		return ERR_PTR(-EINVAL);
	}

	dev->idx = idx;
	dev->dtab = dtab;

	return dev;
}

static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, ifindex, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				      void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 ifindex = *(u32 *)value;
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	if (unlikely(map_flags > BPF_EXIST || !ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, ifindex, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					  map, key, value, map_flags);
}
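
/* Illustrative user space sketch (not part of this file): with
 * BPF_MAP_TYPE_DEVMAP_HASH the key is typically the ifindex itself, so sparse
 * ifindex values do not leave holes the way they would in the array based
 * devmap. The device name and the libbpf call are assumptions for the example.
 *
 *	__u32 ifindex = if_nametoindex("eth0");	// hypothetical device
 *
 *	bpf_map_update_elem(map_fd, &ifindex, &ifindex, 0);
 */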

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);