// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for XDP BPF helper
 * call bpf_redirect_map() and XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do SKB-allocation and call the normal network stack.
 */
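/* Example BPF-side usage (illustrative sketch only; the map name, sizes and
 * the target CPU index are arbitrary). Each entry is configured from user
 * space by writing a struct bpf_cpumap_val (qsize, optionally bpf_prog.fd)
 * for the chosen CPU index; the XDP program then picks an entry at runtime:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_cpumap_val);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		// queue this frame to the kthread pinned on CPU 2,
 *		// fall back to XDP_PASS if that entry is not populated
 *		return bpf_redirect_map(&cpu_map, 2, XDP_PASS);
 *	}
 */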
/*
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage. This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP redirected to another CPU will
 * be stored/queued for at most one driver ->poll() call. It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	struct completion kthread_running;
	struct rcu_work free_work;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

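/* Per-CPU list of xdp_bulk_queue entries holding frames pending a flush;
 * drained by __cpu_map_flush() at the end of the NAPI poll cycle.
 */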
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (attr->max_entries > NR_CPUS)
		return ERR_PTR(-E2BIG);

	cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map) {
		bpf_map_area_free(cmap);
		return ERR_PTR(-ENOMEM);
	}

	return &cmap->map;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	void *ptr;

	while ((ptr = ptr_ring_consume(ring))) {
		WARN_ON_ONCE(1);
		if (unlikely(__ptr_test_bit(0, &ptr))) {
			__ptr_clear_bit(0, &ptr);
			kfree_skb(ptr);
			continue;
		}
		xdp_return_frame(ptr);
	}
}

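/* Run the entry's attached XDP program in generic (skb) mode on the skbs
 * queued via cpu_map_generic_redirect(). Skbs returning XDP_PASS stay on
 * @listp for netif_receive_skb_list(); redirected or dropped skbs are
 * unlinked from the list here.
 */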
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{
	struct sk_buff *skb, *tmp;
	struct xdp_buff xdp;
	u32 act;
	int err;

	list_for_each_entry_safe(skb, tmp, listp, list) {
		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_REDIRECT:
			skb_list_del_init(skb);
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			return;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			skb_list_del_init(skb);
			kfree_skb(skb);
			stats->drop++;
			return;
		}
	}
}

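/* Run the entry's attached XDP program in native mode on a batch of
 * xdp_frames. Frames that pass are compacted to the front of @frames and
 * their count is returned so the caller can build skbs from them;
 * redirected and dropped frames are consumed here.
 */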
static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();

	return nframes;
}

#define CPUMAP_BATCH 8

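/* Run the optional second XDP program attached to this cpumap entry: native
 * mode on the xdp_frame batch first, then generic mode on any queued skbs,
 * flushing redirects in between. Returns the number of xdp_frames left in
 * @frames for skb construction; with no program attached, all frames pass.
 */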
static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	if (!rcpu->prog)
		return xdp_n;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(!list_empty(list)))
		cpu_map_bpf_prog_run_skb(rcpu, list, stats);

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}

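/* Per-entry kthread, pinned to the entry's CPU: consumes frames and skbs
 * from the ptr_ring in batches, optionally runs the entry's XDP program on
 * them, bulk-allocates skbs for the remaining xdp_frames and hands
 * everything to the network stack via netif_receive_skb_list().
 */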
static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	complete(&rcpu->kthread_running);
	set_current_state(TASK_INTERRUPTIBLE);

	/* When kthread gives stop order, then rcpu has been disconnected
	 * from map, thus no new packets can enter. Remaining in-flight
	 * per CPU stored packets are flushed to this queue. Wait honoring
	 * kthread_stop signal until queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m, nframes, xdp_n;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		LIST_HEAD(list);

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to ptr_ring
		 * consume side valid as no-resize allowed of queue.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0, xdp_n = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

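			/* skbs queued by cpu_map_generic_redirect() carry a
			 * bit-0 tag in the pointer to tell them apart from
			 * native xdp_frame pointers sharing the same ring.
			 */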
			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				list_add_tail(&skb->list, &list);
				continue;
			}

			frames[xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}

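/* Allocate and initialize one cpumap entry for @cpu: the per-CPU bulk
 * queues, the ptr_ring sized to value->qsize, the optional per-entry XDP
 * program, and the kthread bound to the target CPU.
 */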
static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	/* Make sure kthread has been running, so kthread_stop() will not
	 * stop the kthread prematurely and all pending frames or skbs
	 * will be handled by the kthread before kthread_stop() returns.
	 */
	wait_for_completion(&rcpu->kthread_running);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from map and one
	 * RCU grace-period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);

	/* kthread_stop will wake_up_process and wait for it to complete.
	 * cpu_map_kthread_run() makes sure the pointer ring is empty
	 * before exiting.
	 */
	kthread_stop(rcpu->kthread);

	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	/* The queue should be empty at this point */
	__cpu_map_ring_cleanup(rcpu->queue);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_wq, &old_rcpu->free_work);
	}
}

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses rcu_read_lock() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. synchronize_rcu() below not only
	 * guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
	 * (if any) are completed.
	 */
	synchronize_rcu();

	/* The only possible user of bpf_cpu_map_entry is
	 * cpu_map_kthread_run().
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* Stop kthread and cleanup entry directly */
		__cpu_map_entry_free(&rcpu->free_work.work);
	}
	bpf_map_area_free(cmap->cpu_map);
	bpf_map_area_free(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __cpu_map_lookup_elem);
}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_cpu_map);

	/* Currently the dynamically allocated elements are not counted */
	usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
	return usage;
}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= cpu_map_mem_usage,
	.map_btf_id		= &cpu_map_btf_ids[0],
	.map_redirect		= cpu_map_redirect,
};

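/* Flush a per-CPU bulk queue into the destination CPU's ptr_ring (the
 * multi-producer side). Frames that do not fit are freed and reported as
 * drops via the enqueue tracepoint.
 */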
static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as driver will invoke flush
	 * operation, when completing napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

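/* Generic-XDP (skb mode) entry point: skb pointers are tagged with bit 0
 * before being put on the ptr_ring so cpu_map_kthread_run() can distinguish
 * them from xdp_frame pointers.
 */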
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

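/* Called via xdp_do_flush() at the end of the NAPI poll: drain every bulk
 * queue on this CPU's flush list and wake the destination kthreads.
 */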
void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);