// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB-allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack,
 * and assigning dedicated CPUs for this stage.  This basically allows
 * for 10G wirespeed pre-filtering via bpf.
 */
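
/* A minimal sketch of the XDP/BPF side, assuming libbpf-style map definitions
 * (illustration only, not part of this file): the program picks a target CPU
 * index and redirects the frame through the cpumap.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_cpumap_val));
 *		__uint(max_entries, 16);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		__u32 cpu = 0;
 *
 *		return bpf_redirect_map(&cpu_map, cpu, XDP_PASS);
 *	}
 */
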
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h> /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU are
 * stored/queued for at most one driver ->poll() call.  It is guaranteed
 * that queueing the frame and the flush operation happen on the same
 * CPU.  Thus, the cpu_map_flush operation can deduce via this_cpu_ptr()
 * which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be free'ed */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

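/* Per-CPU list of xdp_bulk_queue entries holding not-yet-flushed frames;
 * bq_enqueue() links a bulk queue here and __cpu_map_flush() drains the
 * list when the driver completes its napi->poll() cycle.
 */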
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_cmap;

	return &cmap->map;
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{
	struct sk_buff *skb, *tmp;
	struct xdp_buff xdp;
	u32 act;
	int err;

	list_for_each_entry_safe(skb, tmp, listp, list) {
		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_REDIRECT:
			skb_list_del_init(skb);
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			return;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			skb_list_del_init(skb);
			kfree_skb(skb);
			stats->drop++;
			return;
		}
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();

	return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	if (!rcpu->prog)
		return xdp_n;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(!list_empty(list)))
		cpu_map_bpf_prog_run_skb(rcpu, list, stats);

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given the stop order, the rcpu has already been
	 * disconnected from the map, thus no new packets can enter. Remaining
	 * in-flight per-CPU stored packets are flushed to this queue. Wait
	 * honoring the kthread_stop signal until the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m, nframes, xdp_n;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		LIST_HEAD(list);

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to ptr_ring
		 * consume side valid as no-resize allowed of queue.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0, xdp_n = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

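			/* Entries produced by cpu_map_generic_redirect() are
			 * sk_buffs tagged by setting bit 0 of the pointer;
			 * untag them and divert to the skb list, everything
			 * else is an xdp_frame.
			 */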
			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				list_add_tail(&skb->list, &list);
				continue;
			}

			frames[xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

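/* The bpf_prog.fd passed in struct bpf_cpumap_val must refer to an XDP program
 * loaded with expected_attach_type == BPF_XDP_CPUMAP; it runs on the remote
 * CPU over the frames dequeued above. A minimal sketch of such a program
 * (illustration only; the ELF section name depends on the libbpf version):
 *
 *	SEC("xdp/cpumap")
 *	int cpumap_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 */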
static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}

/* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program. The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() make sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq). A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the memory
 * resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * the percpu bulkq to the queue. Because the caller map_delete_elem()
 * disables preemption, kthread_stop() cannot be called here to make sure
 * the queue is empty. Instead a work_queue is started for stopping the
 * kthread, cpu_map_kthread_stop, which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() use preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

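/* Userspace arms an entry by writing a struct bpf_cpumap_val at the CPU index.
 * A minimal sketch, assuming libbpf and already-obtained map_fd/prog_fd
 * (illustration only, not part of this file):
 *
 *	struct bpf_cpumap_val val = {
 *		.qsize = 2048,
 *		.bpf_prog.fd = prog_fd,
 *	};
 *	__u32 cpu = 2;
 *	bpf_map_update_elem(map_fd, &cpu, &val, 0);
 *
 * qsize == 0 behaves like a delete; bpf_prog.fd == 0 means no second-level
 * program is attached to the entry.
 */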
static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
	 * It does __not__ ensure pending flush operations (if any) are
	 * complete.
	 */

	synchronize_rcu();

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
				      __cpu_map_lookup_elem);
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = cpu_map_alloc,
	.map_free = cpu_map_free,
	.map_delete_elem = cpu_map_delete_elem,
	.map_update_elem = cpu_map_update_elem,
	.map_lookup_elem = cpu_map_lookup_elem,
	.map_get_next_key = cpu_map_get_next_key,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_cpu_map",
	.map_btf_id = &cpu_map_btf_id,
	.map_redirect = cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for the
	 * driver code that invoked us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing the napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

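/* Called via xdp_do_flush() when the driver finishes its NAPI poll: drains
 * every xdp_bulk_queue linked on this CPU's flush list into its destination
 * ptr_ring and wakes the consumer kthreads.
 */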
void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);