// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/* The 'cpumap' is primarily used as a backend map for XDP BPF helper
 * call bpf_redirect_map() and XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do SKB-allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage.  This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
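
/* Illustrative sketch (not part of this file): from the BPF side a cpumap
 * is used like any other redirect map.  The map name, size and target CPU
 * below are made up; only BPF_MAP_TYPE_CPUMAP, struct bpf_cpumap_val and
 * bpf_redirect_map() are the real interface.
 *
 *        #include <linux/bpf.h>
 *        #include <bpf/bpf_helpers.h>
 *
 *        struct {
 *                __uint(type, BPF_MAP_TYPE_CPUMAP);
 *                __uint(max_entries, 64);
 *                __type(key, __u32);
 *                __type(value, struct bpf_cpumap_val);
 *        } cpu_map SEC(".maps");
 *
 *        SEC("xdp")
 *        int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *        {
 *                __u32 dest_cpu = 2;     // hypothetical target CPU
 *
 *                return bpf_redirect_map(&cpu_map, dest_cpu, 0);
 *        }
 */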
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * be stored/queued for at most one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
        void *q[CPU_MAP_BULK_SIZE];
        struct list_head flush_node;
        struct bpf_cpu_map_entry *obj;
        unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
        u32 cpu;    /* kthread CPU and map index */
        int map_id; /* Back reference to map */

        /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
        struct xdp_bulk_queue __percpu *bulkq;

        struct bpf_cpu_map *cmap;

        /* Queue with potential multi-producers, and single-consumer kthread */
        struct ptr_ring *queue;
        struct task_struct *kthread;

        struct bpf_cpumap_val value;
        struct bpf_prog *prog;

        atomic_t refcnt; /* Control when this struct can be free'ed */
        struct rcu_head rcu;

        struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
        struct bpf_map map;
        /* Below members specific for map type */
        struct bpf_cpu_map_entry **cpu_map;
};
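
/* For reference, the user-visible map value this file operates on is
 * struct bpf_cpumap_val from include/uapi/linux/bpf.h; the sketch below
 * mirrors that layout (see the UAPI header for the authoritative copy):
 *
 *        struct bpf_cpumap_val {
 *                __u32 qsize;    // queue size of the per-CPU kthread ring
 *                union {
 *                        int   fd;       // prog fd on map write
 *                        __u32 id;       // prog id on map read
 *                } bpf_prog;
 *        };
 */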

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
             value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
            attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);

        cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
        if (!cmap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&cmap->map, attr);

        /* Pre-limit array size based on NR_CPUS, not final CPU check */
        if (cmap->map.max_entries > NR_CPUS) {
                err = -E2BIG;
                goto free_cmap;
        }

        /* Alloc array for possible remote "destination" CPUs */
        cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                           sizeof(struct bpf_cpu_map_entry *),
                                           cmap->map.numa_node);
        if (!cmap->cpu_map)
                goto free_cmap;

        return &cmap->map;
free_cmap:
        kfree(cmap);
        return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
        struct bpf_cpu_map_entry *rcpu;

        rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

        /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
         * as it waits until all in-flight call_rcu() callbacks complete.
         */
        rcu_barrier();

        /* kthread_stop will wake_up_process and wait for it to complete */
        kthread_stop(rcpu->kthread);
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
        /* The tear-down procedure should have made sure that queue is
         * empty. See __cpu_map_entry_replace() and work-queue
         * invoked cpu_map_kthread_stop(). Catch any broken behaviour
         * gracefully and warn once.
         */
        struct xdp_frame *xdpf;

        while ((xdpf = ptr_ring_consume(ring)))
                if (WARN_ON_ONCE(xdpf))
                        xdp_return_frame(xdpf);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
        if (atomic_dec_and_test(&rcpu->refcnt)) {
                if (rcpu->prog)
                        bpf_prog_put(rcpu->prog);
                /* The queue should be empty at this point */
                __cpu_map_ring_cleanup(rcpu->queue);
                ptr_ring_cleanup(rcpu->queue, NULL);
                kfree(rcpu->queue);
                kfree(rcpu);
        }
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
                                    void **frames, int n,
                                    struct xdp_cpumap_stats *stats)
{
        struct xdp_rxq_info rxq;
        struct xdp_buff xdp;
        int i, nframes = 0;

        if (!rcpu->prog)
                return n;

        rcu_read_lock_bh();

        xdp_set_return_frame_no_direct();
        xdp.rxq = &rxq;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                u32 act;
                int err;

                rxq.dev = xdpf->dev_rx;
                rxq.mem = xdpf->mem;
                /* TODO: report queue_index to xdp_rxq_info */

                xdp_convert_frame_to_buff(xdpf, &xdp);

                act = bpf_prog_run_xdp(rcpu->prog, &xdp);
                switch (act) {
                case XDP_PASS:
                        err = xdp_update_frame_from_buff(&xdp, xdpf);
                        if (err < 0) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                frames[nframes++] = xdpf;
                                stats->pass++;
                        }
                        break;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(xdpf->dev_rx, &xdp,
                                              rcpu->prog);
                        if (unlikely(err)) {
                                xdp_return_frame(xdpf);
                                stats->drop++;
                        } else {
                                stats->redirect++;
                        }
                        break;
                default:
                        bpf_warn_invalid_xdp_action(act);
                        fallthrough;
                case XDP_DROP:
                        xdp_return_frame(xdpf);
                        stats->drop++;
                        break;
                }
        }

        if (stats->redirect)
                xdp_do_flush_map();

        xdp_clear_return_frame_no_direct();

        rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

        return nframes;
}
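
/* Illustrative sketch (not part of this file): a program intended to run
 * here, on the remote CPU, must be loaded as BPF_PROG_TYPE_XDP with
 * expected_attach_type BPF_XDP_CPUMAP (see __cpu_map_load_bpf_program()
 * below).  With libbpf of this era that is expressed via the section
 * prefix "xdp_cpumap/" (newer libbpf spells it "xdp/cpumap"); the program
 * name and logic below are made up:
 *
 *        SEC("xdp_cpumap/filter_on_remote_cpu")
 *        int cpumap_filter(struct xdp_md *ctx)
 *        {
 *                void *data = (void *)(long)ctx->data;
 *                void *data_end = (void *)(long)ctx->data_end;
 *
 *                if (data + 14 > data_end)       // too short for Ethernet
 *                        return XDP_DROP;
 *                return XDP_PASS;        // XDP_PASS'ed frames become SKBs
 *        }
 */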

#define CPUMAP_BATCH 8

static int cpu_map_kthread_run(void *data)
{
        struct bpf_cpu_map_entry *rcpu = data;

        set_current_state(TASK_INTERRUPTIBLE);

        /* When the kthread is given the stop order, rcpu has been
         * disconnected from the map, thus no new packets can enter.
         * Remaining in-flight per-CPU stored packets are flushed to this
         * queue.  Wait honoring the kthread_stop signal until the queue
         * is empty.
         */
        while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
                struct xdp_cpumap_stats stats = {}; /* zero stats */
                unsigned int kmem_alloc_drops = 0, sched = 0;
                gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
                void *frames[CPUMAP_BATCH];
                void *skbs[CPUMAP_BATCH];
                int i, n, m, nframes;
                LIST_HEAD(list);

                /* Release CPU reschedule checks */
                if (__ptr_ring_empty(rcpu->queue)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        /* Recheck to avoid lost wake-up */
                        if (__ptr_ring_empty(rcpu->queue)) {
                                schedule();
                                sched = 1;
                        } else {
                                __set_current_state(TASK_RUNNING);
                        }
                } else {
                        sched = cond_resched();
                }

                /*
                 * The bpf_cpu_map_entry is a single consumer, with this
                 * kthread pinned to a CPU.  Lockless access to the
                 * ptr_ring consume side is valid, as no resize of the
                 * queue is allowed.
                 */
                n = __ptr_ring_consume_batched(rcpu->queue, frames,
                                               CPUMAP_BATCH);
                for (i = 0; i < n; i++) {
                        void *f = frames[i];
                        struct page *page = virt_to_page(f);

                        /* Bring struct page memory area to curr CPU. Read by
                         * build_skb_around via page_is_pfmemalloc(), and when
                         * freed written by page_frag_free call.
                         */
                        prefetchw(page);
                }

                /* Support running another XDP prog on this CPU */
                nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
                if (nframes) {
                        m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
                        if (unlikely(m == 0)) {
                                for (i = 0; i < nframes; i++)
                                        skbs[i] = NULL; /* effect: xdp_return_frame */
                                kmem_alloc_drops += nframes;
                        }
                }

                local_bh_disable();
                for (i = 0; i < nframes; i++) {
                        struct xdp_frame *xdpf = frames[i];
                        struct sk_buff *skb = skbs[i];

                        skb = __xdp_build_skb_from_frame(xdpf, skb,
                                                         xdpf->dev_rx);
                        if (!skb) {
                                xdp_return_frame(xdpf);
                                continue;
                        }

                        list_add_tail(&skb->list, &list);
                }
                netif_receive_skb_list(&list);

                /* Feedback loop via tracepoint */
                trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
                                         sched, &stats);

                local_bh_enable(); /* resched point, may call do_softirq() */
        }
        __set_current_state(TASK_RUNNING);

        put_cpu_map_entry(rcpu);
        return 0;
}
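
/* The enqueue and kthread paths above are observable through the
 * xdp_cpumap_enqueue and xdp_cpumap_kthread tracepoints pulled in from
 * <trace/events/xdp.h>.  A rough, illustrative way to watch them
 * system-wide (exact event names can be verified under
 * /sys/kernel/debug/tracing/events/xdp/):
 *
 *        perf record -a -e xdp:xdp_cpumap_enqueue -e xdp:xdp_cpumap_kthread
 */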

bool cpu_map_prog_allowed(struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_CPUMAP &&
               map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
        struct bpf_prog *prog;

        prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
                bpf_prog_put(prog);
                return -EINVAL;
        }

        rcpu->value.bpf_prog.id = prog->aux->id;
        rcpu->prog = prog;

        return 0;
}

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
                      u32 cpu)
{
        int numa, err, i, fd = value->bpf_prog.fd;
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct bpf_cpu_map_entry *rcpu;
        struct xdp_bulk_queue *bq;

        /* Have map->numa_node, but choose node of redirect target CPU */
        numa = cpu_to_node(cpu);

        rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
        if (!rcpu)
                return NULL;

        /* Alloc percpu bulkq */
        rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
                                           sizeof(void *), gfp);
        if (!rcpu->bulkq)
                goto free_rcu;

        for_each_possible_cpu(i) {
                bq = per_cpu_ptr(rcpu->bulkq, i);
                bq->obj = rcpu;
        }

        /* Alloc queue */
        rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
                                           numa);
        if (!rcpu->queue)
                goto free_bulkq;

        err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
        if (err)
                goto free_queue;

        rcpu->cpu = cpu;
        rcpu->map_id = map->id;
        rcpu->value.qsize = value->qsize;

        if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
                goto free_ptr_ring;

        /* Setup kthread */
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               "cpumap/%d/map:%d", cpu,
                                               map->id);
        if (IS_ERR(rcpu->kthread))
                goto free_prog;

        get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
        get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

        /* Make sure kthread runs on a single CPU */
        kthread_bind(rcpu->kthread, cpu);
        wake_up_process(rcpu->kthread);

        return rcpu;

free_prog:
        if (rcpu->prog)
                bpf_prog_put(rcpu->prog);
free_ptr_ring:
        ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
        kfree(rcpu->queue);
free_bulkq:
        free_percpu(rcpu->bulkq);
free_rcu:
        kfree(rcpu);
        return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
        struct bpf_cpu_map_entry *rcpu;

        /* This cpu_map_entry has been disconnected from the map and one
         * RCU grace period has elapsed.  Thus, XDP cannot queue any
         * new packets and cannot change/set flush_needed that can
         * find this entry.
         */
        rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here, the last put frees rcpu resources */
        put_cpu_map_entry(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() make sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in the queue and percpu bulkq).  A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the
 * memory resources.
 *
 * The rcu callback __cpu_map_entry_free() flushes remaining packets in
 * the percpu bulkq to the queue.  Because the caller map_delete_elem()
 * disables preemption, it cannot call kthread_stop() to make sure the
 * queue is empty.  Instead a work queue is started to stop the kthread,
 * cpu_map_kthread_stop, which waits for an RCU grace period before
 * stopping the kthread and emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
                                    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
        struct bpf_cpu_map_entry *old_rcpu;

        old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
        if (old_rcpu) {
                call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
                INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
                schedule_work(&old_rcpu->kthread_stop_wq);
        }
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 key_cpu = *(u32 *)key;

        if (key_cpu >= map->max_entries)
                return -EINVAL;

        /* notice caller map_delete_elem() uses preempt_disable() */
        __cpu_map_entry_replace(cmap, key_cpu, NULL);
        return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpumap_val cpumap_value = {};
        struct bpf_cpu_map_entry *rcpu;
        /* Array index key corresponds to CPU number */
        u32 key_cpu = *(u32 *)key;

        memcpy(&cpumap_value, value, map->value_size);

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(key_cpu >= cmap->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;
        if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
                return -EOVERFLOW;

        /* Make sure CPU is a valid possible cpu */
        if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
                return -ENODEV;

        if (cpumap_value.qsize == 0) {
                rcpu = NULL; /* Same as deleting */
        } else {
                /* Updating qsize causes re-allocation of bpf_cpu_map_entry */
                rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
                if (!rcpu)
                        return -ENOMEM;
                rcpu->cmap = cmap;
        }
        rcu_read_lock();
        __cpu_map_entry_replace(cmap, key_cpu, rcpu);
        rcu_read_unlock();
        return 0;
}
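
/* Illustrative sketch (not part of this file) of the matching user-space
 * update through libbpf: cpumap_fd and prog_fd are made-up descriptors for
 * an already-created BPF_MAP_TYPE_CPUMAP map and an already-loaded
 * BPF_XDP_CPUMAP program.  Writing qsize == 0 deletes the entry; leaving
 * bpf_prog.fd as 0 configures the CPU without an extra program:
 *
 *        struct bpf_cpumap_val val = {
 *                .qsize = 2048,          // ptr_ring slots for the kthread
 *                .bpf_prog.fd = prog_fd, // optional, 0 means none
 *        };
 *        __u32 dest_cpu = 2;
 *
 *        if (bpf_map_update_elem(cpumap_fd, &dest_cpu, &val, 0))
 *                perror("bpf_map_update_elem");
 */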

static void cpu_map_free(struct bpf_map *map)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 i;

        /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the bpf programs (there can be more than one using this map)
         * have been disconnected from events. Wait for outstanding critical
         * sections in these programs to complete. The rcu critical section
         * only guarantees no further "XDP/bpf-side" reads against
         * bpf_cpu_map->cpu_map. It does __not__ ensure pending flush
         * operations (if any) are complete.
         */

        synchronize_rcu();

        /* For cpu_map the remote CPUs can still be using the entries
         * (struct bpf_cpu_map_entry).
         */
        for (i = 0; i < cmap->map.max_entries; i++) {
                struct bpf_cpu_map_entry *rcpu;

                rcpu = READ_ONCE(cmap->cpu_map[i]);
                if (!rcpu)
                        continue;

                /* bq flush and cleanup happen after an RCU grace period */
                __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
        }
        bpf_map_area_free(cmap->cpu_map);
        kfree(cmap);
}

static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        struct bpf_cpu_map_entry *rcpu;

        if (key >= map->max_entries)
                return NULL;

        rcpu = READ_ONCE(cmap->cpu_map[key]);
        return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_cpu_map_entry *rcpu =
                __cpu_map_lookup_elem(map, *(u32 *)key);

        return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= cmap->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == cmap->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}
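
/* Illustrative sketch (not part of this file): because the key is simply
 * the array index, iterating a cpumap from user space with libbpf's
 * bpf_map_get_next_key() walks indices 0..max_entries-1 and then stops
 * with -ENOENT, matching cpu_map_get_next_key() above (cpumap_fd is a
 * made-up descriptor):
 *
 *        __u32 key = UINT32_MAX; // out-of-range key starts from index 0
 *        __u32 next;
 *
 *        while (!bpf_map_get_next_key(cpumap_fd, &key, &next)) {
 *                // look up 'next' with bpf_map_lookup_elem() if needed
 *                key = next;
 *        }
 */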

static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
        return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
}

static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = cpu_map_alloc,
        .map_free = cpu_map_free,
        .map_delete_elem = cpu_map_delete_elem,
        .map_update_elem = cpu_map_update_elem,
        .map_lookup_elem = cpu_map_lookup_elem,
        .map_get_next_key = cpu_map_get_next_key,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_cpu_map",
        .map_btf_id = &cpu_map_btf_id,
        .map_redirect = cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
        struct bpf_cpu_map_entry *rcpu = bq->obj;
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
        struct ptr_ring *q;
        int i;

        if (unlikely(!bq->count))
                return;

        q = rcpu->queue;
        spin_lock(&q->producer_lock);

        for (i = 0; i < bq->count; i++) {
                struct xdp_frame *xdpf = bq->q[i];
                int err;

                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
                        xdp_return_frame_rx_napi(xdpf);
                }
                processed++;
        }
        bq->count = 0;
        spin_unlock(&q->producer_lock);

        __list_del_clearprev(&bq->flush_node);

        /* Feedback loop via tracepoints */
        trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
                bq_flush_to_queue(bq);

        /* Notice, xdp_buff/page MUST be queued here, long enough for
         * the driver code invoking us to finish, due to driver
         * (e.g. ixgbe) recycle tricks based on page-refcnt.
         *
         * Thus, the incoming xdp_frame is always queued here (else we race
         * with another CPU on page-refcnt and remaining driver code).
         * Queue time is very short, as the driver will invoke the flush
         * operation when completing the napi->poll call.
         */
        bq->q[bq->count++] = xdpf;

        if (!bq->flush_node.prev)
                list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx)
{
        struct xdp_frame *xdpf;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;

        /* Info needed when constructing SKB on remote CPU */
        xdpf->dev_rx = dev_rx;

        bq_enqueue(rcpu, xdpf);
        return 0;
}

void __cpu_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
        struct xdp_bulk_queue *bq, *tmp;

        list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
                bq_flush_to_queue(bq);

                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(bq->obj->kthread);
        }
}
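
/* Illustrative sketch (not part of this file) of the driver-side contract
 * the bulk queues rely on: within one napi->poll() invocation the driver
 * lets its XDP program return XDP_REDIRECT into a cpumap (which ends up in
 * bq_enqueue() above) and calls xdp_do_flush() before returning, which
 * reaches __cpu_map_flush() and wakes the remote kthreads.  The driver
 * name and helper below are made up:
 *
 *        static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *        {
 *                int work = 0;
 *
 *                while (work < budget && mydrv_rx_one_frame(napi)) {
 *                        // ... run XDP prog, xdp_do_redirect() on XDP_REDIRECT ...
 *                        work++;
 *                }
 *                xdp_do_flush();  // flush cpumap/devmap bulk queues on this CPU
 *                return work;
 *        }
 */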

static int __init cpu_map_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
        return 0;
}

subsys_initcall(cpu_map_init);