// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do the SKB allocation and call the normal network stack.
 */
/*
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack,
 * and assigning dedicated CPUs for this stage. This basically allows
 * for 10G wirespeed pre-filtering via bpf.
 */
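
/* Usage sketch (illustrative only, not part of this file): an XDP program
 * selects a destination CPU index and redirects into a BPF_MAP_TYPE_CPUMAP
 * entry via bpf_redirect_map(). The map name, size and CPU index below are
 * arbitrary example values:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_cpumap_val));
 *		__uint(max_entries, 16);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int redirect_to_cpu(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&cpu_map, 2, XDP_PASS);
 *	}
 */
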
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
#include <net/hotdata.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>
#include <net/gro.h>

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * at most be stored/queued for one driver ->poll() call. It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;
	struct gro_node gro;

	struct completion kthread_running;
	struct rcu_work free_work;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (attr->max_entries > NR_CPUS)
		return ERR_PTR(-E2BIG);

	cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map) {
		bpf_map_area_free(cmap);
		return ERR_PTR(-ENOMEM);
	}

	return &cmap->map;
}
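
/* Userspace sketch (illustrative, assumes libbpf): creating a cpumap whose
 * attributes satisfy the checks in cpu_map_alloc() above -- 4-byte keys,
 * a struct bpf_cpumap_val value and at most NR_CPUS entries. The map name
 * and function name are example placeholders:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	int create_cpumap(unsigned int nr_entries)
 *	{
 *		return bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *				      sizeof(__u32),
 *				      sizeof(struct bpf_cpumap_val),
 *				      nr_entries, NULL);
 *	}
 */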

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	void *ptr;

	while ((ptr = ptr_ring_consume(ring))) {
		WARN_ON_ONCE(1);
		if (unlikely(__ptr_test_bit(0, &ptr))) {
			__ptr_clear_bit(0, &ptr);
			kfree_skb(ptr);
			continue;
		}
		xdp_return_frame(ptr);
	}
}

static u32 cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				    void **skbs, u32 skb_n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_buff xdp;
	u32 act, pass = 0;
	int err;

	for (u32 i = 0; i < skb_n; i++) {
		struct sk_buff *skb = skbs[i];

		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			skbs[pass++] = skb;
			break;
		case XDP_REDIRECT:
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			napi_consume_skb(skb, true);
			stats->drop++;
			break;
		}
	}

	stats->pass += pass;

	return pass;
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq = {};
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem.type = xdpf->mem_type;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();
	stats->pass += nframes;

	return nframes;
}

#define CPUMAP_BATCH 8

struct cpu_map_ret {
	u32 xdp_n;
	u32 skb_n;
};

static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				 void **skbs, struct cpu_map_ret *ret,
				 struct xdp_cpumap_stats *stats)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	if (!rcpu->prog)
		goto out;

	rcu_read_lock();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

	ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
	if (unlikely(ret->skb_n))
		ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n,
						      stats);

	if (stats->redirect)
		xdp_do_flush();

	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();

out:
	if (unlikely(ret->skb_n) && ret->xdp_n)
		memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs));
}

static void cpu_map_gro_flush(struct bpf_cpu_map_entry *rcpu, bool empty)
{
	/*
	 * If the ring is not empty, there'll be a new iteration soon, and we
	 * only need to do a full flush if a tick is long (> 1 ms).
	 * If the ring is empty, to not hold GRO packets in the stack for too
	 * long, do a full flush.
	 * This is equivalent to how NAPI decides whether to perform a full
	 * flush.
	 */
	gro_flush(&rcpu->gro, !empty && HZ >= 1000);
	gro_normal_list(&rcpu->gro);
}

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;
	unsigned long last_qs = jiffies;
	u32 packets = 0;

	complete(&rcpu->kthread_running);
	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread is given the stop order, the rcpu has already been
	 * disconnected from the map, so no new packets can enter. Remaining
	 * in-flight per-CPU stored packets are flushed to this queue. Honor
	 * the kthread_stop signal, but wait until the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		struct cpu_map_ret ret = { };
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		u32 i, n, m;
		bool empty;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
				last_qs = jiffies;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			rcu_softirq_qs_periodic(last_qs);
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is a single consumer, with this
		 * kthread pinned to its CPU. Lockless access to the ptr_ring
		 * consume side is valid, as resizing the queue is not allowed.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				skbs[ret.skb_n++] = skb;
				continue;
			}

			frames[ret.xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring the struct page memory area to the current
			 * CPU. It is read by build_skb_around() via
			 * page_is_pfmemalloc(), and written when freed by
			 * page_frag_free().
			 */
			prefetchw(page);
		}

		local_bh_disable();

		/* Support running another XDP prog on this CPU */
		cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats);
		if (!ret.xdp_n)
			goto stats;

		m = napi_skb_cache_get_bulk(skbs, ret.xdp_n);
		if (unlikely(m < ret.xdp_n)) {
			for (i = m; i < ret.xdp_n; i++)
				xdp_return_frame(frames[i]);

			if (ret.skb_n)
				memmove(&skbs[m], &skbs[ret.xdp_n],
					ret.skb_n * sizeof(*skbs));

			kmem_alloc_drops += ret.xdp_n - m;
			ret.xdp_n = m;
		}

		for (i = 0; i < ret.xdp_n; i++) {
			struct xdp_frame *xdpf = frames[i];

			/* Can fail only when !skb -- already handled above */
			__xdp_build_skb_from_frame(xdpf, skbs[i], xdpf->dev_rx);
		}

stats:
		/* Feedback loop via tracepoint.
		 * NB: keep before recv to allow measuring enqueue/dequeue latency.
		 */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		for (i = 0; i < ret.xdp_n + ret.skb_n; i++)
			gro_receive_skb(&rcpu->gro, skbs[i]);

		/* Flush either every 64 packets or in case of empty ring */
		packets += n;
		empty = __ptr_ring_empty(rcpu->queue);
		if (packets >= NAPI_POLL_WEIGHT || empty) {
			cpu_map_gro_flush(rcpu, empty);
			packets = 0;
		}

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
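
/* Sketch of a program that would be accepted above (illustrative only): it
 * must be BPF_PROG_TYPE_XDP with expected_attach_type == BPF_XDP_CPUMAP and
 * runs on the remote CPU for each redirected frame. With libbpf this is
 * usually expressed via the section name; the exact prefix depends on the
 * libbpf version:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp/cpumap")
 *	int cpumap_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 */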

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize  = value->qsize;
	gro_init(&rcpu->gro);

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	/* Make sure kthread has been running, so kthread_stop() will not
	 * stop the kthread prematurely and all pending frames or skbs
	 * will be handled by the kthread before kthread_stop() returns.
	 */
	wait_for_completion(&rcpu->kthread_running);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	gro_cleanup(&rcpu->gro);
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);

	/* kthread_stop will wake_up_process and wait for it to complete.
	 * cpu_map_kthread_run() makes sure the pointer ring is empty
	 * before exiting.
	 */
	kthread_stop(rcpu->kthread);

	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	gro_cleanup(&rcpu->gro);
	/* The queue should be empty at this point */
	__cpu_map_ring_cleanup(rcpu->queue);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_wq, &old_rcpu->free_work);
	}
}

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses rcu_read_lock() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
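
/* Userspace sketch (illustrative, assumes libbpf): populating one entry so it
 * passes the checks in cpu_map_update_elem() above. qsize selects the
 * ptr_ring size (0 deletes the entry, values above 16384 are rejected);
 * bpf_prog.fd optionally attaches a BPF_XDP_CPUMAP program, and an fd <= 0
 * means "no program". The qsize value and function name are placeholders:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf.h>
 *
 *	int set_cpumap_entry(int map_fd, __u32 cpu, int prog_fd)
 *	{
 *		struct bpf_cpumap_val val = {
 *			.qsize = 2048,
 *			.bpf_prog.fd = prog_fd,
 *		};
 *
 *		return bpf_map_update_elem(map_fd, &cpu, &val, 0);
 *	}
 */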

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. synchronize_rcu() below not only
	 * guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
	 * (if any) are completed.
	 */
	synchronize_rcu();

	/* The only possible user of bpf_cpu_map_entry is
	 * cpu_map_kthread_run().
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* Stop kthread and cleanup entry directly */
		__cpu_map_entry_free(&rcpu->free_work.work);
	}
	bpf_map_area_free(cmap->cpu_map);
	bpf_map_area_free(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __cpu_map_lookup_elem);
}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_cpu_map);

	/* Currently the dynamically allocated elements are not counted */
	usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
	return usage;
}

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= cpu_map_mem_usage,
	.map_btf_id		= &cpu_map_btf_ids[0],
	.map_redirect		= cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for the
	 * driver code invoking us to finish, due to driver (e.g. ixgbe)
	 * recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();

		list_add(&bq->flush_node, flush_list);
	}
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

void __cpu_map_flush(struct list_head *flush_list)
{
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}