/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */

/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU. The remote
 * CPU will do the SKB allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage. This
 * basically allows for 10G wirespeed pre-filtering via BPF.
 */
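
/* Illustrative sketch (kept out of the build with #if 0, not part of this
 * file): how an XDP program might pick this map as a redirect target. It
 * assumes a samples/bpf-style build where the map definition macro and
 * helper declarations come from "bpf_helpers.h"; header paths, section
 * names, map sizes and the fixed CPU choice are all hypothetical.
 */
#if 0
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") cpu_map = {
	.type		= BPF_MAP_TYPE_CPUMAP,
	.key_size	= sizeof(__u32),	/* key is the CPU number */
	.value_size	= sizeof(__u32),	/* value is the queue size */
	.max_entries	= 64,
};

SEC("xdp")
int xdp_redirect_cpu_prog(struct xdp_md *ctx)
{
	__u32 cpu = 0;	/* hypothetical choice: always target entry/CPU 0 */

	/* Hand the frame to the cpumap; the remote kthread builds the SKB */
	return bpf_redirect_map(&cpu_map, cpu, 0);
}

char _license[] SEC("license") = "GPL";
#endif
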
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>	/* netif_receive_skb_core */
#include <linux/etherdevice.h>	/* eth_type_trans */

/* General idea: XDP packets getting XDP redirected to another CPU
 * will at most be stored/queued for one driver ->poll() call. It is
 * guaranteed that setting the flush bit and the flush operation happen
 * on the same CPU. Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */
	u32 qsize;  /* Queue size placeholder for map lookup */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;
	struct work_struct kthread_stop_wq;

	atomic_t refcnt; /* Control when this struct can be free'ed */
	struct rcu_head rcu;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
	unsigned long __percpu *flush_needed;
};

static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
			     struct xdp_bulk_queue *bq);

static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
}
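
/* Worked example: with max_entries == 64 on a 64-bit arch, BITS_TO_LONGS(64)
 * is 1, so each possible CPU gets an 8 byte flush bitmap; max_entries == 65
 * rounds up to 2 longs (16 bytes). The cost accounting in cpu_map_alloc()
 * below multiplies this per-CPU size by num_possible_cpus().
 */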

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;
	u64 cost;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* make sure page count doesn't overflow */
	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_cmap;
	cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* Notice, returns -EPERM if map size is larger than memlock limit */
	ret = bpf_map_precharge_memlock(cmap->map.pages);
	if (ret) {
		err = ret;
		goto free_cmap;
	}

	/* A per cpu bitfield with a bit per possible CPU in map */
	cmap->flush_needed = __alloc_percpu(cpu_map_bitmap_size(attr),
					    __alignof__(unsigned long));
	if (!cmap->flush_needed)
		goto free_cmap;

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_percpu;

	return &cmap->map;
free_percpu:
	free_percpu(cmap->flush_needed);
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}

static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}

/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}

/* For now, xdp_pkt is a cpumap internal data structure, with info
 * carried from enqueue to dequeue. It is mapped into the top
 * headroom of the packet, to avoid allocating separate memory.
 */
struct xdp_pkt {
	void *data;
	u16 len;
	u16 headroom;
	u16 metasize;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx;
};
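
/* Frame layout produced by convert_to_xdp_pkt() below (illustrative sketch):
 *
 *  data_hard_start                               data              data_end
 *  | struct xdp_pkt |    xdp_pkt->headroom       |   xdp_pkt->len   |
 *
 * i.e. xdp_pkt->headroom is the original headroom minus sizeof(struct
 * xdp_pkt), and the last metasize bytes of that headroom (if any) hold the
 * XDP metadata, ending exactly at xdp_pkt->data.
 */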

/* Convert xdp_buff to xdp_pkt */
static struct xdp_pkt *convert_to_xdp_pkt(struct xdp_buff *xdp)
{
	struct xdp_pkt *xdp_pkt;
	int metasize;
	int headroom;

	/* Assure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_pkt)))
		return NULL;

	/* Store info in top of packet */
	xdp_pkt = xdp->data_hard_start;

	xdp_pkt->data = xdp->data;
	xdp_pkt->len = xdp->data_end - xdp->data;
	xdp_pkt->headroom = headroom - sizeof(*xdp_pkt);
	xdp_pkt->metasize = metasize;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_pkt->mem = xdp->rxq->mem;

	return xdp_pkt;
}

static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
					 struct xdp_pkt *xdp_pkt)
{
	unsigned int frame_size;
	void *pkt_data_start;
	struct sk_buff *skb;

	/* build_skb needs to place skb_shared_info after the SKB end, and
	 * also wants to know the memory "truesize". Thus, we need to
	 * know the memory frame size backing the xdp_buff.
	 *
	 * XDP was designed to have PAGE_SIZE frames, but this
	 * assumption is no longer true with ixgbe and i40e. It
	 * would be preferred to set frame_size to 2048 or 4096
	 * depending on the driver:
	 *   frame_size = 2048;
	 *   frame_len  = frame_size - sizeof(*xdp_pkt);
	 *
	 * Instead, with the info available, skb_shared_info is placed
	 * after the packet length. This unfortunately fakes the
	 * truesize. Another disadvantage of this approach is that
	 * skb_shared_info is not at a fixed memory location, with
	 * mixed-length packets, which is bad for cache-line hotness.
	 */
	frame_size = SKB_DATA_ALIGN(xdp_pkt->len) + xdp_pkt->headroom +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
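	/* Worked example (approximate, 64 byte cachelines): a 1500 byte
	 * packet aligns up to 1536, plus the remaining headroom, plus the
	 * cacheline-aligned size of struct skb_shared_info, is what gets
	 * reported as this skb's "truesize".
	 */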

	pkt_data_start = xdp_pkt->data - xdp_pkt->headroom;
	skb = build_skb(pkt_data_start, frame_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, xdp_pkt->headroom);
	__skb_put(skb, xdp_pkt->len);
	if (xdp_pkt->metasize)
		skb_metadata_set(skb, xdp_pkt->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdp_pkt->dev_rx);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	return skb;
}

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_pkt *xdp_pkt;

	while ((xdp_pkt = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdp_pkt))
			xdp_return_frame(xdp_pkt, &xdp_pkt->mem);
}

static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread gives the stop order, the rcpu has been
	 * disconnected from the map, thus no new packets can enter.
	 * Remaining in-flight per-CPU stored packets are flushed to this
	 * queue. Wait, honoring the kthread_stop signal, until the queue
	 * is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		unsigned int processed = 0, drops = 0, sched = 0;
		struct xdp_pkt *xdp_pkt;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/* Process packets in rcpu->queue */
		local_bh_disable();
		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to the ptr_ring
		 * consume side is valid, as no resize of the queue is allowed.
		 */
		while ((xdp_pkt = __ptr_ring_consume(rcpu->queue))) {
			struct sk_buff *skb;
			int ret;

			skb = cpu_map_build_skb(rcpu, xdp_pkt);
			if (!skb) {
				xdp_return_frame(xdp_pkt, &xdp_pkt->mem);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;

			/* Limit BH-disable period */
			if (++processed == 8)
				break;
		}
		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, processed, drops, sched);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}

static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
							int map_id)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	int numa, err;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
					 sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	/* Alloc queue */
	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map_id;
	rcpu->qsize  = qsize;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu, map_id);
	if (IS_ERR(rcpu->kthread))
		goto free_ptr_ring;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;
	int cpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed. Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	/* Flush remaining packets in percpu bulkq */
	for_each_online_cpu(cpu) {
		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);

		/* No concurrent bq_enqueue can run at this point */
		bq_flush_to_queue(rcpu, bq);
	}
	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}

/* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver RCU critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program. The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() make sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq). A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the memory
 * resources.
 *
 * The RCU callback __cpu_map_entry_free() flushes remaining packets in
 * the percpu bulkq to the queue. Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() here to make sure
 * the queue is empty. Instead a work_queue is started for stopping the
 * kthread, cpu_map_kthread_stop(), which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}

static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice the caller map_delete_elem() uses preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;
	/* Value is the queue size */
	u32 qsize = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (!cpu_possible(key_cpu))
		return -ENODEV;

	if (qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
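
/* Illustrative user-space sketch (kept out of the build with #if 0, not part
 * of this file): creating a cpumap and sizing the queue for one remote CPU
 * via the libbpf wrappers from tools/lib/bpf. The header path, the target
 * CPU, the queue size and max_entries are hypothetical; wrapper names depend
 * on the libbpf version in use.
 */
#if 0
#include <stdio.h>
#include <bpf/bpf.h>

int setup_cpumap_entry(void)
{
	__u32 key_cpu = 2;	/* hypothetical remote CPU to target */
	__u32 qsize = 192;	/* ptr_ring slots allocated for that CPU */
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_CPUMAP, sizeof(__u32), sizeof(__u32),
			    4 /* max_entries */, 0 /* map_flags */);
	if (fd < 0)
		return fd;

	/* Spawns the "cpumap/2/map:<id>" kthread via cpu_map_update_elem() */
	if (bpf_map_update_elem(fd, &key_cpu, &qsize, BPF_ANY) < 0) {
		perror("bpf_map_update_elem");
		return -1;
	}
	return fd;
}
#endif
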

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	int cpu;
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
	 * It does __not__ ensure pending flush operations (if any) are
	 * complete.
	 */
	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to indicate all flush_needed bits are zero on _all_
	 * cpus. Because the above synchronize_rcu() ensures the map is
	 * disconnected from the program, we can assume no new bits will be
	 * set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(cmap->flush_needed, cpu);

		while (!bitmap_empty(bitmap, cmap->map.max_entries))
			cond_resched();
	}

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	free_percpu(cmap->flush_needed);
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->qsize : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

const struct bpf_map_ops cpu_map_ops = {
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
};

static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
			     struct xdp_bulk_queue *bq)
{
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return 0;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_pkt *xdp_pkt = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdp_pkt);
		if (err) {
			drops++;
			xdp_return_frame(xdp_pkt->data, &xdp_pkt->mem);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
	return 0;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_pkt *xdp_pkt)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(rcpu, bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for the
	 * driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_pkt is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdp_pkt;
	return 0;
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_pkt *xdp_pkt;

	xdp_pkt = convert_to_xdp_pkt(xdp);
	if (unlikely(!xdp_pkt))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdp_pkt->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdp_pkt);
	return 0;
}

void __cpu_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed);

	__set_bit(bit, bitmap);
}

void __cpu_map_flush(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	unsigned long *bitmap = this_cpu_ptr(cmap->flush_needed);
	u32 bit;

	/* The napi->poll softirq makes sure __cpu_map_insert_ctx()
	 * and __cpu_map_flush() happen on the same CPU. Thus, the percpu
	 * bitmap indicates which percpu bulkqs have packets.
	 */
	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_cpu_map_entry *rcpu = READ_ONCE(cmap->cpu_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the entry is removed by user space
		 * between the xdp redirect and the flush op.
		 */
		if (unlikely(!rcpu))
			continue;

		__clear_bit(bit, bitmap);

		/* Flush all frames in bulkq to real queue */
		bq = this_cpu_ptr(rcpu->bulkq);
		bq_flush_to_queue(rcpu, bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(rcpu->kthread);
	}
}
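
/* Call-order summary for one driver napi->poll() on the RX CPU (derived from
 * the functions above): for each XDP_REDIRECT'ed packet the redirect core
 * calls cpu_map_enqueue() -> bq_enqueue(), and __cpu_map_insert_ctx() marks
 * the entry in the percpu flush_needed bitmap; at the end of the poll the
 * flush triggers __cpu_map_flush() -> bq_flush_to_queue(), which publishes
 * the frames to the remote ptr_ring and wakes that CPU's kthread,
 * cpu_map_kthread_run(), to build SKBs and feed the normal network stack.
 */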