// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a single-linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

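/*
 * Note: the initializer below names only head, tail, and bytes; the
 * offline flag is implicitly zero-initialized (false) by C rules.
 */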
#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

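/* Append qlink at the tail of q in O(1) and account its size. */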
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

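/* Splice the whole 'from' list onto the tail of 'to' in O(1), emptying 'from'. */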
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

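/*
 * A per-cpu queue is flushed to the global quarantine once its size
 * exceeds QUARANTINE_PERCPU_SIZE (1 MB). The global quarantine is split
 * into batches: at least 1024, or 4 per CPU when CONFIG_NR_CPUS is
 * large, presumably to keep individual batches cheap to drain at once.
 */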
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

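/*
 * Per-cpu staging lists for kasan_quarantine_remove_cache(): each CPU
 * first moves its matching objects here under the per-cpu lock, and the
 * caller then drains every CPU's list into a single free list.
 */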
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support a memory shrinker with the SLAB
 * allocator, so keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

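/* Look up the kmem_cache an object belongs to from its quarantine link. */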
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

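/*
 * The quarantine link is embedded in the object's kasan_free_meta; step
 * back by free_meta_offset from the metadata to recover the start of
 * the object.
 */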
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);

	/*
	 * Note: Keep per-object metadata to allow KASAN to print stack
	 * traces for use-after-free-before-realloc bugs.
	 */

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(free_meta, sizeof(*free_meta));

	___cache_free(cache, object, _THIS_IP_);
}

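/*
 * Free every object on q. A NULL cache means the list may mix objects
 * from different caches, so each object's cache is looked up on the fly.
 */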
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

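/*
 * Put a just-freed object into this CPU's quarantine instead of releasing
 * it to the allocator. Returns false if the object has no free metadata
 * or the CPU's quarantine is offline, in which case the caller is
 * expected to free the object normally.
 */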
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning, which ensures that it either sees the objects
	 * in the per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
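		/*
		 * Once the tail batch reaches its target size, advance the
		 * tail so subsequent objects start a new batch. If the ring
		 * is full (the next slot is the head), keep filling the
		 * current batch instead of overwriting the oldest one.
		 */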
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

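/*
 * Recompute the quarantine limits from the currently installed memory and
 * free the oldest global batch once quarantine_size exceeds the limit.
 */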
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * The srcu critical section ensures that
	 * kasan_quarantine_remove_cache() will not miss objects belonging to
	 * the cache while they are in our local to_free list. srcu is chosen
	 * because (1) it gives us a private grace period domain that does
	 * not interfere with anything else, and (2) it allows
	 * synchronize_srcu() to return without waiting if there are no
	 * pending read critical sections (which is the expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of the slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

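/*
 * Move all objects belonging to 'cache' from 'from' to 'to', keeping every
 * other object on 'from' in its original order.
 */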
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure ordering between the write to q->offline and this read.
	 * Otherwise, cpu_quarantine may be corrupted by an interrupt while
	 * the CPU is going offline.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;
	int cpu;
	struct cpu_shrink_qlist *sq;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * the per-cpu list to the global quarantine in
	 * kasan_quarantine_put(), nor objects being freed in
	 * kasan_quarantine_reduce(). on_each_cpu() achieves the first goal,
	 * while synchronize_srcu() achieves the second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	for_each_online_cpu(cpu) {
		sq = per_cpu_ptr(&shrink_qlist, cpu);
		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning the whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

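/*
 * CPU hotplug callbacks: mark the per-cpu quarantine usable when a CPU
 * comes online, and drain and disable it when the CPU goes offline.
 */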
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);