// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

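/* Append @qlink to the tail of @q and account @size bytes to the queue. */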
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

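/* Splice all entries of @from onto the tail of @to and reinitialize @from. */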
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

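/*
 * A per-cpu queue is flushed to the global quarantine once it grows past
 * QUARANTINE_PERCPU_SIZE (1 MB); see kasan_quarantine_put(). The global
 * quarantine holds at least 1024 batches, or four per possible CPU,
 * whichever is larger.
 */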
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

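/*
 * Per-cpu scratch queue used when removing a cache: objects drained from the
 * per-cpu quarantines are parked here before being freed; see
 * kasan_quarantine_remove_cache().
 */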
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine does not implement a memory shrinker for the SLAB
 * allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
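/*
 * For example, on a machine with 8 GB of RAM the global quarantine is capped
 * at 8 GB / 32 = 256 MB, minus the per-cpu queue limits; the cap is
 * recomputed in kasan_quarantine_reduce().
 */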

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

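/* Map a quarantine link (stored in the object's free metadata) back to the object pointer. */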
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

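/* Free a single quarantined object back to its cache. */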
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(meta, sizeof(*meta));

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

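/*
 * Free every object on @q. If @cache is NULL, look up each object's cache
 * from its slab page; otherwise all objects are assumed to belong to @cache.
 */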
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

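/*
 * Put an object into the quarantine instead of freeing it immediately.
 * Returns true if the object was quarantined; false means the caller must
 * free the object right away (there is no free metadata for it, or this
 * cpu's quarantine is offline).
 */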
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

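/*
 * Shrink the global quarantine: recompute the size limits (RAM may have been
 * hot-added or removed) and, if the quarantine is over its limit, free the
 * oldest batch back to the slab allocator.
 */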
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * The srcu critical section ensures that
	 * kasan_quarantine_remove_cache() will not miss objects belonging to
	 * the cache while they are in our local to_free list. srcu is chosen
	 * because (1) it gives us a private grace-period domain that does not
	 * interfere with anything else, and (2) it allows synchronize_srcu()
	 * to return without waiting if there are no pending read critical
	 * sections (which is the expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

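/*
 * Move all objects belonging to @cache from @from to @to; objects belonging
 * to other caches stay on @from.
 */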
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure ordering with the WRITE_ONCE() to q->offline in
	 * kasan_cpu_offline(): don't touch the quarantine of an offline cpu,
	 * or cpu_quarantine may be corrupted by an interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;
	int cpu;
	struct cpu_shrink_qlist *sq;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	for_each_online_cpu(cpu) {
		sq = per_cpu_ptr(&shrink_qlist, cpu);
		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning the whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

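/* cpu hotplug: mark this cpu's quarantine as usable once the cpu is online. */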
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);