// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		      size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}
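
/*
 * Illustrative sketch (not part of the original file) of how the qlist
 * primitives above compose; meta_a and meta_b stand for hypothetical
 * struct kasan_free_meta pointers. Both append and splice are O(1),
 * which keeps the per-cpu fast path cheap:
 *
 *	struct qlist_head percpu = QLIST_INIT, global = QLIST_INIT;
 *
 *	qlist_put(&percpu, &meta_a->quarantine_link, cache->size);
 *	qlist_put(&percpu, &meta_b->quarantine_link, cache->size);
 *	// percpu holds a -> b, percpu.bytes == 2 * cache->size
 *
 *	qlist_move_all(&percpu, &global);
 *	// global now owns both nodes; percpu is empty again.
 */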

#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
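
/*
 * Worked example (illustrative, assuming 4 GiB of RAM, 4 online CPUs and
 * QUARANTINE_BATCHES == 1024): kasan_quarantine_reduce() computes
 *
 *	total_size          = 4 GiB / QUARANTINE_FRACTION    = 128 MiB
 *	percpu_quarantines  = 4 * QUARANTINE_PERCPU_SIZE     =   4 MiB
 *	quarantine_max_size = 128 MiB - 4 MiB                = 124 MiB
 *	quarantine_batch_size = max(1 MiB, 2 * 128 MiB/1024) =   1 MiB
 */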

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}
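
/*
 * Layout sketch (illustrative, not in the original file): the queued
 * qlist_node is the quarantine_link field of a kasan_free_meta record
 * located free_meta_offset bytes past the object start, so the
 * subtraction above is the exact inverse of the mapping used when the
 * object was queued:
 *
 *	object
 *	|<-- free_meta_offset -->| struct kasan_free_meta
 *	                            `-- quarantine_link (qlist_node)
 */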

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}
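
/*
 * Illustrative example of the batch rotation above: with
 * QUARANTINE_BATCHES == 1024, quarantine_tail == 1023 wraps new_tail
 * around to 0. If new_tail equals quarantine_head the FIFO is full;
 * the tail is then left in place and the current batch simply grows
 * past quarantine_batch_size until kasan_quarantine_reduce() drains a
 * batch from the head and frees a slot.
 */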

void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
					      2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}
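
/*
 * Illustrative sketch (not part of the original file) of how the SRCU
 * protocol pairs the reduce path above with cache destruction below:
 *
 *	kasan_quarantine_reduce()	kasan_quarantine_remove_cache()
 *	-------------------------	-------------------------------
 *	srcu_read_lock()
 *	move batch to local to_free	qlist_move_cache() scans global
 *	qlist_free_all(&to_free)	  quarantine, misses to_free
 *	srcu_read_unlock()		synchronize_srcu() waits here,
 *					  so no local to_free list can
 *					  still hold the dying cache's
 *					  objects when it returns.
 */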

static void qlist_move_cache(struct qlist_head *from,
			     struct qlist_head *to,
			     struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}
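
/*
 * Example of the selective splice above (illustrative): given a mixed
 * list "from" holding objects of caches A, B, A and cache == A,
 * qlist_move_cache() leaves "from" as [B] and appends [A, A] to "to",
 * preserving the relative order of objects in both lists.
 */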

static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the write to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);