// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

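/*
 * Capture the current call stack for reporting. The trace is cut at the
 * first irq entry frame, since everything below it belongs to the
 * interrupted task rather than to the allocation or free being
 * recorded, and is then deduplicated through the stack depot, which
 * returns a compact handle instead of the full array of entries.
 */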
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

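/*
 * The interceptors below replace the architecture's memset, memmove and
 * memcpy so that even code the compiler did not instrument gets its
 * source and destination ranges checked. __memset, __memmove and
 * __memcpy are the uninstrumented implementations provided by the
 * architecture.
 */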
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

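/*
 * Each shadow byte covers KASAN_SHADOW_SCALE_SIZE (8 in generic mode)
 * bytes of real memory, so kasan_mem_to_shadow() boils down to
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
 *
 * (a sketch of the mapping from <linux/kasan.h>, shown here only for
 * orientation).
 */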
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

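/*
 * Worked example for the function below, assuming generic KASAN with a
 * shadow scale of 8: unpoisoning a 13-byte object writes 0 to the first
 * shadow byte (all 8 bytes addressable) and 13 & KASAN_SHADOW_MASK == 5
 * to the second, so only the first 5 bytes of that granule are valid.
 * With CONFIG_KASAN_SW_TAGS the partial granule instead keeps the
 * pointer's tag.
 */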
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

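/*
 * With software tag-based KASAN, one random tag is picked for the whole
 * high-order block, stored in each page's flags via page_kasan_tag_set()
 * and mirrored into the shadow, so only pointers carrying that tag can
 * access the pages; in generic mode the tag helpers are effectively
 * no-ops and only the unpoisoning matters.
 */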
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
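/*
 * For example, per the table below: a 32-byte object gets a 16-byte
 * redzone, a 128-byte object gets 64 bytes, and a 4 KiB object gets
 * 256 bytes.
 */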
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

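/*
 * A sketch of the resulting slab object layout. Free meta is only added
 * for generic KASAN, and only when the freed object itself cannot be
 * reused to hold it (RCU caches, caches with constructors, or objects
 * smaller than the metadata):
 *
 *	| object | kasan_alloc_meta | [kasan_free_meta] | redzone padding |
 *
 * The padding is sized so that the total redzone matches
 * optimal_redzone() as closely as KMALLOC_MAX_SIZE allows.
 */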
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
	       (cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < (1 << compound_order(page)); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * Since it's desirable to only call object constructors once during slab
 * allocation, we preassign tags to all such objects. Also preassign tags for
 * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
 * For the SLAB allocator we can't preassign tags randomly since the freelist
 * is stored as an array of indexes instead of a linked list. Assign tags
 * based on object indexes, so that objects that are next to each other get
 * different tags.
 * After a tag is assigned, the object always gets allocated with the same tag.
 * The reason is that we can't change tags for objects with constructors on
 * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
 * code can save the pointer to the object somewhere (e.g. in the object
 * itself). Then if we retag it, the old saved pointer will become invalid.
 */
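/*
 * For instance, with CONFIG_SLAB a cache with a constructor tags object
 * 0 with tag 0, object 1 with tag 1, and so on (truncated to u8), so
 * neighbouring objects always end up with different tags.
 */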
static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
{
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return new ? KASAN_TAG_KERNEL : random_tag();

#ifdef CONFIG_SLAB
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	return new ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return kasan_kmalloc(cache, object, cache->object_size, flags);
}

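/*
 * A shadow byte is valid for a free if it still encodes an accessible
 * object: in generic mode that is 0 (fully addressable) through
 * KASAN_SHADOW_SCALE_SIZE - 1 (partially addressable); in software
 * tag-based mode it must match the pointer's tag.
 */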
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	else
		return tag != (u8)shadow_byte;
}

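/*
 * Returns true when KASAN takes ownership of the object (an invalid
 * free was reported, or generic KASAN placed the object into the
 * quarantine) and the slab allocator must not free it yet; returns
 * false when the actual free should proceed. 'quarantine' is false only
 * on the kasan_poison_kfree() path, which poisons without deferring.
 */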
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

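/*
 * Redzone placement example for the function below, assuming a 128-byte
 * cache, a 100-byte allocation and the generic 8-byte shadow scale:
 * [0, 100) is unpoisoned, accesses to [100, 104) are caught by the
 * partial shadow byte written by kasan_unpoison_shadow(), and
 * [104, 128) is poisoned as KASAN_KMALLOC_REDZONE.
 */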
void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
					gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

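/*
 * Shadow sizing example for the function below, assuming the generic
 * 8-to-1 scale: a 1 MiB module mapping needs 128 KiB of shadow, rounded
 * up to whole pages and placed at the shadow address corresponding to
 * 'addr'.
 */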
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge() here: the former is
	 * arch-specific and the latter depends on HUGETLB_PAGE. So let's
	 * abuse pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

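/*
 * Memory hotplug handler: shadow for memory going online is allocated
 * with vmalloc() unless it was already mapped at boot, and the
 * vmalloc()-backed shadow is freed again when the memory goes offline;
 * see the comments inside for the details.
 */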
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that this
		 * was indeed the latter case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(): the code to do that hasn't
		 * been written yet, so we simply leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif