/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KMSAN support.
 *
 * Copyright (C) 2022, Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#ifndef _ASM_X86_KMSAN_H
#define _ASM_X86_KMSAN_H

#ifndef MODULE

#include <asm/cpu_entry_area.h>
#include <asm/processor.h>
#include <linux/mmzone.h>

/* Per-CPU KMSAN shadow and origin metadata for the CPU entry area. */
DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);

/*
 * The functions below are declared in the header to make sure they are
 * inlined. They are all called from kmsan_get_metadata() for every memory
 * access in the kernel, so speed is important here.
 */

/*
 * Compute metadata addresses for the CPU entry area on x86.
 */
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr;
	char *metadata_array;
	unsigned long off;
	int cpu;

	if ((addr64 < CPU_ENTRY_AREA_BASE) ||
	    (addr64 >= (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE)))
		return NULL;
	cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
	off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
	/* off is unsigned, so only the upper bound needs checking. */
	if (off >= CPU_ENTRY_AREA_SIZE)
		return NULL;
	metadata_array = is_origin ? cpu_entry_area_origin :
				     cpu_entry_area_shadow;
	return &per_cpu(metadata_array[off], cpu);
}
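
/*
 * Illustrative sketch, not part of this header: per the comment above,
 * the generic kmsan_get_metadata() in mm/kmsan/ calls this arch hook and
 * falls back to its common metadata lookup when NULL is returned, roughly
 * along these lines (local names are hypothetical):
 *
 *	void *meta = arch_kmsan_get_meta_or_null(addr, is_origin);
 *
 *	if (meta)
 *		return meta;
 *	// ...fall back to the generic vmalloc/page_alloc metadata lookup...
 */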

/*
 * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
 */
static inline bool kmsan_phys_addr_valid(unsigned long addr)
{
	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return !(addr >> boot_cpu_data.x86_phys_bits);
	else
		return true;
}
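
/*
 * Worked example with a hypothetical CPU width: if x86_phys_bits is 46,
 * addr = (1UL << 46) - 1 shifts down to 0 and is accepted, while
 * addr = 1UL << 46 shifts down to 1 and is rejected. The check therefore
 * accepts exactly those addresses that fit in the CPU's reported physical
 * address width.
 */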

/*
 * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
	unsigned long x = (unsigned long)addr;
	unsigned long y = x - __START_KERNEL_map;
	bool ret;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !kmsan_phys_addr_valid(x))
			return false;
	}

	/*
	 * pfn_valid() relies on RCU, and may call into the scheduler on
	 * exiting the critical section. However, this would result in
	 * recursion with KMSAN. Therefore, disable preemption here, and
	 * re-enable preemption below while suppressing reschedules to avoid
	 * recursion.
	 *
	 * Note that this occasionally sacrifices scheduling guarantees.
	 * However, a kernel compiled with KMSAN has already given up on any
	 * performance guarantees due to being heavily instrumented.
	 */
	preempt_disable();
	ret = pfn_valid(x >> PAGE_SHIFT);
	preempt_enable_no_resched();

	return ret;
}
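
/*
 * Worked example of the carry-flag trick above, using the standard x86-64
 * layout where __START_KERNEL_map is 0xffffffff80000000: for
 * x = 0xffffffff80001000 (a kernel-image address), the subtraction yields
 * y = 0x1000, so x > y and the kernel-image branch is taken. For
 * x = 0x1000 (below __START_KERNEL_map), the subtraction wraps around to
 * y = 0x0000000080001000 > x, so the else branch runs instead and rebases
 * the assumed direct-map (PAGE_OFFSET-based) address to a physical one.
 */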

#endif /* !MODULE */

#endif /* _ASM_X86_KMSAN_H */