mm/usercopy: Detect vmalloc overruns
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks whether a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when exact frame checking is
 *		    not possible)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
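
/*
 * For instance (a sketch, assuming a downward-growing stack): a pointer
 * into a callee frame that has already returned lies below the current
 * stack pointer. It is still inside [stack, stackend), but the depth
 * check above rejects it as BAD_STACK rather than treating it as live
 * stack data.
 */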

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range); a sketch follows after this
 * function.
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}
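
/*
 * A minimal sketch of creating a cache with a usercopy whitelist
 * (hypothetical names; here only the 16 bytes starting at the
 * "user_data" member of "struct foo" may cross the user/kernel
 * boundary):
 *
 *	struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo_cache",
 *				sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, user_data),
 *				16, NULL);
 *
 * Copies touching bytes outside [useroffset, useroffset + usersize) of
 * a "foo_cache" object would then be rejected by __check_heap_object().
 */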

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear mapping of physical
	 * memory, in which case __pa() is not simply the inverse of
	 * __va(). This can be detected and checked:
	 */
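	/*
	 * (lm_alias(x) is, roughly, __va(__pa_symbol(x)): it translates a
	 * kernel-image symbol address into its linear-map alias, so the
	 * two differ exactly when such a secondary mapping exists.)
	 */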
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
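
/*
 * Example of the wrap check above: for ptr == ULONG_MAX - 3 and n == 8,
 * ptr + (n - 1) wraps past zero and so compares less than ptr, and the
 * copy is rejected as a "wrapped address" before any range math is
 * trusted.
 */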

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct folio *folio;

	if (!virt_addr_valid(ptr))
		return;

	if (is_kmap_addr(ptr)) {
		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

		if ((unsigned long)ptr + n - 1 > page_end)
			usercopy_abort("kmap", NULL, to_user,
				       offset_in_page(ptr), n);
		return;
	}

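	/*
	 * A vmalloc'ed object must fit entirely inside its backing
	 * vm_struct area. Note that get_vm_area_size() excludes the
	 * trailing guard page (unless VM_NO_GUARD), so an overrun into
	 * the guard page is rejected as well.
	 */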
	if (is_vmalloc_addr(ptr)) {
		struct vm_struct *area = find_vm_area(ptr);
		unsigned long offset;

		if (!area) {
			usercopy_abort("vmalloc", "no area", to_user, 0, n);
			return;
		}

		offset = ptr - area->addr;
		if (offset + n > get_vm_area_size(area))
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		return;
	}

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, folio_page(folio, 0), to_user);
	}
}
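
/*
 * Example of a vmalloc overrun being caught (a sketch, assuming
 * CONFIG_HARDENED_USERCOPY=y): given
 *
 *	char *buf = vmalloc(PAGE_SIZE);
 *
 * a later copy_to_user(uptr, buf, len) with a runtime len greater than
 * PAGE_SIZE reaches the vmalloc branch above with offset == 0 and
 * offset + n > get_vm_area_size(area), and aborts as a "vmalloc"
 * overrun. (Compile-time-constant sizes may be filtered out earlier by
 * check_object_size() and never reach this code.)
 */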

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not a bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
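
/*
 * How this is reached (a sketch): the uaccess helpers call
 * check_object_size() (see include/linux/thread_info.h), which skips
 * the check for compile-time-constant sizes, roughly:
 *
 *	static __always_inline void check_object_size(const void *ptr,
 *						      unsigned long n,
 *						      bool to_user)
 *	{
 *		if (!__builtin_constant_p(n))
 *			__check_object_size(ptr, n, to_user);
 *	}
 */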

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (strtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
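
/*
 * Usage note: booting with "hardened_usercopy=off" on the kernel command
 * line sets enable_checks to false, so set_hardened_usercopy() flips the
 * bypass_usercopy_checks static branch and all of the checks above
 * become no-ops.
 */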