arch/x86/mm/mmap.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Flexible mmap layout support
 *
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * as follows:
 *
 * Copyright 2003-2009 Red Hat Inc.
 * All Rights Reserved.
 * Copyright 2005 Andi Kleen, SUSE Labs.
 * Copyright 2007 Jiri Kosina, SUSE Labs.
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/limits.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/compat.h>
#include <asm/elf.h>

#include "physaddr.h"

struct va_alignment __read_mostly va_align = {
	.flags = -1,
};

unsigned long task_size_32bit(void)
{
	return IA32_PAGE_OFFSET;
}

unsigned long task_size_64bit(int full_addr_space)
{
	return full_addr_space ? TASK_SIZE_MAX : DEFAULT_MAP_WINDOW;
}

static unsigned long stack_maxrandom_size(unsigned long task_size)
{
	unsigned long max = 0;
	if (current->flags & PF_RANDOMIZE) {
		max = (-1UL) & __STACK_RND_MASK(task_size == task_size_32bit());
		max <<= PAGE_SHIFT;
	}

	return max;
}
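
/*
 * Illustrative numbers (Kconfig/arch dependent, not guaranteed here):
 * on x86-64, __STACK_RND_MASK() is typically 0x3fffff (22 bits) for a
 * 64-bit task and 0x7ff (11 bits) for a 32-bit task, so after the
 * shift by PAGE_SHIFT the stack base can be randomized over roughly
 * 16 GiB or 8 MiB respectively.
 */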

#ifdef CONFIG_COMPAT
# define mmap32_rnd_bits  mmap_rnd_compat_bits
# define mmap64_rnd_bits  mmap_rnd_bits
#else
# define mmap32_rnd_bits  mmap_rnd_bits
# define mmap64_rnd_bits  mmap_rnd_bits
#endif
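
/*
 * Illustrative defaults (set via CONFIG_ARCH_MMAP_RND_BITS and
 * CONFIG_ARCH_MMAP_RND_COMPAT_BITS, tunable at runtime through
 * /proc/sys/vm/mmap_rnd_bits and /proc/sys/vm/mmap_rnd_compat_bits):
 * commonly 28 bits for 64-bit mmap and 8 bits for compat mmap on x86.
 */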

#define SIZE_128M    (128 * 1024 * 1024UL)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long arch_rnd(unsigned int rndbits)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
	return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
}
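
/*
 * Worked example (illustrative): with rndbits == 28, arch_rnd() keeps
 * the low 28 bits of get_random_long() and shifts them up by
 * PAGE_SHIFT, producing a page-aligned offset in [0, 2^28 * 4 KiB),
 * i.e. up to 1 TiB of mmap base randomization.
 */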

static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
			       struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole with possible stack randomization.
	 */
	gap_min = SIZE_128M;
	gap_max = (task_size / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(task_size - gap - rnd);
}
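
/*
 * Worked example (illustrative, assuming common x86-64 defaults):
 * task_size = DEFAULT_MAP_WINDOW (~128 TiB), RLIMIT_STACK = 8 MiB,
 * stack_guard_gap = 1 MiB and ~16 GiB of maximum stack randomization
 * give gap ~= 16 GiB after padding. That falls between gap_min
 * (128 MiB) and gap_max (5/6 of task_size), so the mmap area tops out
 * roughly 16 GiB below the end of the address space, minus rnd.
 */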

static unsigned long mmap_legacy_base(unsigned long rnd,
				      unsigned long task_size)
{
	return __TASK_UNMAPPED_BASE(task_size) + rnd;
}
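
/*
 * Note (illustrative): on x86, __TASK_UNMAPPED_BASE(task_size) is
 * PAGE_ALIGN(task_size / 3), so the legacy bottom-up layout starts
 * the mmap search about one third of the way into the address space
 * and grows upward toward the stack.
 */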

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
		unsigned long random_factor, unsigned long task_size,
		struct rlimit *rlim_stack)
{
	*legacy_base = mmap_legacy_base(random_factor, task_size);
	if (mmap_is_legacy())
		*base = *legacy_base;
	else
		*base = mmap_base(random_factor, task_size, rlim_stack);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	if (mmap_is_legacy())
		mm->get_unmapped_area = arch_get_unmapped_area;
	else
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;

	arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
			arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
			rlim_stack);

#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/*
	 * The mmap syscall mapping base decision depends solely on the
	 * syscall type (64-bit or compat). This applies to both 64-bit
	 * and 32-bit applications: the 64-bit syscall uses mmap_base,
	 * the compat syscall uses mmap_compat_base.
	 */
	arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
			arch_rnd(mmap32_rnd_bits), task_size_32bit(),
			rlim_stack);
#endif
}
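
/*
 * Illustrative consequence: with CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES a
 * process carries both bases at once, so a 64-bit task that issues a
 * compat (32-bit) mmap syscall is still handed an address that fits
 * in the 32-bit address space, and vice versa.
 */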

unsigned long get_mmap_base(int is_legacy)
{
	struct mm_struct *mm = current->mm;

#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	if (in_32bit_syscall()) {
		return is_legacy ? mm->mmap_compat_legacy_base
				 : mm->mmap_compat_base;
	}
#endif
	return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
}
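
/*
 * Usage sketch (hypothetical caller, mirroring how the x86
 * get_unmapped_area implementations pick their search window):
 *
 *	info.high_limit = get_mmap_base(0);	// top-down search limit
 *	...
 *	begin = get_mmap_base(1);		// bottom-up search start
 */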

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MPX)
		return "[mpx]";
	return NULL;
}

/**
 * mmap_address_hint_valid - Validate the address hint of mmap
 * @addr:	Address hint
 * @len:	Mapping length
 *
 * Check whether @addr and @addr + @len result in a valid mapping.
 *
 * On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
 *
 * On 64bit with 5-level page tables another sanity check is required
 * because mappings requested by mmap(@addr, 0) which cross the 47-bit
 * virtual address boundary can cause the following theoretical issue:
 *
 * An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
 * is below the border of the 47-bit address space and @addr + @len is
 * above the border.
 *
 * With 4-level paging this request succeeds, but the resulting mapping
 * address will always be within the 47-bit virtual address space, because
 * the hint address does not result in a valid mapping and is
 * ignored. Hence applications which are not prepared to handle virtual
 * addresses above 47-bit work correctly.
 *
 * With 5-level paging this request would be granted and result in a
 * mapping which crosses the border of the 47-bit virtual address
 * space. If the application cannot handle addresses above 47-bit this
 * will lead to misbehaviour and hard-to-diagnose failures.
 *
 * Therefore ignore address hints which would result in a mapping crossing
 * the 47-bit virtual address boundary.
 *
 * Note that in the same scenario with MAP_FIXED the behaviour is
 * different. The request with @addr < 47-bit and @addr + @len > 47-bit
 * fails on a 4-level paging machine but succeeds on a 5-level paging
 * machine. It is reasonable to expect that an application does not rely on
 * the failure of such a fixed mapping request, so the restriction is not
 * applied.
 */
bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
{
	if (TASK_SIZE - len < addr)
		return false;

	return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
}
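
/*
 * Worked example (illustrative): on x86-64, DEFAULT_MAP_WINDOW is
 * (1UL << 47) - PAGE_SIZE. A hint with addr = DEFAULT_MAP_WINDOW -
 * PAGE_SIZE and len = 4 * PAGE_SIZE lies below the window while
 * addr + len lies above it, so the two comparisons disagree and the
 * hint is rejected; hints entirely below or entirely above the window
 * pass.
 */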

/* Can we access it for direct reading/writing? Must be RAM: */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count - 1 <= __pa(high_memory - 1);
}

/* Can we access it through mmap? Must be a valid physical address: */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
{
	phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;

	return phys_addr_valid(addr + count - 1);
}
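
/*
 * Note: both helpers are consulted by the /dev/mem read/write and mmap
 * paths, alongside other restrictions such as CONFIG_STRICT_DEVMEM,
 * when deciding whether access to a physical range may proceed.
 */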

/*
 * Only allow root to set high MMIO mappings to PROT_NONE.
 * This prevents an unprivileged user from setting them to PROT_NONE and
 * inverting them so that they point to valid memory for L1TF speculation.
 *
 * Note: locked-down kernels may want to disable the root override.
 */
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return true;
	if (!__pte_needs_invert(pgprot_val(prot)))
		return true;
	/* If it's real memory, always allow */
	if (pfn_valid(pfn))
		return true;
	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
		return false;
	return true;
}
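
/*
 * Note (illustrative): l1tf_pfn_limit() is derived from the CPU's
 * cache address bits and marks roughly the first PFN an inverted PTE
 * could make speculatively reachable; PROT_NONE mappings of non-RAM
 * PFNs at or above that limit are therefore reserved to CAP_SYS_ADMIN.
 */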