// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

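/*
 * If the [vdso] mapping is moved (e.g. by mremap() from a checkpoint/restore
 * tool), keep mm->context.vdso pointing at the new base so that later users
 * of the vDSO address, such as signal trampoline setup, see the new location.
 */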
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

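/*
 * Boot-time initialisation for one vDSO ABI: sanity-check the ELF image that
 * was built into the kernel and record the pages backing its text so they can
 * later be installed as the [vdso] code mapping.
 */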
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *), GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

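/*
 * Time namespace support: a task in a non-init time namespace gets its own
 * copy of the vDSO data page. arch_get_vdso_data() lets the generic
 * time-namespace code locate struct vdso_data inside a vvar page it manages.
 */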
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details check_vma_flags() and __access_remote_vm()
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

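/*
 * The [vvar] mapping is a VM_PFNMAP special mapping with no pages installed
 * up front; each page is inserted here on first access, based on the faulting
 * offset and on whether the task is in a time namespace.
 */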
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

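/*
 * Map the vvar pages and the vDSO text into a task. The resulting layout,
 * sketched from the code below, is:
 *
 *	vdso_base:				[vvar], VVAR_NR_PAGES pages
 *	vdso_base + VVAR_NR_PAGES * PAGE_SIZE:	[vdso], vdso_text_len bytes
 *
 * mm->context.vdso tracks the start of the text mapping.
 */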
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

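/*
 * Everything from here to the matching #endif deals with 32-bit (AArch32)
 * tasks: the kuser helper vectors page, the signal return page and, when
 * CONFIG_COMPAT_VDSO is enabled, the compat vDSO itself.
 */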
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
		.mremap	= aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}

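/*
 * The sigpage is filled with a poison pattern before the sigreturn
 * trampolines are copied in; the value appears to be drawn from the A32
 * permanently-undefined encoding space, so a stray branch into the page
 * faults instead of executing whatever bytes happen to be there.
 */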
#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

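/*
 * The kuser helpers must sit at the fixed address AArch32 binaries expect
 * (AARCH32_VECTORS_BASE, the legacy 0xffff0000 vectors page), so they are
 * installed as a special mapping at a fixed address rather than via a normal
 * get_unmapped_area() search.
 */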
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

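/*
 * Native (AArch64) tasks only need the [vvar] data mapping and the [vdso]
 * code mapping defined below.
 */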
enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

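/*
 * Called by the ELF loader at exec time to map the vDSO into the new mm.
 * Userspace finds the mapping through the AT_SYSINFO_EHDR auxv entry and
 * resolves symbols such as __kernel_clock_gettime from it (the C library
 * normally does this transparently for clock_gettime() and friends).
 */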
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}