arm64: remove mmap linked list from vdso
arch/arm64/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
        VDSO_ABI_AA64,
        VDSO_ABI_AA32,
};

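/*
 * The vvar mapping is VVAR_NR_PAGES long: the vDSO data page itself, plus
 * a second slot which, for tasks in a time namespace, maps the real vDSO
 * data while the first slot carries the namespace-specific copy (see
 * vvar_fault() below). Must agree with __VVAR_PAGES.
 */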
enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
        VVAR_TIMENS_PAGE_OFFSET,
        VVAR_NR_PAGES,
};

struct vdso_abi_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
        [VDSO_ABI_AA64] = {
                .name = "vdso",
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
#ifdef CONFIG_COMPAT_VDSO
        [VDSO_ABI_AA32] = {
                .name = "vdso32",
                .vdso_code_start = vdso32_start,
                .vdso_code_end = vdso32_end,
        },
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data[CS_BASES];
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

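/*
 * Called if userspace moves the vDSO with mremap(): keep the cached base
 * address in mm->context.vdso in sync so that e.g. the signal trampoline
 * lookup still finds the vDSO at its new location.
 */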
static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

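/*
 * One-time setup for one vDSO ABI: sanity-check the ELF image and build
 * the array of struct pages backing its code mapping. The array is shared
 * by every process mapping this vDSO, so it is never freed.
 */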
static int __init __vdso_init(enum vdso_abi abi)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_info[abi].vdso_pages = (
                        vdso_info[abi].vdso_code_end -
                        vdso_info[abi].vdso_code_start) >>
                        PAGE_SHIFT;

        vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

        for (i = 0; i < vdso_info[abi].vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);

        vdso_info[abi].cm->pages = vdso_pagelist;

        return 0;
}

#ifdef CONFIG_TIME_NS
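/*
 * The time namespace code hands us a vvar page; on arm64 the vdso_data
 * lives at the very start of that page, so the conversion is a cast.
 */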
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);

        for_each_vma(vmi, vma) {
                unsigned long size = vma->vm_end - vma->vm_start;

                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
                        zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
                        zap_page_range(vma, vma->vm_start, size);
#endif
        }

        mmap_read_unlock(mm);
        return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        if (likely(vma->vm_mm == current->mm))
                return current->nsproxy->time_ns->vvar_page;

        /*
         * VM_PFNMAP | VM_IO protect the .fault() handler from being called
         * through interfaces like /proc/$pid/mem or
         * process_vm_{readv,writev}() as long as there's no .access()
         * in special_mapping_vmops.
         * For more details see check_vma_flags() and __access_remote_vm().
         */
        WARN(1, "vvar_page accessed remotely");

        return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        return NULL;
}
#endif

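/*
 * Fault handler for the [vvar] mapping: pages are inserted lazily by PFN,
 * which is what lets vdso_join_timens() simply zap the range and have the
 * next fault pick up the data for the new namespace.
 */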
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
                else
                        pfn = sym_to_pfn(vdso_data);
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = sym_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

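/*
 * Map the vDSO into a new process: the vvar data pages first, with the
 * vDSO text immediately above them. The text mapping is marked as a BTI
 * guarded page when the CPU supports it; on failure the cached vDSO base
 * is cleared again.
 */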
static int __setup_additional_pages(enum vdso_abi abi,
                                    struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        unsigned long gp_flags = 0;
        void *ret;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

        vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the vvar data pages */
        vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
                                       VM_READ|VM_MAYREAD|VM_PFNMAP,
                                       vdso_info[abi].dm);
        if (IS_ERR(ret))
                goto up_fail;

        if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
                gp_flags = VM_ARM64_BTI;

        vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|gp_flags|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_info[abi].cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
        AA32_MAP_VECTORS, /* kuser helpers */
        AA32_MAP_SIGPAGE,
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

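/* As vdso_mremap(), but tracking the AArch32 sigpage. */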
static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
                                  struct vm_area_struct *new_vma)
{
        current->mm->context.sigpage = (void *)new_vma->vm_start;

        return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vectors_page,
        },
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
                .mremap = aarch32_sigpage_mremap,
        },
        [AA32_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
        },
        [AA32_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = vdso_mremap,
        },
};

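/*
 * Copy the kuser helpers to the end of a zeroed page; once this page is
 * mapped at AARCH32_VECTORS_BASE the helpers end up at the fixed
 * addresses the AArch32 kuser ABI expects.
 */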
static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_KERNEL);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vectors_page = virt_to_page(vdso_page);
        return 0;
}

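/*
 * Poison used to fill the unused part of the sigpage; the encoding is
 * expected to be undefined so that a stray branch into the page faults
 * rather than silently falling through.
 */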
#define COMPAT_SIGPAGE_POISON_WORD      0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
        void *sigpage;

        sigpage = (void *)__get_free_page(GFP_KERNEL);
        if (!sigpage)
                return -ENOMEM;

        memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
        memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
        if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
                return 0;

        vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
        vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
        int ret;

        ret = __aarch32_alloc_vdso_pages();
        if (ret)
                return ret;

        ret = aarch32_alloc_sigpage();
        if (ret)
                return ret;

        return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

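/* Map the kuser helpers at their ABI-fixed address, AARCH32_VECTORS_BASE. */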
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}

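/*
 * Map the sigreturn trampoline page at an address chosen by
 * get_unmapped_area() and remember it for compat signal delivery.
 */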
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to copy-on-write the
         * page and set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.sigpage = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}

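/*
 * Called at exec time for 32-bit tasks: map the kuser helpers, the
 * compat vDSO (when enabled) and the sigpage under a single hold of
 * the mmap lock.
 */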
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

        if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
                ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
                                               uses_interp);
                if (ret)
                        goto out;
        }

        ret = aarch32_sigreturn_setup(mm);
out:
        mmap_write_unlock(mm);
        return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
        AA64_MAP_VVAR,
        AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
        [AA64_MAP_VVAR] = {
                .name   = "[vvar]",
                .fault = vvar_fault,
        },
        [AA64_MAP_VDSO] = {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

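/* Wire up the native vDSO mappings and initialize the code page array. */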
static int __init vdso_init(void)
{
        vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
        vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

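/* Called at exec time to map the native vDSO into the new mm. */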
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
        mmap_write_unlock(mm);

        return ret;
}