// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union vdso_data_store vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

#ifdef CONFIG_TIME_NS
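/* Hand the generic time namespace code the vdso_data within the VVAR page. */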
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_vma_pages(vma);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

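/*
 * Fault handler for the VVAR special mapping: insert the PFN of the
 * page that belongs at the faulting offset. Which physical page that
 * is depends on whether the task is in a time namespace.
 */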
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in the VVAR page too, since it will be
			 * accessed to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR page is mapped at VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped at VVAR_TIMENS_PAGE_OFFSET.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

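/*
 * Keep mm->context.vdso_base up to date when userspace moves the vdso
 * mapping, e.g. during checkpoint/restore (CRIU).
 */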
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

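/*
 * Make the CPU number available to user space: store it in the
 * programmable field of the TOD clock, where the vdso getcpu() code
 * can read it without a system call.
 */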
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

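/*
 * Map the VVAR pages, immediately followed by the vdso text, into the
 * current process. The search for a free area starts at @addr.
 */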
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/* Unmap all of the VVAR pages installed above */
		do_munmap(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

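/*
 * Choose a page-aligned, randomized base address for the vdso in the
 * range between @start and VDSO_BASE.
 */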
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

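/* Size of the vdso text of the current task, rounded up to full pages. */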
unsigned long vdso_text_size(void)
{
	unsigned long size;

	if (is_compat_task())
		size = vdso32_end - vdso32_start;
	else
		size = vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

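/* Total size of the vdso mapping: the VVAR pages plus the vdso text. */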
unsigned long vdso_size(void)
{
	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
}

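/*
 * Called by the ELF loader on exec to map the vdso. With address space
 * randomization enabled, the vdso is placed at a randomized address
 * above the stack; otherwise it goes to the fixed VDSO_BASE.
 */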
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

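/*
 * Build the NULL-terminated list of struct page pointers backing a vdso
 * image, as expected in vm_special_mapping::pages.
 */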
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

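/*
 * Patch the 64-bit vdso image in place: look up its .altinstructions
 * ELF section and feed it to the common alternatives machinery.
 */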
static void vdso_apply_alternatives(void)
{
	const struct elf64_shdr *alt, *shdr;
	struct alt_instr *start, *end;
	const struct elf64_hdr *hdr;

	hdr = (struct elf64_hdr *)vdso64_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;
	start = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;
	apply_alternatives(start, end);
}

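/*
 * Apply alternatives to the vdso image(s) and set up their page lists
 * before the first user space process is started.
 */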
static int __init vdso_init(void)
{
	vdso_apply_alternatives();
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);