/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);

	/* Populate the page array that backs the vdso text mapping. */
	for (i = 0; i < npages; i++)
		image->text_mapping.pages[i] =
			virt_to_page(image->data + i*PAGE_SIZE);

	/* Apply CPU-feature alternatives to the vdso's own code. */
	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

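/*
 * Note (illustrative): image->alt and image->alt_len locate the vdso
 * image's own alternative-instruction records (assumed here to come from
 * the image's .altinstructions data, as laid out by the vdso linker
 * script), so the same boot-time CPU-feature patching that rewrites
 * kernel text is applied to vdso code as well.
 */
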
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);

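/*
 * Being a subsys initcall, init_vdso() runs once during boot, before the
 * first user process execs, so the page arrays and patched instructions
 * set up above are in place before map_vdso() can ever be called.
 */
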
struct linux_binprm;

/* Put the vdso above the (randomized) stack with another randomized offset.
   This way there is no hole in the middle of address space.
   To save memory make sure it is still in the same PTE as the stack top.
   This doesn't give that many random bits.

   Only used for the 64-bit and x32 vdsos. */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);

	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}

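/*
 * Worked example (illustrative): with 4K pages, PTRS_PER_PTE is 512, so
 * the masked offset picks one of 512 page-sized slots -- at most 2MB,
 * i.e. exactly one PMD.  Together with the PMD-aligned clamp on 'end'
 * above, the vdso lands in the same page-table page that already maps
 * the stack top, at the cost of only about 9 bits of randomness.
 */
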
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       addr,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* The rest of the mapping holds the vvar and hpet pages. */
	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);
	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			addr + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

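/*
 * Resulting layout (illustrative; the offsets come from the image's
 * linker-script symbols):
 *
 *   addr .................. addr + image->size             [vdso] text
 *   addr + image->size .... addr + image->sym_end_mapping  [vvar] area,
 *       containing the shared __vvar_page and, when configured, the
 *       read-only HPET register page.
 */
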
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	/* Tell the sysenter return path where to land in the vdso. */
	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

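/*
 * Usage (illustrative): booting with "vdso=0" clears vdso64_enabled, so
 * arch_setup_additional_pages() maps no vdso at all; any nonzero value
 * leaves it enabled.  This switch only covers the 64-bit image.
 */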