/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif
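/*
 * Build the page array for a vDSO image and patch its instruction
 * alternatives, so the image can later be mapped into processes.
 */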
void __init init_vdso_image(const struct vdso_image *image)
{
	int i;
	int npages = (image->size) / PAGE_SIZE;

	BUG_ON(image->size % PAGE_SIZE != 0);
	for (i = 0; i < npages; i++)
		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}
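/* Initialize the 64-bit (and, if configured, x32) vDSO images once at boot. */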
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE as
 * the stack top.  This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
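/*
 * Worked example, assuming x86_64 with 4K pages: PTRS_PER_PTE is 512,
 * so the random offset below spans 0-511 pages, i.e. at most 2MB
 * (one PMD) above the stack top, and the clamp against 'end' keeps
 * the result inside the stack's PMD region.
 */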
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);

	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}
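/*
 * Map a vDSO image into the current process: the image text itself,
 * followed by the shared data area (the vvar page and, optionally,
 * the HPET page).
 */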
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	static struct page *no_pages[] = {NULL};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void __user *)addr;
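	/*
	 * context.vdso is recorded up front; the failure path below
	 * clears it again if any of the mappings cannot be set up.
	 */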
	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
				      addr,
				      image->size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      image->pages);
	if (ret)
		goto up_fail;
	vma = _install_special_mapping(mm,
				       addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       no_pages);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}
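	/*
	 * The VMA just created covers the shared data area between the
	 * end of the image text and sym_end_mapping; the remap calls
	 * below populate it with the vvar and HPET pages.
	 */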
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);
	if (ret)
		goto up_fail;
#ifdef CONFIG_HPET_TIMER
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			addr + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto up_fail;
	}
#endif
up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
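/*
 * Map the 32-bit vDSO, honouring the vdso32= setting, and record in
 * thread_info where sysenter should return into the vDSO.
 */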
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
	int ret;

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	ret = map_vdso(selected_vdso32, false);
	if (ret)
		return ret;

	if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
		current_thread_info()->sysenter_return =
			current->mm->context.vdso +
			selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

	return 0;
}
#endif
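/*
 * Called from the ELF loader when a new binary is exec'ed; this is
 * where the vDSO actually gets mapped into the process.
 */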
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif

	return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif
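/*
 * Usage example: booting with "vdso=0" on the kernel command line sets
 * vdso64_enabled to 0, so new 64-bit processes get no vDSO mapping.
 */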