s390/vdso: move vdso mapping to its own function
author Sven Schnelle <svens@linux.ibm.com>
Wed, 6 Apr 2022 06:35:26 +0000 (08:35 +0200)
committer Heiko Carstens <hca@linux.ibm.com>
Mon, 25 Apr 2022 11:54:14 +0000 (13:54 +0200)
This is a preparation patch for adding vdso randomization to s390.
It adds a function vdso_size(), which will later be used to calculate
the STACK_TOP value. It also moves the vdso mapping into a new function
map_vdso(), to keep the code similar to other architectures.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
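
For illustration only, a minimal sketch of how a follow-up randomization change
could consume the new helpers. The window bounded by STACK_TOP, the use of
get_random_u32(), and the helper name vdso_random_addr() are assumptions of
this sketch, not taken from this patch or from any particular follow-up:

/*
 * Sketch, not part of this patch: pick a random page-aligned address
 * above the stack and pass it as the hint to map_vdso(); vdso_size()
 * tells other code how much room the mapping needs.
 * Assumes the usual vdso.c includes (linux/random.h, linux/sched.h, ...).
 */
static unsigned long vdso_random_addr(unsigned long start, unsigned long len)
{
        unsigned long end = STACK_TOP;  /* assumed upper bound of the window */
        unsigned long pages;

        start = PAGE_ALIGN(start);
        if (start + len >= end)
                return start;
        /* number of page-aligned positions the mapping can take */
        pages = ((end - len - start) >> PAGE_SHIFT) + 1;
        return start + ((get_random_u32() % pages) << PAGE_SHIFT);
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        unsigned long size = vdso_size();
        unsigned long addr = 0;

        if (current->flags & PF_RANDOMIZE)
                addr = vdso_random_addr(current->mm->start_stack, size);
        return map_vdso(addr, size);
}

The details of the eventual randomization may differ; the point of the split is
that map_vdso() now takes an address hint and vdso_size() exposes the mapping
size to code outside vdso.c.
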
arch/s390/include/asm/processor.h
arch/s390/kernel/vdso.c

diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index ff1e25d515a855f666278ca3e444ccecb9b1da80..a3ab8cbcc5e49c259c98379c2b9d89ee28e7f3a4 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -83,6 +83,7 @@ void cpu_detect_mhz_feature(void);
 extern const struct seq_operations cpuinfo_op;
 extern void execve_tail(void);
 extern void __bpon(void);
+unsigned long vdso_size(void);
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 99694260cac97bc618bd7cce2c2672fdf9053604..22cb727d58217e588ba97d0bb703bc0ff53b1835 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -160,10 +160,9 @@ int vdso_getcpu_init(void)
 }
 early_initcall(vdso_getcpu_init); /* Must be called before SMP init */
 
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
 {
-       unsigned long vdso_text_len, vdso_mapping_len;
-       unsigned long vvar_start, vdso_text_start;
+       unsigned long vvar_start, vdso_text_start, vdso_text_len;
        struct vm_special_mapping *vdso_mapping;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
@@ -180,8 +179,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                vdso_text_len = vdso64_end - vdso64_start;
                vdso_mapping = &vdso64_mapping;
        }
-       vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
-       vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+       vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
        rc = vvar_start;
        if (IS_ERR_VALUE(vvar_start))
                goto out;
@@ -210,6 +208,22 @@ out:
        return rc;
 }
 
+unsigned long vdso_size(void)
+{
+       unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+
+       if (is_compat_task())
+               size += vdso32_end - vdso32_start;
+       else
+               size += vdso64_end - vdso64_start;
+       return PAGE_ALIGN(size);
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       return map_vdso(0, vdso_size());
+}
+
 static struct page ** __init vdso_setup_pages(void *start, void *end)
 {
        int pages = (end - start) >> PAGE_SHIFT;