// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

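/* Maximum byte offset the stack top may be randomized by; 0 if randomization is off. */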
static unsigned long stack_maxrandom_size(void)
{
        if (!(current->flags & PF_RANDOMIZE))
                return 0;
        return STACK_RND_MASK << PAGE_SHIFT;
}

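/* Decide whether this process should use the legacy (bottom-up) mmap layout. */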
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;
        if (rlim_stack->rlim_cur == RLIM_INFINITY)
                return 1;
        return sysctl_legacy_va_layout;
}

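/* Page-aligned random offset used to randomize the mmap base. */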
unsigned long arch_mmap_rnd(void)
{
        return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

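/* Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus the random offset. */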
static unsigned long mmap_base_legacy(unsigned long rnd)
{
        return TASK_UNMAPPED_BASE + rnd;
}

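/*
 * Top-down layout: place the mmap base below the stack, leaving room
 * for stack growth, the guard gap and stack randomization.
 */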
static inline unsigned long mmap_base(unsigned long rnd,
                                      struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
        unsigned long gap_min, gap_max;

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        /*
         * Top of mmap area (just below the process stack).
         * Leave at least a ~128 MB hole.
         */
        gap_min = SZ_128M;
        gap_max = (STACK_TOP / 6) * 5;

        if (gap < gap_min)
                gap = gap_min;
        else if (gap > gap_max)
                gap = gap_max;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

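/*
 * Extra alignment for file-backed and shared mappings; only applied
 * when address space randomization is enabled.
 */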
static int get_align_mask(struct file *filp, unsigned long flags)
{
        if (!(current->flags & PF_RANDOMIZE))
                return 0;
        if (filp || (flags & MAP_SHARED))
                return MMAP_ALIGN_MASK << PAGE_SHIFT;
        return 0;
}

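/*
 * Bottom-up search for a free address range, used with the legacy
 * mmap layout.
 */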
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info = {};

        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                goto check_asce_limit;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = get_align_mask(filp, flags);
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);
        if (offset_in_page(addr))
                return addr;

check_asce_limit:
        return check_asce_limit(mm, addr, len);
}

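/*
 * Top-down search for a free address range below mm->mmap_base, with a
 * bottom-up fallback if the top-down search fails.
 */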
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                                             unsigned long len, unsigned long pgoff,
                                             unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct vm_unmapped_area_info info = {};

        /* requested length too big for entire address space */
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                goto check_asce_limit;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                                (!vma || addr + len <= vm_start_gap(vma)))
                        goto check_asce_limit;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = get_align_mask(filp, flags);
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (offset_in_page(addr)) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
                if (offset_in_page(addr))
                        return addr;
        }

check_asce_limit:
        return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = mmap_base_legacy(random_factor);
                clear_bit(MMF_TOPDOWN, &mm->flags);
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                set_bit(MMF_TOPDOWN, &mm->flags);
        }
}

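/*
 * Mapping from VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations to the
 * s390 page protection flags used by vm_get_page_prot().
 */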
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_RO,
        [VM_WRITE]                                      = PAGE_RO,
        [VM_WRITE | VM_READ]                            = PAGE_RO,
        [VM_EXEC]                                       = PAGE_RX,
        [VM_EXEC | VM_READ]                             = PAGE_RX,
        [VM_EXEC | VM_WRITE]                            = PAGE_RX,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_RX,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_RO,
        [VM_SHARED | VM_WRITE]                          = PAGE_RW,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_RW,
        [VM_SHARED | VM_EXEC]                           = PAGE_RX,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_RX,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_RWX,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT