arch/parisc/kernel/sys_parisc.c
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *    PARISC specific syscalls
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 *    Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)             \
        (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)        \
         { /* nothing */ }
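/* Note: SET_LAST_MMAP() is intentionally a no-op; the "last mmap" value is
 * never stored, it is always re-derived from the file's f_mapping pointer
 * by GET_LAST_MMAP() above. */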

static int get_offset(unsigned int last_mmap)
{
        return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
                                         unsigned long pgoff)
{
        return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

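/* Round addr up to the next SHM_COLOUR boundary, then add the cache-colour
 * offset for this mapping so that shared mappings of the same object land
 * on the same colour in the virtually-indexed caches. */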
static inline unsigned long COLOR_ALIGN(unsigned long addr,
                         unsigned int last_mmap, unsigned long pgoff)
{
        unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
        unsigned long off  = (SHM_COLOUR-1) &
                (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

        return base + off;
}

/*
 * Top of mmap area (just below the process stack).
 */

/*
 * When called from arch_get_unmapped_area(), rlim_stack will be NULL,
 * indicating that "current" should be used instead of a passed-in
 * value from the exec bprm as done with arch_pick_mmap_layout().
 */
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
        unsigned long stack_base;

        /* Limit stack size - see setup_arg_pages() in fs/exec.c */
        stack_base = rlim_stack ? rlim_stack->rlim_max
                                : rlimit_max(RLIMIT_STACK);
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;

        /* Add space for stack randomization. */
        if (current->flags & PF_RANDOMIZE)
                stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        return PAGE_ALIGN(STACK_TOP - stack_base);
}


unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        unsigned long task_size = TASK_SIZE;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;

        if (len > task_size)
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        last_mmap = GET_LAST_MMAP(filp);

        if (flags & MAP_FIXED) {
                if ((flags & MAP_SHARED) && last_mmap &&
                    (addr - shared_align_offset(last_mmap, pgoff))
                                & (SHM_COLOUR - 1))
                        return -EINVAL;
                goto found_addr;
        }

        if (addr) {
                if (do_color_align && last_mmap)
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma_prev(mm, addr, &prev);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)) &&
                    (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }

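        /* No usable hint: let vm_unmapped_area() search bottom-up between
         * mmap_legacy_base and the top of the mmap area, keeping SHM_COLOUR
         * alignment for mappings that need cache colouring. */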
        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_legacy_base;
        info.high_limit = mmap_upper_limit(NULL);
        info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
        info.align_offset = shared_align_offset(last_mmap, pgoff);
        addr = vm_unmapped_area(&info);

found_addr:
        if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
                SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

        return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;
        last_mmap = GET_LAST_MMAP(filp);

        if (flags & MAP_FIXED) {
                if ((flags & MAP_SHARED) && last_mmap &&
                    (addr - shared_align_offset(last_mmap, pgoff))
                        & (SHM_COLOUR - 1))
                        return -EINVAL;
                goto found_addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_color_align && last_mmap)
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)) &&
                    (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }

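        /* No usable hint: search top-down from just below mm->mmap_base,
         * with the same cache-colour alignment constraints as the
         * bottom-up case. */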
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
        info.align_offset = shared_align_offset(last_mmap, pgoff);
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                goto found_addr;
        VM_BUG_ON(addr != -ENOMEM);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
        if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
                SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

        return addr;
}

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        /* The parisc stack always grows up, so an unlimited stack should
         * not be an indicator to use the legacy memory layout.
         * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
         *      return 1;
         */

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
        unsigned long rnd = 0;

        if (current->flags & PF_RANDOMIZE)
                rnd = get_random_int() & MMAP_RND_MASK;

        return rnd << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
        return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_legacy_base(void)
{
        return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_legacy_base = mmap_legacy_base();
        mm->mmap_base = mmap_upper_limit(rlim_stack);

        if (mmap_is_legacy()) {
                mm->mmap_base = mm->mmap_legacy_base;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}


asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
{
        /* Make sure the shift for mmap2 is constant (12), no matter what
         * PAGE_SIZE we have. */
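        /* Userspace passes pgoff in 4 kB units; with, for example, a 16 kB
         * PAGE_SIZE (PAGE_SHIFT == 14) the value is shifted right by 2
         * (divided by 4) to convert it into native page units for
         * ksys_mmap_pgoff(). */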
        return ksys_mmap_pgoff(addr, len, prot, flags, fd,
                               pgoff >> (PAGE_SHIFT - 12));
}

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
                unsigned long prot, unsigned long flags, unsigned long fd,
                unsigned long offset)
{
        if (!(offset & ~PAGE_MASK)) {
                return ksys_mmap_pgoff(addr, len, prot, flags, fd,
                                        offset >> PAGE_SHIFT);
        } else {
                return -EINVAL;
        }
}

/* Fucking broken ABI */

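/* The wrappers below reassemble 64-bit offsets and lengths that are passed
 * in as two 32-bit halves (high word first) before handing them to the
 * generic ksys_*() helpers. */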
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
                                        unsigned int high, unsigned int low)
{
        return ksys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
                                        unsigned int high, unsigned int low)
{
        return ksys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
        return ksys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
        return ksys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
        return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
                                        unsigned int high, unsigned int low)
{
        return ksys_truncate(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
                                        unsigned int high, unsigned int low)
{
        return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
                                        unsigned int high, unsigned int low)
{
        return ksys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
                        size_t count, unsigned int high, unsigned int low)
{
        return ksys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
                                    size_t count)
{
        return ksys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
                        unsigned int high_off, unsigned int low_off,
                        unsigned int high_len, unsigned int low_len, int advice)
{
        return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
                        (loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
                        u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
                        unsigned int flags)
{
        return ksys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
                        (loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
                                u32 lenhi, u32 lenlo)
{
        return ksys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
                              ((u64)lenhi << 32) | lenlo);
}

long parisc_personality(unsigned long personality)
{
        long err;

        if (personality(current->personality) == PER_LINUX32
            && personality(personality) == PER_LINUX)
                personality = (personality & ~PER_MASK) | PER_LINUX32;

        err = sys_personality(personality);
        if (personality(err) == PER_LINUX32)
                err = (err & ~PER_MASK) | PER_LINUX;

        return err;
}