/*
 *    PARISC specific syscalls
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 *    Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	 { /* nothing */ }

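/*
 * get_offset()/shared_align_offset() turn the GET_LAST_MMAP() cookie into a
 * byte offset within an SHM_COLOUR-sized window, so that mappings of the same
 * object stay congruent modulo SHM_COLOUR and do not create inequivalent
 * aliases in the virtually-indexed PA-RISC caches.
 */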
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off  = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}

/*
 * Top of mmap area (just below the process stack).
 */

static unsigned long mmap_upper_limit(void)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}

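/*
 * Bottom-up search for a free area.  When page colouring applies, align_mask
 * and align_offset constrain vm_unmapped_area() so that the address it
 * returns keeps the same offset within an SHM_COLOUR window as existing
 * mappings of the same object.
 */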
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit();
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

#ifdef CONFIG_64BIT
	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));
#endif

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* The parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit();

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
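	/* For example (illustrative arithmetic only): userspace passes pgoff
	   in 4 KiB (1 << 12) units.  With 4 KiB kernel pages PAGE_SHIFT - 12
	   is 0 and pgoff is used unchanged; with 64 KiB kernel pages it would
	   be 4, and pgoff >> 4 converts the 4 KiB units into 64 KiB page
	   units expected by sys_mmap_pgoff(). */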
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return sys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/* Broken ABI: 64-bit file offsets and lengths arrive split into high/low
 * 32-bit halves, so the wrappers below reassemble them before calling the
 * generic syscalls. */

#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			     ((u64)lenhi << 32) | lenlo);
}

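/*
 * Personality handling for 32-bit tasks: a task already running as
 * PER_LINUX32 that asks for PER_LINUX keeps the PER_LINUX32 base (only the
 * requested flag bits change), and a PER_LINUX32 result is reported back to
 * userspace as plain PER_LINUX.
 */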
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}