/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
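/*
 * Concretely: with 4Kb units, a mapping at file offset 0x10000 is
 * requested as pgoff == 0x10 regardless of the kernel's PAGE_SIZE; on
 * sun3, with 8Kb pages, the value would have to be shifted down by one
 * before being used as a page index, which is the deficiency the
 * comment above points out.
 */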
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which at the time could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */
struct mmap_arg_struct {
	unsigned long addr, len, prot, flags, fd, offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;
	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;
	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
out:
	return error;
}
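/*
 * For illustration (hypothetical userspace, hedged): a caller packs all
 * six mmap() arguments into one struct and passes a single pointer, e.g.
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ, .flags = MAP_PRIVATE,
 *		.fd = fd, .offset = 0,
 *	};
 *	ret = syscall(SYS_mmap, &a);
 *
 * so only one register-sized argument crosses the user/kernel boundary.
 */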
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};
asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			if (version == 0) {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			return sys_msgrcv (first, ptr,
					   second, fifth, third);
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT: {
			ulong raddr;
			ret = do_shmat (first, ptr, second, &raddr);
			if (ret)
				return ret;
			return put_user (raddr, (ulong __user *) third);
		}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	return -EINVAL;
}
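/*
 * Decoding example: the low 16 bits of 'call' select the operation and
 * the high 16 bits carry the interface version, so a (hedged,
 * glibc-style) semop(semid, sops, nsops) arrives here roughly as
 *
 *	syscall(SYS_ipc, SEMOP, semid, nsops, 0, sops, 0);
 *
 * which is how all SysV IPC primitives share a single syscall slot.
 */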
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr) : "a" (vaddr));	\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})
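/*
 * How the macro above works: "ptestr (%1)" makes the 68040 MMU walk the
 * page tables for a user read access and latch the result in the MMUSR
 * register; if the resident bit (MMU_R_040) is set, MMUSR also contains
 * the physical page frame, so masking with PAGE_MASK yields the
 * physical page address, and 0 is returned for an unmapped page.
 */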
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t.chip 68040\n\tcpusha %bc\n\t.chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t.chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\tplpar (%0)\n\t.chip 68k" \
			      : "=a" (paddr) : "0" (vaddr));	\
	(paddr); /* XXX */					\
})
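/*
 * Unlike the ptestr/movec-%mmusr pair used for the 68040 above, the
 * 68060 has a PLPA instruction ("physical load pointer address"; plpar
 * is the read variant) that translates the logical address in an
 * address register directly into a physical address; the callers below
 * treat a result of 0 as "page not mapped".
 */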
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %dc\n\t.chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %ic\n\t.chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\tcpusha %bc\n\t.chip 68k");
			break;
		}
		break;
	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t.chip 68k" : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
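/*
 * Hedged usage sketch (hypothetical userspace; the constants come from
 * asm/cachectl.h and the syscall number from unistd.h): after writing
 * freshly generated code, a JIT would flush it into coherence with
 *
 *	if (syscall(__NR_cacheflush, (unsigned long)code,
 *		    FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len) < 0)
 *		perror("cacheflush");
 *
 * relying on the scope-escalation heuristic above for large regions.
 */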
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation. */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that. */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs. */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag. */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process. */
				return 0xdeadbeef;
		}
	}
}
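/*
 * Userspace view (per the register comment above; the wrapper itself is
 * hypothetical): the caller places newval in D1, oldval in D2 and the
 * target address in A0, issues the syscall, and knows the exchange
 * happened iff the returned previous value equals oldval, mirroring a
 * hardware cas on CPUs that have one.
 */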
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}