2 * linux/arch/m68k/kernel/sys_m68k.c
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/m68k
9 #include <linux/capability.h>
10 #include <linux/errno.h>
11 #include <linux/sched.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/ipc.h>
25 #include <asm/setup.h>
26 #include <asm/uaccess.h>
27 #include <asm/cachectl.h>
28 #include <asm/traps.h>
30 #include <asm/unistd.h>
31 #include <linux/elf.h>
34 asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
35 unsigned long error_code);
/*
 * mmap2 syscall entry: 'pgoff' is in 4 KiB units (per the m68k
 * mmap64(3) ABI) and is passed straight through to sys_mmap_pgoff().
 * NOTE(review): this is a sampled view of the file -- interior lines
 * of the body (braces, locals) are elided here.
 */
37 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
38 unsigned long prot, unsigned long flags,
39 unsigned long fd, unsigned long pgoff)
42  * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
43  * so we need to shift the argument down by 1; m68k mmap64(3)
44  * (in libc) expects the last argument of mmap2 in 4Kb units.
46 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
50 * Perform the select(nd, in, out, ex, tv) and mmap() system
51 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
52 * handle more than 4 system call parameters, so these system calls
53 * used a memory block for parameter passing..
/*
 * Argument block for the legacy single-pointer mmap() syscall
 * (i386-heritage convention; see comment above).
 * NOTE(review): the struct members are elided in this sampled view.
 */
56 struct mmap_arg_struct {
/*
 * old_mmap: copy the six mmap arguments from user space, reject a
 * non-page-aligned offset, and forward to sys_mmap_pgoff() with the
 * offset converted from bytes to pages.
 */
65 asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
67 struct mmap_arg_struct a;
/* bad user pointer -- presumably returns -EFAULT; return elided here */
70 if (copy_from_user(&a, arg, sizeof(a)))
/* offset must be page aligned -- error path elided in this view */
74 if (a.offset & ~PAGE_MASK)
77 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
78 a.offset >> PAGE_SHIFT);
/*
 * sys_ipc: single entry point multiplexing the SysV IPC syscalls
 * (sem*, msg*, shm*) on 'call'; the high 16 bits of 'call' carry an
 * ABI version number.
 * NOTE(review): the switch/case scaffolding is elided in this sampled
 * view -- only the dispatch targets remain visible.
 */
84  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
86  * This is really horribly ugly.
88 asmlinkage int sys_ipc (uint call, int first, int second,
89 int third, void __user *ptr, long fifth)
93 version = call >> 16; /* hack for backward compatibility */
/* --- semaphore calls --- */
99 return sys_semop (first, ptr, second);
101 return sys_semget (first, second, third);
/* SEMCTL passes its fourth argument indirectly through 'ptr' */
106 if (get_user(fourth.__pad, (void __user *__user *) ptr))
108 return sys_semctl (first, second, third, fourth);
/* --- message-queue calls --- */
116 return sys_msgsnd (first, ptr, second, third);
/* old-ABI msgrcv packs msgp and msgtyp in a struct ipc_kludge */
120 struct ipc_kludge tmp;
123 if (copy_from_user (&tmp, ptr, sizeof (tmp)))
125 return sys_msgrcv (first, tmp.msgp, second,
129 return sys_msgrcv (first, ptr,
130 second, fifth, third);
133 return sys_msgget ((key_t) first, second);
135 return sys_msgctl (first, second, ptr);
/* --- shared-memory calls --- */
145 ret = do_shmat (first, ptr, second, &raddr);
/* on success, write the attach address back through 'third' */
148 return put_user (raddr, (ulong __user *) third);
152 return sys_shmdt (ptr);
154 return sys_shmget (first, second, third);
156 return sys_shmctl (first, second, ptr);
164 /* Convert virtual (user) address VADDR to physical address PADDR */
/*
 * 68040 translation: PTEST-style probe via %mmusr -- yields the
 * physical page address when the page is resident (MMU_R_040 set),
 * else 0.
 * NOTE(review): several continuation lines of this macro are elided in
 * this sampled view; any comments must stay outside the backslash
 * continuations, so none are added inside.
 */
165 #define virt_to_phys_040(vaddr) \
167 unsigned long _mmusr, _paddr; \
169 __asm__ __volatile__ (".chip 68040\n\t" \
171 "movec %%mmusr,%0\n\t" \
175 _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
/*
 * cache_flush_040: flush/invalidate the 68040 caches over
 * [addr, addr+len) with the requested scope (ALL / LINE / PAGE) and
 * cache selection (data / insn / both).
 * NOTE(review): this sampled view elides the switch scaffolding, loop
 * bodies and asm operand lists -- the comments below are orientation
 * only; confirm against the full source before editing.
 */
180 cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
182 unsigned long paddr, i;
/* whole-cache flush (asm bodies elided in this view) */
186 case FLUSH_SCOPE_ALL:
189 case FLUSH_CACHE_DATA:
190 /* This nop is needed for some broken versions of the 68040. */
191 __asm__ __volatile__ ("nop\n\t"
196 case FLUSH_CACHE_INSN:
197 __asm__ __volatile__ ("nop\n\t"
203 case FLUSH_CACHE_BOTH:
204 __asm__ __volatile__ ("nop\n\t"
/* line-granular flush: walk 16-byte cache lines by physical address */
212 case FLUSH_SCOPE_LINE:
213 /* Find the physical address of the first mapped page in the
215 if ((paddr = virt_to_phys_040(addr))) {
216 paddr += addr & ~(PAGE_MASK | 15);
217 len = (len + (addr & 15) + 15) >> 4;
219 unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
228 if ((paddr = virt_to_phys_040(addr)))
/* convert byte count to 16-byte cache lines, rounding up */
235 len = (len + 15) >> 4;
237 i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
242 case FLUSH_CACHE_DATA:
243 __asm__ __volatile__ ("nop\n\t"
245 "cpushl %%dc,(%0)\n\t"
249 case FLUSH_CACHE_INSN:
250 __asm__ __volatile__ ("nop\n\t"
252 "cpushl %%ic,(%0)\n\t"
257 case FLUSH_CACHE_BOTH:
258 __asm__ __volatile__ ("nop\n\t"
260 "cpushl %%bc,(%0)\n\t"
268 * No need to page align here since it is done by
269 * virt_to_phys_040().
273 /* Recompute physical address when crossing a page
277 if ((paddr = virt_to_phys_040(addr)))
/* page-granular flush: cpushp per page, skipping unmapped pages */
291 case FLUSH_SCOPE_PAGE:
292 len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
293 for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
295 if (!(paddr = virt_to_phys_040(addr)))
299 case FLUSH_CACHE_DATA:
300 __asm__ __volatile__ ("nop\n\t"
302 "cpushp %%dc,(%0)\n\t"
306 case FLUSH_CACHE_INSN:
307 __asm__ __volatile__ ("nop\n\t"
309 "cpushp %%ic,(%0)\n\t"
314 case FLUSH_CACHE_BOTH:
315 __asm__ __volatile__ ("nop\n\t"
317 "cpushp %%bc,(%0)\n\t"
/*
 * 68060 counterpart of virt_to_phys_040: translate a user virtual
 * address to a physical address (presumably via the 060 PLPA
 * instructions -- the asm body is elided in this sampled view, so
 * verify against the full source).
 */
328 #define virt_to_phys_060(vaddr) \
330 unsigned long paddr; \
331 __asm__ __volatile__ (".chip 68060\n\t" \
/*
 * cache_flush_060: 68060 counterpart of cache_flush_040.
 * Per the comment retained below: cpush %dc flushes the data cache
 * (remains valid with our %cacr setup), cpush %ic invalidates the
 * insn cache, cpush %bc does both.
 * NOTE(review): sampled view -- switch scaffolding, loop bodies and
 * asm operand lists are elided; comments below are orientation only.
 */
340 cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
342 unsigned long paddr, i;
346  * cpush %dc : flush DC, remains valid (with our %cacr setup)
347  * cpush %ic : invalidate IC
348  * cpush %bc : flush DC + invalidate IC
/* whole-cache flush (asm bodies elided in this view) */
352 case FLUSH_SCOPE_ALL:
355 case FLUSH_CACHE_DATA:
356 __asm__ __volatile__ (".chip 68060\n\t"
360 case FLUSH_CACHE_INSN:
361 __asm__ __volatile__ (".chip 68060\n\t"
366 case FLUSH_CACHE_BOTH:
367 __asm__ __volatile__ (".chip 68060\n\t"
/* line-granular flush: walk 16-byte cache lines by physical address */
374 case FLUSH_SCOPE_LINE:
375 /* Find the physical address of the first mapped page in the
379 if (!(paddr = virt_to_phys_060(addr))) {
380 unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
389 if ((paddr = virt_to_phys_060(addr)))
/* convert byte count to 16-byte cache lines, rounding up */
397 len = (len + 15) >> 4;
398 i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
403 case FLUSH_CACHE_DATA:
404 __asm__ __volatile__ (".chip 68060\n\t"
405 "cpushl %%dc,(%0)\n\t"
409 case FLUSH_CACHE_INSN:
410 __asm__ __volatile__ (".chip 68060\n\t"
411 "cpushl %%ic,(%0)\n\t"
416 case FLUSH_CACHE_BOTH:
417 __asm__ __volatile__ (".chip 68060\n\t"
418 "cpushl %%bc,(%0)\n\t"
427 * We just want to jump to the first cache line
434 /* Recompute physical address when crossing a page
438 if ((paddr = virt_to_phys_060(addr)))
/* page-granular flush: cpushp per page, skipping unmapped pages */
452 case FLUSH_SCOPE_PAGE:
453 len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
454 addr &= PAGE_MASK; /* Workaround for bug in some
455 revisions of the 68060 */
456 for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
458 if (!(paddr = virt_to_phys_060(addr)))
462 case FLUSH_CACHE_DATA:
463 __asm__ __volatile__ (".chip 68060\n\t"
464 "cpushp %%dc,(%0)\n\t"
468 case FLUSH_CACHE_INSN:
469 __asm__ __volatile__ (".chip 68060\n\t"
470 "cpushp %%ic,(%0)\n\t"
475 case FLUSH_CACHE_BOTH:
476 __asm__ __volatile__ (".chip 68060\n\t"
477 "cpushp %%bc,(%0)\n\t"
488 /* sys_cacheflush -- flush (part of) the processor cache. */
/*
 * Validates the scope/cache arguments, requires CAP_SYS_ADMIN for a
 * whole-cache flush, checks that [addr, addr+len) lies within one of
 * the caller's VMAs (with an addr+len overflow check), then dispatches
 * per CPU family: 020/030 directly via %cacr/%caar, 040/060 via
 * cache_flush_040()/cache_flush_060() with 'scope' widened for large
 * lengths so userspace cannot force a slow line-by-line flush of
 * megabytes of memory.
 * NOTE(review): sampled view -- braces, labels and some return paths
 * are elided here.
 */
490 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
492 struct vm_area_struct *vma;
/* reject out-of-range scope or cache bits outside FLUSH_CACHE_BOTH */
496 if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
497 cache & ~FLUSH_CACHE_BOTH)
500 if (scope == FLUSH_SCOPE_ALL) {
501 /* Only the superuser may explicitly flush the whole cache. */
503 if (!capable(CAP_SYS_ADMIN))
507 * Verify that the specified address region actually belongs
510 vma = find_vma (current->mm, addr);
512 /* Check for overflow. */
513 if (addr + len < addr)
515 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
519 if (CPU_IS_020_OR_030) {
/* short line-scope request: per-line flush via %caar writes */
520 if (scope == FLUSH_SCOPE_LINE && len < 256) {
522 __asm__ ("movec %%cacr, %0" : "=r" (cacr));
523 if (cache & FLUSH_CACHE_INSN)
525 if (cache & FLUSH_CACHE_DATA)
529 __asm__ __volatile__ ("movec %1, %%caar\n\t"
532 : "r" (cacr), "r" (addr));
536 /* Flush the whole cache, even if page granularity requested. */
538 __asm__ ("movec %%cacr, %0" : "=r" (cacr));
539 if (cache & FLUSH_CACHE_INSN)
541 if (cache & FLUSH_CACHE_DATA)
543 __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
549 * 040 or 060: don't blindly trust 'scope', someone could
550 * try to flush a few megs of memory.
/* widen scope for big ranges: >=3 pages -> PAGE, >=10 pages -> ALL */
553 if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
554 scope=FLUSH_SCOPE_PAGE;
555 if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
556 scope=FLUSH_SCOPE_ALL;
558 ret = cache_flush_040 (addr, scope, cache, len);
559 } else if (CPU_IS_060) {
560 ret = cache_flush_060 (addr, scope, cache, len);
/*
 * Report the MMU page size to userspace -- presumably returns
 * PAGE_SIZE; the body is elided in this sampled view.
 */
568 asmlinkage int sys_getpagesize(void)
574  * Do a system call from kernel instead of calling sys_execve so we
575  * end up with proper pt_regs.
/*
 * Issues the execve trap (#0) directly, with the syscall number in
 * %d0 and the three arguments in %d1-%d3, so the syscall entry path
 * builds a proper pt_regs frame. Presumably returns __res (the value
 * left in %d0) -- the return statement is elided in this sampled view.
 */
577 int kernel_execve(const char *filename, char *const argv[], char *const envp[])
579 register long __res asm ("%d0") = __NR_execve;
580 register long __a asm ("%d1") = (long)(filename);
581 register long __b asm ("%d2") = (long)(argv);
582 register long __c asm ("%d3") = (long)(envp);
583 asm volatile ("trap #0" : "+d" (__res)
584 : "d" (__a), "d" (__b), "d" (__c));
/* Read back the per-thread TLS pointer stored in thread_info. */
588 asmlinkage unsigned long sys_get_thread_area(void)
590 return current_thread_info()->tp_value;
/*
 * Store the user-supplied TLS pointer in thread_info (the return
 * statement is elided in this sampled view).
 */
593 asmlinkage int sys_set_thread_area(unsigned long tp)
595 current_thread_info()->tp_value = tp;
599 /* This syscall gets its arguments in A0 (mem), D2 (oldval) and
/*
 * Userspace compare-and-swap on a 32-bit word, for m68k variants
 * lacking a usable CAS in user mode. Walks the page tables under
 * mmap_sem to find a present, dirty, writable PTE for 'mem'; under
 * the PTE lock it compares the current value against 'oldval' and
 * presumably stores 'newval' on a match (the load/store lines are
 * elided in this sampled view). If the PTE is not ready, it simulates
 * an RMW write fault via do_page_fault() so COW/accessed state is
 * resolved, then retries.
 */
602 sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
603 unsigned long __user * mem)
605 /* This was borrowed from ARM's implementation. */
607 struct mm_struct *mm = current->mm;
612 unsigned long mem_value;
614 down_read(&mm->mmap_sem);
615 pgd = pgd_offset(mm, (unsigned long)mem);
616 if (!pgd_present(*pgd))
618 pmd = pmd_offset(pgd, (unsigned long)mem);
619 if (!pmd_present(*pmd))
621 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
/* page must be present, dirty and writable, else take the fault path */
622 if (!pte_present(*pte) || !pte_dirty(*pte)
623 || !pte_write(*pte)) {
624 pte_unmap_unlock(pte, ptl);
629 if (mem_value == oldval)
632 pte_unmap_unlock(pte, ptl);
633 up_read(&mm->mmap_sem);
637 up_read(&mm->mmap_sem);
638 /* This is not necessarily a bad access, we can get here if
639 a memory we're trying to write to should be copied-on-write.
640 Make the kernel do the necessary page stuff, then re-iterate.
641 Simulate a write access fault to do that. */
643 /* The first argument of the function corresponds to
644 D1, which is the first field of struct pt_regs. */
645 struct pt_regs *fp = (struct pt_regs *)&newval;
647 /* '3' is an RMW flag. */
648 if (do_page_fault(fp, (unsigned long)mem, 3))
649 /* If the do_page_fault() failed, we don't
650 have anything meaningful to return.
651 There should be a SIGSEGV pending for
/*
 * Memory-barrier syscall; a no-op on uniprocessor m68k (the return
 * statement is elided in this sampled view).
 */
658 asmlinkage int sys_atomic_barrier(void)
660 /* no code needed for uniprocs */