powerpc: Add a proper syscall for switching endianness
author: Michael Ellerman <mpe@ellerman.id.au>
Sat, 28 Mar 2015 10:35:16 +0000 (21:35 +1100)
committer: Michael Ellerman <mpe@ellerman.id.au>
Sat, 28 Mar 2015 11:03:40 +0000 (22:03 +1100)
We currently have a "special" syscall for switching endianness. This is
syscall number 0x1ebe, which is handled explicitly in the 64-bit syscall
exception entry.

That has a few problems. Firstly, the syscall number is outside of the
usual range, which confuses various tools. For example strace doesn't
recognise the syscall at all.

Secondly it's handled explicitly as a special case in the syscall
exception entry, which is complicated enough without it.

As a first step toward removing the special syscall, we need to add a
regular syscall that implements the same functionality.

The logic is simple: it toggles the MSR_LE bit in the userspace
MSR. This is the same as the special syscall, with the caveat that the
special syscall clobbers fewer registers.

This version clobbers r9-r12, XER, CTR, and CR0-1,5-7.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/syscalls.c
arch/powerpc/kernel/systbl.S
arch/powerpc/kernel/systbl_chk.c
arch/powerpc/platforms/cell/spu_callbacks.c

index 91062eef582f9c1ed8d824f9e16bcde8a0f8714c..f1863a138b4a496d726bbfe791b2c4f034ea5973 100644 (file)
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
 SYSCALL_SPU(bpf)
 COMPAT_SYS(execveat)
+PPC64ONLY(switch_endian)
index 36b79c31eedda5cb090e73cafc6a64c937fcc5f7..f4f8b667d75be5614bd8f706b5573e21cdd9a685 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          363
+#define __NR_syscalls          364
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index ef5b5b1f31231648135ed092af027933c8dc3f06..e4aa173dae62361e3823f3eda242b2802cdd5da1 100644 (file)
 #define __NR_memfd_create      360
 #define __NR_bpf               361
 #define __NR_execveat          362
+#define __NR_switch_endian     363
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index d180caf2d6de749ecab74fe51bb60146ab044b69..afbc20019c2efba2b81b7cd6298941753d9776b5 100644 (file)
@@ -356,6 +356,11 @@ _GLOBAL(ppc64_swapcontext)
        bl      sys_swapcontext
        b       .Lsyscall_exit
 
+_GLOBAL(ppc_switch_endian)
+       bl      save_nvgprs
+       bl      sys_switch_endian
+       b       .Lsyscall_exit
+
 _GLOBAL(ret_from_fork)
        bl      schedule_tail
        REST_NVGPRS(r1)
index b2702e87db0d44ae28de907ac5517d6ee23879fd..5fa92706444b7f6e874b9c7161c1127974c4aa04 100644 (file)
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
        return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
                             (u64)len_high << 32 | len_low, advice);
 }
+
+long sys_switch_endian(void)
+{
+       struct thread_info *ti;
+
+       current->thread.regs->msr ^= MSR_LE;
+
+       /*
+        * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+        * userspace. That also has the effect of restoring the non-volatile
+        * GPRs, so we saved them on the way in here.
+        */
+       ti = current_thread_info();
+       ti->flags |= _TIF_RESTOREALL;
+
+       return 0;
+}
index 7ab5d434e2eed9f6e120bb72c537bab29112c1b0..4d6b1d3a747f63cd8b9f54aff9ec3ebb2da11c94 100644 (file)
@@ -22,6 +22,7 @@
 #define PPC_SYS(func)          .llong  DOTSYM(ppc_##func),DOTSYM(ppc_##func)
 #define OLDSYS(func)           .llong  DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
 #define SYS32ONLY(func)                .llong  DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func)                .llong  DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
 #define SYSX(f, f3264, f32)    .llong  DOTSYM(f),DOTSYM(f3264)
 #else
 #define SYSCALL(func)          .long   sys_##func
@@ -29,6 +30,7 @@
 #define PPC_SYS(func)          .long   ppc_##func
 #define OLDSYS(func)           .long   sys_##func
 #define SYS32ONLY(func)                .long   sys_##func
+#define PPC64ONLY(func)                .long   sys_ni_syscall
 #define SYSX(f, f3264, f32)    .long   f32
 #endif
 #define SYSCALL_SPU(func)      SYSCALL(func)
index 238aa63ced8f97270a764757c77ddc3ccca17584..2384129f5893093f82bcd2f8896e2186127ecb49 100644 (file)
 #ifdef CONFIG_PPC64
 #define OLDSYS(func)           -1
 #define SYS32ONLY(func)                -1
+#define PPC64ONLY(func)                __NR_##func
 #else
 #define OLDSYS(func)           __NR_old##func
 #define SYS32ONLY(func)                __NR_##func
+#define PPC64ONLY(func)                -1
 #endif
 #define SYSX(f, f3264, f32)    -1
 
index b0ec78e8ad68882e0237beffa1612471cd4f4662..a494028b2cdfa52eb2accf00f54550c5958f729f 100644 (file)
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
 #define PPC_SYS(func)          sys_ni_syscall,
 #define OLDSYS(func)           sys_ni_syscall,
 #define SYS32ONLY(func)                sys_ni_syscall,
+#define PPC64ONLY(func)                sys_ni_syscall,
 #define SYSX(f, f3264, f32)    sys_ni_syscall,
 
 #define SYSCALL_SPU(func)      sys_##func,