__compute_return_epc() uses the CFC1 instruction, which might result in a
author: Ralf Baechle <ralf@linux-mips.org>
Mon, 9 May 2005 13:16:07 +0000 (13:16 +0000)
committer: Ralf Baechle <ralf@linux-mips.org>
Sat, 29 Oct 2005 18:31:13 +0000 (19:31 +0100)
coprocessor unusable exception since the process can lose its fpu
context by preemption.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/kernel/branch.c
include/asm-mips/fpu.h

index 01117e977a7fbba083fd02196f763f0e58028487..56aea5f526a7dbcef839b0ca352d58b745196923 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/branch.h>
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
+#include <asm/fpu.h>
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
@@ -161,10 +162,13 @@ int __compute_return_epc(struct pt_regs *regs)
         * And now the FPA/cp1 branch instructions.
         */
        case cop1_op:
-               if (!cpu_has_fpu)
-                       fcr31 = current->thread.fpu.soft.fcr31;
-               else
+               preempt_disable();
+               if (is_fpu_owner())
                        asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+               else
+                       fcr31 = current->thread.fpu.hard.fcr31;
+               preempt_enable();
+
                bit = (insn.i_format.rt >> 2);
                bit += (bit != 0);
                bit += 23;
index ea24e733b1bcf84a74e952258c5527dc9ad8b714..9c828b1f821875e273bdd3da8f9a6b3fcf98e27b 100644 (file)
@@ -80,9 +80,14 @@ do {                                                                 \
 
 #define clear_fpu_owner()      clear_thread_flag(TIF_USEDFPU)
 
+static inline int __is_fpu_owner(void)
+{
+       return test_thread_flag(TIF_USEDFPU);
+}
+
 static inline int is_fpu_owner(void)
 {
-       return cpu_has_fpu && test_thread_flag(TIF_USEDFPU);
+       return cpu_has_fpu && __is_fpu_owner();
 }
 
 static inline void own_fpu(void)
@@ -127,7 +132,7 @@ static inline void restore_fp(struct task_struct *tsk)
 static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
 {
        if (cpu_has_fpu) {
-               if ((tsk == current) && is_fpu_owner())
+               if ((tsk == current) && __is_fpu_owner())
                        _save_fp(current);
                return tsk->thread.fpu.hard.fpr;
        }