x86/fpu: Restore from kernel memory on the 64-bit path too
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index fb04a3ded7ddb2ab284404f0caf0f1e6b1af23aa..2cf04fbcba5d7ba010564ae20efb9c452a60c522 100644
@@ -14,6 +14,7 @@
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include <asm/user.h>
 #include <asm/fpu/api.h>
 /*
  * High level FPU state handling functions:
  */
-extern void fpu__initialize(struct fpu *fpu);
 extern void fpu__prepare_read(struct fpu *fpu);
 extern void fpu__prepare_write(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
-extern void fpu__restore(struct fpu *fpu);
 extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
 extern void fpu__drop(struct fpu *fpu);
 extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
@@ -122,6 +121,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
        err;                                                            \
 })
 
+#define kernel_insn_err(insn, output, input...)                                \
+({                                                                     \
+       int err;                                                        \
+       asm volatile("1:" #insn "\n\t"                                  \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
+       err;                                                            \
+})
+
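For illustration (not part of the patch): kernel_insn_err() uses the classic .fixup/_ASM_EXTABLE pattern. The "0"(0) input ties operand 0 to %[err], so err starts out as 0; if the instruction faults, the fixup at label 3 stores -1 into err and execution resumes at label 2. A call such as kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)) therefore expands to roughly:

        int err;
        asm volatile("1: frstor %[fx]\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
                     : "0"(0), [fx] "m" (*fx));
        /* err is 0 if frstor completed, -1 if it faulted */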
 #define kernel_insn(insn, output, input...)                            \
        asm volatile("1:" #insn "\n\t"                                  \
                     "2:\n"                                             \
@@ -150,6 +164,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
                kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
+{
+       if (IS_ENABLED(CONFIG_X86_32))
+               return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+       else
+               return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 {
        if (IS_ENABLED(CONFIG_X86_32))
@@ -163,6 +185,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
+{
+       return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
 {
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -362,6 +389,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
        return err;
 }
 
+/*
+ * Restore xstate from a kernel-space xsave area; return an error code
+ * instead of raising an exception.
+ */
+static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+{
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err;
+
+       XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+
+       return err;
+}
+
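For illustration (not part of the patch): the copy_kernel_to_*_err() variants let a caller detect a failed restore from an in-kernel buffer (e.g. a corrupted xstate header) and recover instead of faulting. A minimal, hypothetical caller built on helpers that already exist in this header (use_xsave(), use_fxsr(), init_fpstate) could fall back to the init state:

        /*
         * Hypothetical sketch: restore a task's FPU state from its kernel
         * buffer and fall back to the known-good init state on failure.
         */
        static void restore_fpregs_or_init(struct fpu *fpu, u64 mask)
        {
                if (use_xsave()) {
                        if (copy_kernel_to_xregs_err(&fpu->state.xsave, mask))
                                copy_kernel_to_xregs(&init_fpstate.xsave, mask);
                } else if (use_fxsr()) {
                        if (copy_kernel_to_fxregs_err(&fpu->state.fxsave))
                                copy_kernel_to_fxregs(&init_fpstate.fxsave);
                } else {
                        if (copy_kernel_to_fregs_err(&fpu->state.fsave))
                                copy_kernel_to_fregs(&init_fpstate.fsave);
                }
        }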
 /*
  * These must be called with preempt disabled. Returns
  * 'true' if the FPU state is still intact and we can
@@ -486,6 +528,18 @@ static inline void fpregs_activate(struct fpu *fpu)
        trace_x86_fpu_regs_activated(fpu);
 }
 
+/*
+ * Internal helper, do not use directly. Use switch_fpu_return() instead.
+ */
+static inline void __fpregs_load_activate(struct fpu *fpu, int cpu)
+{
+       if (!fpregs_state_valid(fpu, cpu)) {
+               if (current->mm)
+                       copy_kernel_to_fpregs(&fpu->state);
+               fpregs_activate(fpu);
+       }
+}
+
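For illustration (not part of the patch): __fpregs_load_activate() is intended to be driven from a public entry point such as a switch_fpu_return()-style helper, which runs with preemption disabled and reloads current's registers only when they are not already live on this CPU. A hypothetical caller:

        /*
         * Hypothetical sketch of a caller; preemption must be disabled so
         * that the CPU number and register ownership cannot change underneath.
         */
        static void example_fpregs_reload_current(void)
        {
                struct fpu *fpu = &current->thread.fpu;

                if (!static_cpu_has(X86_FEATURE_FPU))
                        return;

                __fpregs_load_activate(fpu, smp_processor_id());
        }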
 /*
  * FPU state switching for scheduling.
  *
@@ -496,11 +550,22 @@ static inline void fpregs_activate(struct fpu *fpu)
  *
  *  - switch_fpu_finish() restores the new state as
  *    necessary.
+ *
+ * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
+ * hold the current thread's FPU register state.
+ *
+ * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may
+ * not hold current()'s FPU registers. The registers must be
+ * loaded before returning to userland or before their content
+ * is used in any other way.
+ *
+ * The FPU context is only stored/restored for a user task and
+ * ->mm is used to distinguish between kernel and user threads.
  */
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-       if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
+       if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
@@ -508,8 +573,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 
                /* But leave fpu_fpregs_owner_ctx! */
                trace_x86_fpu_regs_deactivated(old_fpu);
-       } else
-               old_fpu->last_cpu = -1;
+       }
 }
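For illustration (not part of the patch): the prepare/finish pair is driven from the context-switch path. A simplified sketch of the calling pattern in __switch_to() (not the exact code):

        /* Save the outgoing user task's registers into its fpstate. */
        switch_fpu_prepare(&prev->thread.fpu, cpu);

        /* ... switch stacks, segment registers, TLS, etc. ... */

        /* Make the incoming task's FPU (and PKRU) state usable again. */
        switch_fpu_finish(&next->thread.fpu, cpu);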
 
 /*
@@ -522,31 +586,27 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-       bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-                      new_fpu->initialized;
+       u32 pkru_val = init_pkru_value;
+       struct pkru_state *pk;
 
-       if (preload) {
-               if (!fpregs_state_valid(new_fpu, cpu))
-                       copy_kernel_to_fpregs(&new_fpu->state);
-               fpregs_activate(new_fpu);
-       }
-}
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return;
 
-/*
- * Needs to be preemption-safe.
- *
- * NOTE! user_fpu_begin() must be used only immediately before restoring
- * the save state. It does not do any saving/restoring on its own. In
- * lazy FPU mode, it is just an optimization to avoid a #NM exception,
- * the task can lose the FPU right after preempt_enable().
- */
-static inline void user_fpu_begin(void)
-{
-       struct fpu *fpu = &current->thread.fpu;
+       __fpregs_load_activate(new_fpu, cpu);
+
+       if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+               return;
 
-       preempt_disable();
-       fpregs_activate(fpu);
-       preempt_enable();
+       /*
+        * PKRU state is switched eagerly because it needs to be valid before
+        * we return to userland, e.g. for a copy_to_user() operation.
+        */
+       if (current->mm) {
+               pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
+               if (pk)
+                       pkru_val = pk->pkru;
+       }
+       __write_pkru(pkru_val);
 }
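For illustration (not part of the patch): unlike the rest of the xstate, PKRU cannot stay stale until a deferred register reload, because protection keys are also enforced for kernel accesses to user pages, e.g. in copy_to_user(). The lookup-or-default pattern used above could be written as a small hypothetical helper:

        /*
         * Hypothetical helper: the PKRU value recorded in a task's xsave
         * buffer, or the init value when PKRU is in its init state
         * (get_xsave_addr() returns NULL in that case).
         */
        static u32 xstate_pkru_or_init(struct fpu *fpu)
        {
                struct pkru_state *pk;

                pk = get_xsave_addr(&fpu->state.xsave, XFEATURE_PKRU);
                return pk ? pk->pkru : init_pkru_value;
        }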
 
 /*