x86-32, fpu: Rewrite fpu_save_init()
author		Brian Gerst <brgerst@gmail.com>
		Sat, 4 Sep 2010 01:17:18 +0000 (21:17 -0400)
committer	H. Peter Anvin <hpa@linux.intel.com>
		Thu, 9 Sep 2010 21:17:31 +0000 (14:17 -0700)
Rewrite fpu_save_init() to prepare for merging with 64-bit.
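
The run-time FNSAVE/FXSAVE selection via alternative_input() and the
goto-based flow are replaced with plain C branches: a new use_fxsr()
helper (mirroring use_xsave()) dispatches to a new fpu_fxsave() helper,
the no-FXSR case does an FNSAVE and returns early, and the XSAVE path
returns early when the xsave header reports the FP state as init.
Condensed from the diff below (the elided tail is unchanged), the
resulting function is roughly:

	static inline void fpu_save_init(struct fpu *fpu)
	{
		if (use_xsave()) {
			fpu_xsave(fpu);
			/* done if the xsave header says FP is still in its init state */
			if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
				return;
		} else if (use_fxsr()) {
			fpu_fxsave(fpu);
		} else {
			/* no FXSR: plain FNSAVE; the tail below only applies to the fxsave layout */
			asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave));
			return;
		}

		/* shared tail: clear pending exceptions, then the FXSAVE-leak workaround */
		if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
			asm volatile("fnclex");
		/* ... alternative_input() FDP/FIP/FOP clearing, unchanged ... */
	}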

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1283563039-3466-12-git-send-email-brgerst@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/include/asm/i387.h

index 907967e4fa8d8aed5070f180c38827c9a884a198..b45abefb89f2a8c513abb12b73a18ba246eb5fcc 100644
@@ -73,6 +73,11 @@ static __always_inline __pure bool use_xsave(void)
        return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
+static __always_inline __pure bool use_fxsr(void)
+{
+        return static_cpu_has(X86_FEATURE_FXSR);
+}
+
 extern void __sanitize_i387_state(struct task_struct *);
 
 static inline void sanitize_i387_state(struct task_struct *tsk)
@@ -211,6 +216,12 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
        return 0;
 }
 
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+       asm volatile("fxsave %[fx]"
+                    : [fx] "=m" (fpu->state->fxsave));
+}
+
 /* We need a safe address that is cheap to find and that is already
    in L1 during context switch. The best choices are unfortunately
    different for UP and SMP */
@@ -226,36 +237,24 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 static inline void fpu_save_init(struct fpu *fpu)
 {
        if (use_xsave()) {
-               struct xsave_struct *xstate = &fpu->state->xsave;
-               struct i387_fxsave_struct *fx = &fpu->state->fxsave;
-
                fpu_xsave(fpu);
 
                /*
                 * xsave header may indicate the init state of the FP.
                 */
-               if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
-                       goto end;
-
-               if (unlikely(fx->swd & X87_FSW_ES))
-                       asm volatile("fnclex");
-
-               /*
-                * we can do a simple return here or be paranoid :)
-                */
-               goto clear_state;
+               if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+                       return;
+       } else if (use_fxsr()) {
+               fpu_fxsave(fpu);
+       } else {
+               asm volatile("fnsave %[fx]; fwait"
+                            : [fx] "=m" (fpu->state->fsave));
+               return;
        }
 
-       /* Use more nops than strictly needed in case the compiler
-          varies code */
-       alternative_input(
-               "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
-               "fxsave %[fx]\n"
-               "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
-               X86_FEATURE_FXSR,
-               [fx] "m" (fpu->state->fxsave),
-               [fsw] "m" (fpu->state->fxsave.swd) : "memory");
-clear_state:
+       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
+               asm volatile("fnclex");
+
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending.  Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
@@ -265,8 +264,6 @@ clear_state:
                "fildl %[addr]",        /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
-end:
-       ;
 }
 
 #endif /* CONFIG_X86_64 */
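
For context (not part of this patch), fpu_save_init() is reached from the
lazy-FPU switch path in this same header.  At the time, the immediate caller
looked roughly like the sketch below; __save_init_fpu() and TS_USEDFPU are
taken from the surrounding kernel code of that era rather than from this
diff, so treat it as an illustration only:

	static inline void __save_init_fpu(struct task_struct *tsk)
	{
		/* dump the live FPU/SSE/extended state into tsk->thread.fpu */
		fpu_save_init(&tsk->thread.fpu);
		/* the task's FPU state is no longer live in the registers */
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
	}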