x86/fpu: Move i387.c and xsave.c to arch/x86/kernel/fpu/
authorIngo Molnar <mingo@kernel.org>
Wed, 22 Apr 2015 08:39:11 +0000 (10:39 +0200)
committerIngo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:15 +0000 (15:47 +0200)
Create a new subdirectory for the FPU support code in arch/x86/kernel/fpu/.

Rename 'i387.c' to 'core.c' - as this really collects the core FPU support
code, nothing i387 specific.

We'll better organize this directory in later patches.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/Makefile
arch/x86/kernel/fpu/Makefile [new file with mode: 0644]
arch/x86/kernel/fpu/core.c [new file with mode: 0644]
arch/x86/kernel/fpu/xsave.c [new file with mode: 0644]
arch/x86/kernel/i387.c [deleted file]
arch/x86/kernel/xsave.c [deleted file]

index 9bcd0b56ca1775aa82a9dee3a47614461bb7a881..febaf180621b128a03dd884fa91f58882e1ac36b 100644 (file)
@@ -44,7 +44,7 @@ obj-y                 += pci-iommu_table.o
 obj-y                  += resource.o
 
 obj-y                          += process.o
-obj-y                          += i387.o xsave.o
+obj-y                          += fpu/
 obj-y                          += ptrace.o
 obj-$(CONFIG_X86_32)           += tls.o
 obj-$(CONFIG_IA32_EMULATION)   += tls.o
diff --git a/arch/x86/kernel/fpu/Makefile b/arch/x86/kernel/fpu/Makefile
new file mode 100644 (file)
index 0000000..89fd66a
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Build rules for the FPU support code:
+#
+
+obj-y                          += core.o xsave.o
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
new file mode 100644 (file)
index 0000000..0110155
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ *  Copyright (C) 1994 Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *  General FPU state handling cleanups
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+#include <linux/module.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/sigcontext.h>
+#include <asm/processor.h>
+#include <asm/math_emu.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#include <asm/user.h>
+
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+void kernel_fpu_disable(void)
+{
+       WARN_ON(this_cpu_read(in_kernel_fpu));
+       this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+       this_cpu_write(in_kernel_fpu, false);
+}
+
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case when we return true; in the likely case
+ * the thread has FPU but we are not going to set/clear TS.
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+       if (this_cpu_read(in_kernel_fpu))
+               return false;
+
+       if (use_eager_fpu())
+               return true;
+
+       return !__thread_has_fpu(current) &&
+               (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+       struct pt_regs *regs = get_irq_regs();
+       return regs && user_mode(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+bool irq_fpu_usable(void)
+{
+       return !in_interrupt() ||
+               interrupted_user_mode() ||
+               interrupted_kernel_fpu_idle();
+}
+EXPORT_SYMBOL(irq_fpu_usable);
+
+void __kernel_fpu_begin(void)
+{
+       struct task_struct *me = current;
+
+       this_cpu_write(in_kernel_fpu, true);
+
+       if (__thread_has_fpu(me)) {
+               __save_init_fpu(me);
+       } else {
+               this_cpu_write(fpu_owner_task, NULL);
+               if (!use_eager_fpu())
+                       clts();
+       }
+}
+EXPORT_SYMBOL(__kernel_fpu_begin);
+
+void __kernel_fpu_end(void)
+{
+       struct task_struct *me = current;
+
+       if (__thread_has_fpu(me)) {
+               if (WARN_ON(restore_fpu_checking(me)))
+                       fpu_reset_state(me);
+       } else if (!use_eager_fpu()) {
+               stts();
+       }
+
+       this_cpu_write(in_kernel_fpu, false);
+}
+EXPORT_SYMBOL(__kernel_fpu_end);
+
+/*
+ * Save the FPU state (initialize it if necessary):
+ *
+ * This only ever gets called for the current task.
+ */
+void fpu__save(struct task_struct *tsk)
+{
+       WARN_ON(tsk != current);
+
+       preempt_disable();
+       if (__thread_has_fpu(tsk)) {
+               if (use_eager_fpu()) {
+                       __save_fpu(tsk);
+               } else {
+                       __save_init_fpu(tsk);
+                       __thread_fpu_end(tsk);
+               }
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(fpu__save);
+
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
+static struct i387_fxsave_struct fx_scratch;
+
+static void mxcsr_feature_mask_init(void)
+{
+       unsigned long mask = 0;
+
+       if (cpu_has_fxsr) {
+               memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+               asm volatile("fxsave %0" : "+m" (fx_scratch));
+               mask = fx_scratch.mxcsr_mask;
+               if (mask == 0)
+                       mask = 0x0000ffbf;
+       }
+       mxcsr_feature_mask &= mask;
+}
+
+static void fpstate_xstate_init_size(void)
+{
+       /*
+        * Note that xstate_size might be overwritten later during
+        * xsave_init().
+        */
+
+       if (!cpu_has_fpu) {
+               /*
+                * Disable xsave as we do not support it if i387
+                * emulation is enabled.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+               xstate_size = sizeof(struct i387_soft_struct);
+               return;
+       }
+
+       if (cpu_has_fxsr)
+               xstate_size = sizeof(struct i387_fxsave_struct);
+       else
+               xstate_size = sizeof(struct i387_fsave_struct);
+}
+
+/*
+ * Called on the boot CPU at bootup to set up the initial FPU state that
+ * is later cloned into all processes.
+ *
+ * Also called on secondary CPUs to set up the FPU state of their
+ * idle threads.
+ */
+void fpu__cpu_init(void)
+{
+       unsigned long cr0;
+       unsigned long cr4_mask = 0;
+
+#ifndef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu) {
+               pr_emerg("No FPU found and no math emulation present\n");
+               pr_emerg("Giving up\n");
+               for (;;)
+                       asm volatile("hlt");
+       }
+#endif
+       if (cpu_has_fxsr)
+               cr4_mask |= X86_CR4_OSFXSR;
+       if (cpu_has_xmm)
+               cr4_mask |= X86_CR4_OSXMMEXCPT;
+       if (cr4_mask)
+               cr4_set_bits(cr4_mask);
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
+       if (!cpu_has_fpu)
+               cr0 |= X86_CR0_EM;
+       write_cr0(cr0);
+
+       /*
+        * fpstate_xstate_init_size() is only called once, to avoid overriding
+        * 'xstate_size' during (secondary CPU) bootup or during CPU hotplug.
+        */
+       if (xstate_size == 0)
+               fpstate_xstate_init_size();
+
+       mxcsr_feature_mask_init();
+       xsave_init();
+       eager_fpu_init();
+}
+
+void fpstate_init(struct fpu *fpu)
+{
+       if (!cpu_has_fpu) {
+               finit_soft_fpu(&fpu->state->soft);
+               return;
+       }
+
+       memset(fpu->state, 0, xstate_size);
+
+       if (cpu_has_fxsr) {
+               fx_finit(&fpu->state->fxsave);
+       } else {
+               struct i387_fsave_struct *fp = &fpu->state->fsave;
+               fp->cwd = 0xffff037fu;
+               fp->swd = 0xffff0000u;
+               fp->twd = 0xffffffffu;
+               fp->fos = 0xffff0000u;
+       }
+}
+EXPORT_SYMBOL_GPL(fpstate_init);
+
+int fpstate_alloc(struct fpu *fpu)
+{
+       if (fpu->state)
+               return 0;
+
+       fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+       if (!fpu->state)
+               return -ENOMEM;
+
+       /* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
+       WARN_ON((unsigned long)fpu->state & 15);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(fpstate_alloc);
+
+/*
+ * Allocate the backing store for the current task's FPU registers
+ * and initialize the registers themselves as well.
+ *
+ * Can fail.
+ */
+int fpstate_alloc_init(struct task_struct *curr)
+{
+       int ret;
+
+       if (WARN_ON_ONCE(curr != current))
+               return -EINVAL;
+       if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
+               return -EINVAL;
+
+       /*
+        * Memory allocation at the first usage of the FPU and other state.
+        */
+       ret = fpstate_alloc(&curr->thread.fpu);
+       if (ret)
+               return ret;
+
+       fpstate_init(&curr->thread.fpu);
+
+       /* Safe to do for the current task: */
+       curr->flags |= PF_USED_MATH;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(fpstate_alloc_init);
+
+/*
+ * The _current_ task is using the FPU for the first time
+ * so initialize it and set the mxcsr to its default
+ * value at reset if we support XMM instructions and then
+ * remember the current task has used the FPU.
+ */
+static int fpu__unlazy_stopped(struct task_struct *child)
+{
+       int ret;
+
+       if (WARN_ON_ONCE(child == current))
+               return -EINVAL;
+
+       if (child->flags & PF_USED_MATH) {
+               task_disable_lazy_fpu_restore(child);
+               return 0;
+       }
+
+       /*
+        * Memory allocation at the first usage of the FPU and other state.
+        */
+       ret = fpstate_alloc(&child->thread.fpu);
+       if (ret)
+               return ret;
+
+       fpstate_init(&child->thread.fpu);
+
+       /* Safe to do for stopped child tasks: */
+       child->flags |= PF_USED_MATH;
+
+       return 0;
+}
+
+/*
+ * The xstateregs_active() routine is the same as the fpregs_active() routine,
+ * as the "regset->n" for the xstate regset will be updated based on the feature
+ * capabilities supported by the xsave.
+ */
+int fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+       return tsk_used_math(target) ? regset->n : 0;
+}
+
+int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+       return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
+}
+
+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               void *kbuf, void __user *ubuf)
+{
+       int ret;
+
+       if (!cpu_has_fxsr)
+               return -ENODEV;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       sanitize_i387_state(target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  &target->thread.fpu.state->fxsave, 0, -1);
+}
+
+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               const void *kbuf, const void __user *ubuf)
+{
+       int ret;
+
+       if (!cpu_has_fxsr)
+               return -ENODEV;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       sanitize_i387_state(target);
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &target->thread.fpu.state->fxsave, 0, -1);
+
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+       target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
+
+       /*
+        * update the header bits in the xsave header, indicating the
+        * presence of FP and SSE state.
+        */
+       if (cpu_has_xsave)
+               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+
+       return ret;
+}
+
+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               void *kbuf, void __user *ubuf)
+{
+       struct xsave_struct *xsave;
+       int ret;
+
+       if (!cpu_has_xsave)
+               return -ENODEV;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       xsave = &target->thread.fpu.state->xsave;
+
+       /*
+        * Copy the 48 bytes defined by the software first into the xstate
+        * memory layout in the thread struct, so that we can copy the entire
+        * xstateregs to the user using one user_regset_copyout().
+        */
+       memcpy(&xsave->i387.sw_reserved,
+               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
+       /*
+        * Copy the xstate memory layout.
+        */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       return ret;
+}
+
+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+                 unsigned int pos, unsigned int count,
+                 const void *kbuf, const void __user *ubuf)
+{
+       struct xsave_struct *xsave;
+       int ret;
+
+       if (!cpu_has_xsave)
+               return -ENODEV;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       xsave = &target->thread.fpu.state->xsave;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+       xsave->i387.mxcsr &= mxcsr_feature_mask;
+       xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
+       /*
+        * These bits must be zero.
+        */
+       memset(&xsave->xsave_hdr.reserved, 0, 48);
+       return ret;
+}
+
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+
+/*
+ * FPU tag word conversions.
+ */
+
+static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
+{
+       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+
+       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
+       tmp = ~twd;
+       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+       /* and move the valid bits to the lower byte. */
+       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+
+       return tmp;
+}
+
+#define FPREG_ADDR(f, n)       ((void *)&(f)->st_space + (n) * 16)
+#define FP_EXP_TAG_VALID       0
+#define FP_EXP_TAG_ZERO                1
+#define FP_EXP_TAG_SPECIAL     2
+#define FP_EXP_TAG_EMPTY       3
+
+static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
+{
+       struct _fpxreg *st;
+       u32 tos = (fxsave->swd >> 11) & 7;
+       u32 twd = (unsigned long) fxsave->twd;
+       u32 tag;
+       u32 ret = 0xffff0000u;
+       int i;
+
+       for (i = 0; i < 8; i++, twd >>= 1) {
+               if (twd & 0x1) {
+                       st = FPREG_ADDR(fxsave, (i - tos) & 7);
+
+                       switch (st->exponent & 0x7fff) {
+                       case 0x7fff:
+                               tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       case 0x0000:
+                               if (!st->significand[0] &&
+                                   !st->significand[1] &&
+                                   !st->significand[2] &&
+                                   !st->significand[3])
+                                       tag = FP_EXP_TAG_ZERO;
+                               else
+                                       tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       default:
+                               if (st->significand[3] & 0x8000)
+                                       tag = FP_EXP_TAG_VALID;
+                               else
+                                       tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       }
+               } else {
+                       tag = FP_EXP_TAG_EMPTY;
+               }
+               ret |= tag << (2 * i);
+       }
+       return ret;
+}
+
+/*
+ * FXSR floating point environment conversions.
+ */
+
+void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+{
+       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+       struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+
+       env->cwd = fxsave->cwd | 0xffff0000u;
+       env->swd = fxsave->swd | 0xffff0000u;
+       env->twd = twd_fxsr_to_i387(fxsave);
+
+#ifdef CONFIG_X86_64
+       env->fip = fxsave->rip;
+       env->foo = fxsave->rdp;
+       /*
+        * should be actually ds/cs at fpu exception time, but
+        * that information is not available in 64bit mode.
+        */
+       env->fcs = task_pt_regs(tsk)->cs;
+       if (tsk == current) {
+               savesegment(ds, env->fos);
+       } else {
+               env->fos = tsk->thread.ds;
+       }
+       env->fos |= 0xffff0000;
+#else
+       env->fip = fxsave->fip;
+       env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
+       env->foo = fxsave->foo;
+       env->fos = fxsave->fos;
+#endif
+
+       for (i = 0; i < 8; ++i)
+               memcpy(&to[i], &from[i], sizeof(to[0]));
+}
+
+void convert_to_fxsr(struct task_struct *tsk,
+                    const struct user_i387_ia32_struct *env)
+
+{
+       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
+       struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+
+       fxsave->cwd = env->cwd;
+       fxsave->swd = env->swd;
+       fxsave->twd = twd_i387_to_fxsr(env->twd);
+       fxsave->fop = (u16) ((u32) env->fcs >> 16);
+#ifdef CONFIG_X86_64
+       fxsave->rip = env->fip;
+       fxsave->rdp = env->foo;
+       /* cs and ds ignored */
+#else
+       fxsave->fip = env->fip;
+       fxsave->fcs = (env->fcs & 0xffff);
+       fxsave->foo = env->foo;
+       fxsave->fos = env->fos;
+#endif
+
+       for (i = 0; i < 8; ++i)
+               memcpy(&to[i], &from[i], sizeof(from[0]));
+}
+
+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
+              unsigned int pos, unsigned int count,
+              void *kbuf, void __user *ubuf)
+{
+       struct user_i387_ia32_struct env;
+       int ret;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+
+       if (!cpu_has_fxsr)
+               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                          &target->thread.fpu.state->fsave, 0,
+                                          -1);
+
+       sanitize_i387_state(target);
+
+       if (kbuf && pos == 0 && count == sizeof(env)) {
+               convert_from_fxsr(kbuf, target);
+               return 0;
+       }
+
+       convert_from_fxsr(&env, target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+}
+
+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+              unsigned int pos, unsigned int count,
+              const void *kbuf, const void __user *ubuf)
+{
+       struct user_i387_ia32_struct env;
+       int ret;
+
+       ret = fpu__unlazy_stopped(target);
+       if (ret)
+               return ret;
+
+       sanitize_i387_state(target);
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+
+       if (!cpu_has_fxsr)
+               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                         &target->thread.fpu.state->fsave, 0,
+                                         -1);
+
+       if (pos > 0 || count < sizeof(env))
+               convert_from_fxsr(&env, target);
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+       if (!ret)
+               convert_to_fxsr(target, &env);
+
+       /*
+        * update the header bit in the xsave header, indicating the
+        * presence of FP.
+        */
+       if (cpu_has_xsave)
+               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+       return ret;
+}
+
+/*
+ * FPU state for core dumps.
+ * This is only used for a.out dumps now.
+ * It is declared generically using elf_fpregset_t (which is
+ * struct user_i387_struct) but is in fact only used for 32-bit
+ * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
+ */
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
+{
+       struct task_struct *tsk = current;
+       int fpvalid;
+
+       fpvalid = !!used_math();
+       if (fpvalid)
+               fpvalid = !fpregs_get(tsk, NULL,
+                                     0, sizeof(struct user_i387_ia32_struct),
+                                     fpu, NULL);
+
+       return fpvalid;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+static int __init no_387(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_FPU);
+       return 1;
+}
+
+__setup("no387", no_387);
+
+/*
+ * Set the X86_FEATURE_FPU CPU-capability bit based on
+ * trying to execute an actual sequence of FPU instructions:
+ */
+void fpu__detect(struct cpuinfo_x86 *c)
+{
+       unsigned long cr0;
+       u16 fsw, fcw;
+
+       fsw = fcw = 0xffff;
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+       write_cr0(cr0);
+
+       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+                    : "+m" (fsw), "+m" (fcw));
+
+       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+               set_cpu_cap(c, X86_FEATURE_FPU);
+       else
+               clear_cpu_cap(c, X86_FEATURE_FPU);
+
+       /* The final cr0 value is set in fpu_init() */
+}
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
new file mode 100644 (file)
index 0000000..163b5cc
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * xsave/xrstor support.
+ *
+ * Author: Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bootmem.h>
+#include <linux/compat.h>
+#include <linux/cpu.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#include <asm/sigframe.h>
+#include <asm/tlbflush.h>
+#include <asm/xcr.h>
+
+/*
+ * Supported feature mask by the CPU and the kernel.
+ */
+u64 pcntxt_mask;
+
+/*
+ * Represents init state for the supported extended state.
+ */
+struct xsave_struct *init_xstate_buf;
+
+static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
+static unsigned int *xstate_offsets, *xstate_sizes;
+static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8];
+static unsigned int xstate_features;
+
+/*
+ * If a processor implementation discerns that a processor state component is
+ * in its initialized state it may modify the corresponding bit in the
+ * xsave_hdr.xstate_bv as '0', without modifying the corresponding memory
+ * layout in the case of xsaveopt. While presenting the xstate information to
+ * the user, we always ensure that the memory layout of a feature will be in
+ * the init state if the corresponding header bit is zero. This is to ensure
+ * that the user doesn't see some stale state in the memory layout during
+ * signal handling, debugging etc.
+ */
+void __sanitize_i387_state(struct task_struct *tsk)
+{
+       struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
+       int feature_bit = 0x2;
+       u64 xstate_bv;
+
+       if (!fx)
+               return;
+
+       xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
+
+       /*
+        * None of the feature bits are in init state. So nothing else
+        * to do for us, as the memory layout is up to date.
+        */
+       if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
+               return;
+
+       /*
+        * FP is in init state
+        */
+       if (!(xstate_bv & XSTATE_FP)) {
+               fx->cwd = 0x37f;
+               fx->swd = 0;
+               fx->twd = 0;
+               fx->fop = 0;
+               fx->rip = 0;
+               fx->rdp = 0;
+               memset(&fx->st_space[0], 0, 128);
+       }
+
+       /*
+        * SSE is in init state
+        */
+       if (!(xstate_bv & XSTATE_SSE))
+               memset(&fx->xmm_space[0], 0, 256);
+
+       xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;
+
+       /*
+        * Update all the other memory layouts for which the corresponding
+        * header bit is in the init state.
+        */
+       while (xstate_bv) {
+               if (xstate_bv & 0x1) {
+                       int offset = xstate_offsets[feature_bit];
+                       int size = xstate_sizes[feature_bit];
+
+                       memcpy(((void *) fx) + offset,
+                              ((void *) init_xstate_buf) + offset,
+                              size);
+               }
+
+               xstate_bv >>= 1;
+               feature_bit++;
+       }
+}
+
+/*
+ * Check for the presence of extended state information in the
+ * user fpstate pointer in the sigcontext.
+ */
+static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
+                                  void __user *fpstate,
+                                  struct _fpx_sw_bytes *fx_sw)
+{
+       int min_xstate_size = sizeof(struct i387_fxsave_struct) +
+                             sizeof(struct xsave_hdr_struct);
+       unsigned int magic2;
+
+       if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
+               return -1;
+
+       /* Check for the first magic field and other error scenarios. */
+       if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
+           fx_sw->xstate_size < min_xstate_size ||
+           fx_sw->xstate_size > xstate_size ||
+           fx_sw->xstate_size > fx_sw->extended_size)
+               return -1;
+
+       /*
+        * Check for the presence of second magic word at the end of memory
+        * layout. This detects the case where the user just copied the legacy
+        * fpstate layout without copying the extended state information
+        * in the memory layout.
+        */
+       if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
+           || magic2 != FP_XSTATE_MAGIC2)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Signal frame handlers.
+ */
+static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
+{
+       if (use_fxsr()) {
+               struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+               struct user_i387_ia32_struct env;
+               struct _fpstate_ia32 __user *fp = buf;
+
+               convert_from_fxsr(&env, tsk);
+
+               if (__copy_to_user(buf, &env, sizeof(env)) ||
+                   __put_user(xsave->i387.swd, &fp->status) ||
+                   __put_user(X86_FXSR_MAGIC, &fp->magic))
+                       return -1;
+       } else {
+               struct i387_fsave_struct __user *fp = buf;
+               u32 swd;
+               if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
+                       return -1;
+       }
+
+       return 0;
+}
+
+static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
+{
+       struct xsave_struct __user *x = buf;
+       struct _fpx_sw_bytes *sw_bytes;
+       u32 xstate_bv;
+       int err;
+
+       /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+       sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
+       err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
+
+       if (!use_xsave())
+               return err;
+
+       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+
+       /*
+        * Read the xstate_bv which we copied (directly from the cpu or
+        * from the state in task struct) to the user buffers.
+        */
+       err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+
+       /*
+        * For legacy compatibility, we always set FP/SSE bits in the bit
+        * vector while saving the state to the user context. This will
+        * enable us to capture any changes (during sigreturn) to
+        * the FP/SSE bits by the legacy applications which don't touch
+        * xstate_bv in the xsave header.
+        *
+        * xsave aware apps can change the xstate_bv in the xsave
+        * header as well as change any contents in the memory layout.
+        * xrestore as part of sigreturn will capture all the changes.
+        */
+       xstate_bv |= XSTATE_FPSSE;
+
+       err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
+
+       return err;
+}
+
+static inline int save_user_xstate(struct xsave_struct __user *buf)
+{
+       int err;
+
+       if (use_xsave())
+               err = xsave_user(buf);
+       else if (use_fxsr())
+               err = fxsave_user((struct i387_fxsave_struct __user *) buf);
+       else
+               err = fsave_user((struct i387_fsave_struct __user *) buf);
+
+       if (unlikely(err) && __clear_user(buf, xstate_size))
+               err = -EFAULT;
+       return err;
+}
+
+/*
+ * Save the fpu, extended register state to the user signal frame.
+ *
+ * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
+ *  state is copied.
+ *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
+ *
+ *     buf == buf_fx for 64-bit frames and 32-bit fsave frame.
+ *     buf != buf_fx for 32-bit frames with fxstate.
+ *
+ * If the fpu, extended register state is live, save the state directly
+ * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
+ * copy the thread's fpu state to the user frame starting at 'buf_fx'.
+ *
+ * If this is a 32-bit frame with fxstate, put a fsave header before
+ * the aligned state at 'buf_fx'.
+ *
+ * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
+ * indicating the absence/presence of the extended state to the user.
+ */
+int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+{
+       struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
+       struct task_struct *tsk = current;
+       int ia32_fxstate = (buf != buf_fx);
+
+       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
+                        config_enabled(CONFIG_IA32_EMULATION));
+
+       if (!access_ok(VERIFY_WRITE, buf, size))
+               return -EACCES;
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_get(current, NULL, 0,
+                       sizeof(struct user_i387_ia32_struct), NULL,
+                       (struct _fpstate_ia32 __user *) buf) ? -1 : 1; /* 1: frame written via math emulation */
+
+       if (user_has_fpu()) {
+               /* Save the live register state to the user directly. */
+               if (save_user_xstate(buf_fx))
+                       return -1;
+               /* Update the thread's fxstate to save the fsave header. */
+               if (ia32_fxstate)
+                       fpu_fxsave(&tsk->thread.fpu); /* refresh in-memory fxstate for save_fsave_header() below */
+       } else {
+               sanitize_i387_state(tsk);
+               if (__copy_to_user(buf_fx, xsave, xstate_size))
+                       return -1;
+       }
+
+       /* Save the fsave header for the 32-bit frames. */
+       if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
+               return -1;
+
+       if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
+               return -1;
+
+       return 0;
+}
+
+static inline void
+sanitize_restored_xstate(struct task_struct *tsk,
+                        struct user_i387_ia32_struct *ia32_env,
+                        u64 xstate_bv, int fx_only)
+{
+       struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
+       struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;
+
+       if (use_xsave()) {
+               /* These bits must be zero. */
+               memset(xsave_hdr->reserved, 0, 48); /* 48: assumed sizeof(xsave_hdr->reserved) - keep in sync */
+
+               /*
+                * Init the state that is not present in the memory
+                * layout and not enabled by the OS.
+                */
+               if (fx_only)
+                       xsave_hdr->xstate_bv = XSTATE_FPSSE;
+               else
+                       xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
+       }
+
+       if (use_fxsr()) {
+               /*
+                * mxcsr reserved bits must be masked to zero for security
+                * reasons.
+                */
+               xsave->i387.mxcsr &= mxcsr_feature_mask;
+
+               convert_to_fxsr(tsk, ia32_env);
+       }
+}
+
+/*
+ * Restore the extended state if present. Otherwise, restore the FP/SSE state.
+ */
+static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
+{
+       if (use_xsave()) {
+               if ((unsigned long)buf % 64 || fx_only) { /* XRSTOR needs 64-byte alignment; fall back to FXRSTOR */
+                       u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
+                       xrstor_state(init_xstate_buf, init_bv);
+                       return fxrstor_user(buf);
+               } else {
+                       u64 init_bv = pcntxt_mask & ~xbv;
+                       if (unlikely(init_bv))
+                               xrstor_state(init_xstate_buf, init_bv);
+                       return xrestore_user(buf, xbv);
+               }
+       } else if (use_fxsr()) {
+               return fxrstor_user(buf);
+       } else
+               return frstor_user(buf);
+}
+
+int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
+{
+       int ia32_fxstate = (buf != buf_fx);
+       struct task_struct *tsk = current;
+       int state_size = xstate_size; /* may be reduced to the fxsave size below */
+       u64 xstate_bv = 0;
+       int fx_only = 0;
+
+       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
+                        config_enabled(CONFIG_IA32_EMULATION));
+
+       if (!buf) {
+               fpu_reset_state(tsk);
+               return 0;
+       }
+
+       if (!access_ok(VERIFY_READ, buf, size))
+               return -EACCES;
+
+       if (!used_math() && fpstate_alloc_init(tsk))
+               return -1;
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_set(current, NULL,
+                                      0, sizeof(struct user_i387_ia32_struct),
+                                      NULL, buf) != 0;
+
+       if (use_xsave()) {
+               struct _fpx_sw_bytes fx_sw_user;
+               if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
+                       /*
+                        * Couldn't find the extended state information in the
+                        * memory layout. Restore just the FP/SSE and init all
+                        * the other extended state.
+                        */
+                       state_size = sizeof(struct i387_fxsave_struct);
+                       fx_only = 1;
+               } else {
+                       state_size = fx_sw_user.xstate_size;
+                       xstate_bv = fx_sw_user.xstate_bv;
+               }
+       }
+
+       if (ia32_fxstate) {
+               /*
+                * For 32-bit frames with fxstate, copy the user state to the
+                * thread's fpu state, reconstruct fxstate from the fsave
+                * header. Sanitize the copied state etc.
+                */
+               struct fpu *fpu = &tsk->thread.fpu;
+               struct user_i387_ia32_struct env;
+               int err = 0;
+
+               /*
+                * Drop the current fpu which clears used_math(). This ensures
+                * that any context-switch during the copy of the new state,
+                * avoids the intermediate state from getting restored/saved.
+                * Thus avoiding the new restored state from getting corrupted.
+                * We will be ready to restore/save the state only after
+                * set_used_math() is again set.
+                */
+               drop_fpu(tsk);
+
+               if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+                   __copy_from_user(&env, buf, sizeof(env))) {
+                       fpstate_init(fpu); /* don't keep a half-copied frame */
+                       err = -1;
+               } else {
+                       sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
+               }
+
+               set_used_math();
+               if (use_eager_fpu()) {
+                       preempt_disable();
+                       math_state_restore();
+                       preempt_enable();
+               }
+
+               return err;
+       } else {
+               /*
+                * For 64-bit frames and 32-bit fsave frames, restore the user
+                * state to the registers directly (with exceptions handled).
+                */
+               user_fpu_begin();
+               if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
+                       fpu_reset_state(tsk);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Prepare the SW reserved portion of the fxsave memory layout, indicating
+ * the presence of the extended state information in the memory layout
+ * pointed by the fpstate pointer in the sigcontext.
+ * This will be saved whenever the FP and extended state context is
+ * saved on the user stack during the signal handler delivery to the user.
+ */
+static void prepare_fx_sw_frame(void)
+{
+       int fsave_header_size = sizeof(struct i387_fsave_struct);
+       int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
+
+       if (config_enabled(CONFIG_X86_32))
+               size += fsave_header_size;
+
+       fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
+       fx_sw_reserved.extended_size = size;
+       fx_sw_reserved.xstate_bv = pcntxt_mask;
+       fx_sw_reserved.xstate_size = xstate_size;
+
+       if (config_enabled(CONFIG_IA32_EMULATION)) {
+               fx_sw_reserved_ia32 = fx_sw_reserved;
+               fx_sw_reserved_ia32.extended_size += fsave_header_size;
+       }
+}
+
+/*
+ * Enable the extended processor state save/restore feature
+ */
+static inline void xstate_enable(void)
+{
+       cr4_set_bits(X86_CR4_OSXSAVE);
+       xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
+}
+
+/*
+ * Record the offsets and sizes of different state managed by the xsave
+ * memory layout.
+ */
+static void __init setup_xstate_features(void)
+{
+       int eax, ebx, ecx, edx, leaf = 0x2; /* leaves 0/1 are the legacy FP/SSE states */
+
+       xstate_features = fls64(pcntxt_mask);
+       xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
+       xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));
+
+       do {
+               cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
+
+               if (eax == 0)
+                       break;
+
+               xstate_offsets[leaf] = ebx;
+               xstate_sizes[leaf] = eax;
+
+               leaf++;
+       } while (1);
+}
+
+/*
+ * This function sets up offsets and sizes of all extended states in
+ * xsave area. This supports both standard format and compacted format
+ * of the xsave area.
+ *
+ * Input: void
+ * Output: void
+ */
+void setup_xstate_comp(void)
+{
+       unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8];
+       int i;
+
+       /*
+        * The FP xstates and SSE xstates are legacy states. They are always
+        * in the fixed offsets in the xsave area in either compacted form
+        * or standard form.
+        */
+       xstate_comp_offsets[0] = 0;
+       xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);
+
+       if (!cpu_has_xsaves) {
+               for (i = 2; i < xstate_features; i++) {
+                       if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
+                               xstate_comp_offsets[i] = xstate_offsets[i];
+                               xstate_comp_sizes[i] = xstate_sizes[i];
+                       }
+               }
+               return;
+       }
+
+       xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+
+       for (i = 2; i < xstate_features; i++) {
+               if (test_bit(i, (unsigned long *)&pcntxt_mask))
+                       xstate_comp_sizes[i] = xstate_sizes[i];
+               else
+                       xstate_comp_sizes[i] = 0;
+
+               if (i > 2)
+                       xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
+                                       + xstate_comp_sizes[i-1];
+
+       }
+}
+
+/*
+ * setup the xstate image representing the init state
+ */
+static void __init setup_init_fpu_buf(void)
+{
+       /*
+        * Setup init_xstate_buf to represent the init state of
+        * all the features managed by the xsave
+        */
+       init_xstate_buf = alloc_bootmem_align(xstate_size,
+                                             __alignof__(struct xsave_struct));
+       fx_finit(&init_xstate_buf->i387);
+
+       if (!cpu_has_xsave)
+               return;
+
+       setup_xstate_features();
+
+       if (cpu_has_xsaves) {
+               init_xstate_buf->xsave_hdr.xcomp_bv =
+                                               (u64)1 << 63 | pcntxt_mask; /* XCOMP_BV bit 63 selects the compacted format */
+               init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask;
+       }
+
+       /*
+        * Init all the features state with header_bv being 0x0
+        */
+       xrstor_state_booting(init_xstate_buf, -1);
+       /*
+        * Dump the init state again. This is to identify the init state
+        * of any feature which is not represented by all zero's.
+        */
+       xsave_state_booting(init_xstate_buf, -1);
+}
+
+static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+static int __init eager_fpu_setup(char *s)
+{
+       if (!strcmp(s, "on"))
+               eagerfpu = ENABLE;
+       else if (!strcmp(s, "off"))
+               eagerfpu = DISABLE;
+       else if (!strcmp(s, "auto"))
+               eagerfpu = AUTO;
+       return 1;
+}
+__setup("eagerfpu=", eager_fpu_setup);
+
+
+/*
+ * Calculate total size of enabled xstates in XCR0/pcntxt_mask.
+ */
+static void __init init_xstate_size(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       int i;
+
+       if (!cpu_has_xsaves) {
+               cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+               xstate_size = ebx;
+               return;
+       }
+
+       xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE; /* legacy i387/SSE area plus the xsave header */
+       for (i = 2; i < 64; i++) {
+               if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
+                       cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
+                       xstate_size += eax;
+               }
+       }
+}
+
+/*
+ * Enable and initialize the xsave feature.
+ */
+static void __init xstate_enable_boot_cpu(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
+               WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
+               return;
+       }
+
+       cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+       pcntxt_mask = eax + ((u64)edx << 32);
+
+       if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
+               pr_err("FP/SSE not shown under xsave features 0x%llx\n",
+                      pcntxt_mask);
+               BUG();
+       }
+
+       /*
+        * Support only the state known to OS.
+        */
+       pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
+
+       xstate_enable();
+
+       /*
+        * Recompute the context size for enabled features
+        */
+       init_xstate_size();
+
+       update_regset_xstate_info(xstate_size, pcntxt_mask);
+       prepare_fx_sw_frame();
+       setup_init_fpu_buf();
+
+       /* Auto enable eagerfpu for xsaveopt */
+       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+               eagerfpu = ENABLE;
+
+       if (pcntxt_mask & XSTATE_EAGER) {
+               if (eagerfpu == DISABLE) {
+                       pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
+                                       pcntxt_mask & XSTATE_EAGER);
+                       pcntxt_mask &= ~XSTATE_EAGER;
+               } else {
+                       eagerfpu = ENABLE;
+               }
+       }
+
+       pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x using %s\n",
+               pcntxt_mask, xstate_size,
+               cpu_has_xsaves ? "compacted form" : "standard form");
+}
+
+/*
+ * For the very first instance, this calls xstate_enable_boot_cpu();
+ * for all subsequent instances, this calls xstate_enable().
+ *
+ * This is somewhat obfuscated due to the lack of powerful enough
+ * overrides for the section checks.
+ */
+void xsave_init(void)
+{
+       static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
+       void (*this_func)(void);
+
+       if (!cpu_has_xsave)
+               return;
+
+       this_func = next_func;
+       next_func = xstate_enable;
+       this_func();
+}
+
+/*
+ * setup_init_fpu_buf() is __init and it is OK to call it here because
+ * init_xstate_buf will be unset only once during boot.
+ */
+void __init_refok eager_fpu_init(void)
+{
+       WARN_ON(used_math());
+       current_thread_info()->status = 0;
+
+       if (eagerfpu == ENABLE)
+               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+
+       if (!cpu_has_eager_fpu) {
+               stts(); /* lazy mode: set CR0.TS so the first FPU use traps (#NM) */
+               return;
+       }
+
+       if (!init_xstate_buf)
+               setup_init_fpu_buf();
+}
+
+/*
+ * Given the xsave area and a state inside, this function returns the
+ * address of the state.
+ *
+ * This is the API that is called to get xstate address in either
+ * standard format or compacted format of xsave area.
+ *
+ * Inputs:
+ *     xsave: base address of the xsave area;
+ *     xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
+ *     etc.)
+ * Output:
+ *     address of the state in the xsave area.
+ */
+void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
+{
+       int feature = fls64(xstate) - 1; /* bit number of the requested state */
+       if (!test_bit(feature, (unsigned long *)&pcntxt_mask))
+               return NULL;
+
+       return (void *)xsave + xstate_comp_offsets[feature]; /* NOTE(review): xstate_bv is not checked - state may be in init form */
+}
+EXPORT_SYMBOL_GPL(get_xsave_addr);
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
deleted file mode 100644 (file)
index 0110155..0000000
+++ /dev/null
@@ -1,718 +0,0 @@
-/*
- *  Copyright (C) 1994 Linus Torvalds
- *
- *  Pentium III FXSR, SSE support
- *  General FPU state handling cleanups
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- */
-#include <linux/module.h>
-#include <linux/regset.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <asm/sigcontext.h>
-#include <asm/processor.h>
-#include <asm/math_emu.h>
-#include <asm/tlbflush.h>
-#include <asm/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
-#include <asm/user.h>
-
-static DEFINE_PER_CPU(bool, in_kernel_fpu);
-
-void kernel_fpu_disable(void)
-{
-       WARN_ON(this_cpu_read(in_kernel_fpu)); /* catch nested/unbalanced disables */
-       this_cpu_write(in_kernel_fpu, true);
-}
-
-void kernel_fpu_enable(void)
-{
-       this_cpu_write(in_kernel_fpu, false);
-}
-
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
-static inline bool interrupted_kernel_fpu_idle(void)
-{
-       if (this_cpu_read(in_kernel_fpu))
-               return false;
-
-       if (use_eager_fpu())
-               return true;
-
-       return !__thread_has_fpu(current) &&
-               (read_cr0() & X86_CR0_TS);
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static inline bool interrupted_user_mode(void)
-{
-       struct pt_regs *regs = get_irq_regs();
-       return regs && user_mode(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
-bool irq_fpu_usable(void)
-{
-       return !in_interrupt() ||
-               interrupted_user_mode() ||
-               interrupted_kernel_fpu_idle();
-}
-EXPORT_SYMBOL(irq_fpu_usable);
-
-void __kernel_fpu_begin(void)
-{
-       struct task_struct *me = current;
-
-       this_cpu_write(in_kernel_fpu, true);
-
-       if (__thread_has_fpu(me)) {
-               __save_init_fpu(me);
-       } else {
-               this_cpu_write(fpu_owner_task, NULL); /* presumably invalidates the lazy-restore owner cache - confirm */
-               if (!use_eager_fpu())
-                       clts();
-       }
-}
-EXPORT_SYMBOL(__kernel_fpu_begin);
-
-void __kernel_fpu_end(void)
-{
-       struct task_struct *me = current;
-
-       if (__thread_has_fpu(me)) {
-               if (WARN_ON(restore_fpu_checking(me)))
-                       fpu_reset_state(me);
-       } else if (!use_eager_fpu()) {
-               stts();
-       }
-
-       this_cpu_write(in_kernel_fpu, false);
-}
-EXPORT_SYMBOL(__kernel_fpu_end);
-
-/*
- * Save the FPU state (initialize it if necessary):
- *
- * This only ever gets called for the current task.
- */
-void fpu__save(struct task_struct *tsk)
-{
-       WARN_ON(tsk != current);
-
-       preempt_disable();
-       if (__thread_has_fpu(tsk)) {
-               if (use_eager_fpu()) {
-                       __save_fpu(tsk);
-               } else {
-                       __save_init_fpu(tsk);
-                       __thread_fpu_end(tsk);
-               }
-       }
-       preempt_enable();
-}
-EXPORT_SYMBOL_GPL(fpu__save);
-
-unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
-unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
-static struct i387_fxsave_struct fx_scratch;
-
-static void mxcsr_feature_mask_init(void)
-{
-       unsigned long mask = 0;
-
-       if (cpu_has_fxsr) {
-               memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
-               asm volatile("fxsave %0" : "+m" (fx_scratch));
-               mask = fx_scratch.mxcsr_mask;
-               if (mask == 0)
-                       mask = 0x0000ffbf; /* historical default for CPUs that report a zero mask */
-       }
-       mxcsr_feature_mask &= mask;
-}
-
-static void fpstate_xstate_init_size(void)
-{
-       /*
-        * Note that xstate_size might be overwritten later during
-        * xsave_init().
-        */
-
-       if (!cpu_has_fpu) {
-               /*
-                * Disable xsave as we do not support it if i387
-                * emulation is enabled.
-                */
-               setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-               xstate_size = sizeof(struct i387_soft_struct);
-               return;
-       }
-
-       if (cpu_has_fxsr)
-               xstate_size = sizeof(struct i387_fxsave_struct);
-       else
-               xstate_size = sizeof(struct i387_fsave_struct);
-}
-
-/*
- * Called on the boot CPU at bootup to set up the initial FPU state that
- * is later cloned into all processes.
- *
- * Also called on secondary CPUs to set up the FPU state of their
- * idle threads.
- */
-void fpu__cpu_init(void)
-{
-       unsigned long cr0;
-       unsigned long cr4_mask = 0;
-
-#ifndef CONFIG_MATH_EMULATION
-       if (!cpu_has_fpu) {
-               pr_emerg("No FPU found and no math emulation present\n");
-               pr_emerg("Giving up\n");
-               for (;;)
-                       asm volatile("hlt");
-       }
-#endif
-       if (cpu_has_fxsr)
-               cr4_mask |= X86_CR4_OSFXSR;
-       if (cpu_has_xmm)
-               cr4_mask |= X86_CR4_OSXMMEXCPT;
-       if (cr4_mask)
-               cr4_set_bits(cr4_mask);
-
-       cr0 = read_cr0();
-       cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
-       if (!cpu_has_fpu)
-               cr0 |= X86_CR0_EM;
-       write_cr0(cr0);
-
-       /*
-        * fpstate_xstate_init_size() is only called once, to avoid overriding
-        * 'xstate_size' during (secondary CPU) bootup or during CPU hotplug.
-        */
-       if (xstate_size == 0)
-               fpstate_xstate_init_size();
-
-       mxcsr_feature_mask_init();
-       xsave_init();
-       eager_fpu_init();
-}
-
-void fpstate_init(struct fpu *fpu)
-{
-       if (!cpu_has_fpu) {
-               finit_soft_fpu(&fpu->state->soft);
-               return;
-       }
-
-       memset(fpu->state, 0, xstate_size);
-
-       if (cpu_has_fxsr) {
-               fx_finit(&fpu->state->fxsave);
-       } else {
-               struct i387_fsave_struct *fp = &fpu->state->fsave;
-               fp->cwd = 0xffff037fu; /* FINIT default control word (high bits set) */
-               fp->swd = 0xffff0000u;
-               fp->twd = 0xffffffffu;
-               fp->fos = 0xffff0000u;
-       }
-}
-EXPORT_SYMBOL_GPL(fpstate_init);
-
-int fpstate_alloc(struct fpu *fpu)
-{
-       if (fpu->state)
-               return 0;
-
-       fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-       if (!fpu->state)
-               return -ENOMEM;
-
-       /* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-       WARN_ON((unsigned long)fpu->state & 15);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(fpstate_alloc);
-
-/*
- * Allocate the backing store for the current task's FPU registers
- * and initialize the registers themselves as well.
- *
- * Can fail.
- */
-int fpstate_alloc_init(struct task_struct *curr)
-{
-       int ret;
-
-       if (WARN_ON_ONCE(curr != current))
-               return -EINVAL;
-       if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
-               return -EINVAL;
-
-       /*
-        * Memory allocation at the first usage of the FPU and other state.
-        */
-       ret = fpstate_alloc(&curr->thread.fpu);
-       if (ret)
-               return ret;
-
-       fpstate_init(&curr->thread.fpu);
-
-       /* Safe to do for the current task: */
-       curr->flags |= PF_USED_MATH;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(fpstate_alloc_init);
-
-/*
- * Used by the ptrace regset handlers below: make sure a *stopped*
- * child task has valid FPU state that can be accessed, allocating
- * and initializing it on first use. A no-op if the child already
- * uses math. Unlike fpstate_alloc_init(), this must NOT be called
- * for the current task.
- */
-static int fpu__unlazy_stopped(struct task_struct *child)
-{
-       int ret;
-
-       if (WARN_ON_ONCE(child == current))
-               return -EINVAL;
-
-       if (child->flags & PF_USED_MATH) {
-               task_disable_lazy_fpu_restore(child);
-               return 0;
-       }
-
-       /*
-        * Memory allocation at the first usage of the FPU and other state.
-        */
-       ret = fpstate_alloc(&child->thread.fpu);
-       if (ret)
-               return ret;
-
-       fpstate_init(&child->thread.fpu);
-
-       /* Safe to do for stopped child tasks: */
-       child->flags |= PF_USED_MATH;
-
-       return 0;
-}
-
-/*
- * The xstateregs_active() routine is the same as the fpregs_active() routine,
- * as the "regset->n" for the xstate regset will be updated based on the feature
- * capabilities supported by the xsave.
- */
-int fpregs_active(struct task_struct *target, const struct user_regset *regset)
-{
-       return tsk_used_math(target) ? regset->n : 0;
-}
-
-int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
-{
-       return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
-}
-
-int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               void *kbuf, void __user *ubuf)
-{
-       int ret;
-
-       if (!cpu_has_fxsr)
-               return -ENODEV;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpu.state->fxsave, 0, -1);
-}
-
-int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               const void *kbuf, const void __user *ubuf)
-{
-       int ret;
-
-       if (!cpu_has_fxsr)
-               return -ENODEV;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &target->thread.fpu.state->fxsave, 0, -1);
-
-       /*
-        * mxcsr reserved bits must be masked to zero for security reasons.
-        */
-       target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-       /*
-        * update the header bits in the xsave header, indicating the
-        * presence of FP and SSE state.
-        */
-       if (cpu_has_xsave)
-               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
-
-       return ret;
-}
-
-int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               void *kbuf, void __user *ubuf)
-{
-       struct xsave_struct *xsave;
-       int ret;
-
-       if (!cpu_has_xsave)
-               return -ENODEV;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       xsave = &target->thread.fpu.state->xsave;
-
-       /*
-        * Copy the 48bytes defined by the software first into the xstate
-        * memory layout in the thread struct, so that we can copy the entire
-        * xstateregs to the user using one user_regset_copyout().
-        */
-       memcpy(&xsave->i387.sw_reserved,
-               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-       /*
-        * Copy the xstate memory layout.
-        */
-       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-       return ret;
-}
-
-int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
-                 unsigned int pos, unsigned int count,
-                 const void *kbuf, const void __user *ubuf)
-{
-       struct xsave_struct *xsave;
-       int ret;
-
-       if (!cpu_has_xsave)
-               return -ENODEV;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       xsave = &target->thread.fpu.state->xsave;
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-       /*
-        * mxcsr reserved bits must be masked to zero for security reasons.
-        */
-       xsave->i387.mxcsr &= mxcsr_feature_mask;
-       xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
-       /*
-        * These bits must be zero.
-        */
-       memset(&xsave->xsave_hdr.reserved, 0, 48); /* 48: assumed sizeof(xsave_hdr.reserved) - keep in sync */
-       return ret;
-}
-
-#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-
-/*
- * FPU tag word conversions.
- */
-
-static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
-{
-       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-
-       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
-       tmp = ~twd;
-       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
-       /* and move the valid bits to the lower byte. */
-       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
-       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
-       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-
-       return tmp;
-}
-
-#define FPREG_ADDR(f, n)       ((void *)&(f)->st_space + (n) * 16)
-#define FP_EXP_TAG_VALID       0
-#define FP_EXP_TAG_ZERO                1
-#define FP_EXP_TAG_SPECIAL     2
-#define FP_EXP_TAG_EMPTY       3
-
-static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
-{
-       struct _fpxreg *st;
-       u32 tos = (fxsave->swd >> 11) & 7; /* top-of-stack field of the FPU status word */
-       u32 twd = (unsigned long) fxsave->twd;
-       u32 tag;
-       u32 ret = 0xffff0000u;
-       int i;
-
-       for (i = 0; i < 8; i++, twd >>= 1) {
-               if (twd & 0x1) {
-                       st = FPREG_ADDR(fxsave, (i - tos) & 7);
-
-                       switch (st->exponent & 0x7fff) {
-                       case 0x7fff:
-                               tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       case 0x0000:
-                               if (!st->significand[0] &&
-                                   !st->significand[1] &&
-                                   !st->significand[2] &&
-                                   !st->significand[3])
-                                       tag = FP_EXP_TAG_ZERO;
-                               else
-                                       tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       default:
-                               if (st->significand[3] & 0x8000)
-                                       tag = FP_EXP_TAG_VALID;
-                               else
-                                       tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       }
-               } else {
-                       tag = FP_EXP_TAG_EMPTY;
-               }
-               ret |= tag << (2 * i);
-       }
-       return ret;
-}
-
-/*
- * FXSR floating point environment conversions.
- */
-
-void
-convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
-{
-       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
-       struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
-       struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
-       int i;
-
-       env->cwd = fxsave->cwd | 0xffff0000u;
-       env->swd = fxsave->swd | 0xffff0000u;
-       env->twd = twd_fxsr_to_i387(fxsave);
-
-#ifdef CONFIG_X86_64
-       env->fip = fxsave->rip;
-       env->foo = fxsave->rdp;
-       /*
-        * should be actually ds/cs at fpu exception time, but
-        * that information is not available in 64bit mode.
-        */
-       env->fcs = task_pt_regs(tsk)->cs;
-       if (tsk == current) {
-               savesegment(ds, env->fos);
-       } else {
-               env->fos = tsk->thread.ds;
-       }
-       env->fos |= 0xffff0000;
-#else
-       env->fip = fxsave->fip;
-       env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
-       env->foo = fxsave->foo;
-       env->fos = fxsave->fos;
-#endif
-
-       for (i = 0; i < 8; ++i)
-               memcpy(&to[i], &from[i], sizeof(to[0]));
-}
-
-void convert_to_fxsr(struct task_struct *tsk,
-                    const struct user_i387_ia32_struct *env)
-
-{
-       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
-       struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
-       struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
-       int i;
-
-       fxsave->cwd = env->cwd;
-       fxsave->swd = env->swd;
-       fxsave->twd = twd_i387_to_fxsr(env->twd);
-       fxsave->fop = (u16) ((u32) env->fcs >> 16); /* fop is packed into the high 16 bits of the env fcs word */
-#ifdef CONFIG_X86_64
-       fxsave->rip = env->fip;
-       fxsave->rdp = env->foo;
-       /* cs and ds ignored */
-#else
-       fxsave->fip = env->fip;
-       fxsave->fcs = (env->fcs & 0xffff);
-       fxsave->foo = env->foo;
-       fxsave->fos = env->fos;
-#endif
-
-       for (i = 0; i < 8; ++i)
-               memcpy(&to[i], &from[i], sizeof(from[0]));
-}
-
-int fpregs_get(struct task_struct *target, const struct user_regset *regset,
-              unsigned int pos, unsigned int count,
-              void *kbuf, void __user *ubuf)
-{
-       struct user_i387_ia32_struct env;
-       int ret;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
-       if (!cpu_has_fxsr)
-               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.fpu.state->fsave, 0,
-                                          -1);
-
-       sanitize_i387_state(target);
-
-       if (kbuf && pos == 0 && count == sizeof(env)) {
-               convert_from_fxsr(kbuf, target);
-               return 0;
-       }
-
-       convert_from_fxsr(&env, target);
-
-       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
-}
-
-int fpregs_set(struct task_struct *target, const struct user_regset *regset,
-              unsigned int pos, unsigned int count,
-              const void *kbuf, const void __user *ubuf)
-{
-       struct user_i387_ia32_struct env;
-       int ret;
-
-       ret = fpu__unlazy_stopped(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
-       if (!cpu_has_fxsr)
-               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu.state->fsave, 0,
-                                         -1);
-
-       if (pos > 0 || count < sizeof(env))
-               convert_from_fxsr(&env, target);
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
-       if (!ret)
-               convert_to_fxsr(target, &env);
-
-       /*
-        * update the header bit in the xsave header, indicating the
-        * presence of FP.
-        */
-       if (cpu_has_xsave)
-               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
-       return ret;
-}
-
-/*
- * FPU state for core dumps.
- * This is only used for a.out dumps now.
- * It is declared generically using elf_fpregset_t (which is
- * struct user_i387_struct) but is in fact only used for 32-bit
- * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
- */
-int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
-{
-       struct task_struct *tsk = current;
-       int fpvalid;
-
-       fpvalid = !!used_math();
-       if (fpvalid)
-               fpvalid = !fpregs_get(tsk, NULL,
-                                     0, sizeof(struct user_i387_ia32_struct),
-                                     fpu, NULL);
-
-       return fpvalid;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
-
-static int __init no_387(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FPU);
-       return 1;
-}
-
-__setup("no387", no_387);
-
-/*
- * Set the X86_FEATURE_FPU CPU-capability bit based on
- * trying to execute an actual sequence of FPU instructions:
- */
-void fpu__detect(struct cpuinfo_x86 *c)
-{
-       unsigned long cr0;
-       u16 fsw, fcw;
-
-       fsw = fcw = 0xffff;
-
-       cr0 = read_cr0();
-       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
-       write_cr0(cr0);
-
-       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
-                    : "+m" (fsw), "+m" (fcw));
-
-       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
-               set_cpu_cap(c, X86_FEATURE_FPU);
-       else
-               clear_cpu_cap(c, X86_FEATURE_FPU);
-
-       /* The final cr0 value is set in fpu_init() */
-}
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
deleted file mode 100644 (file)
index 163b5cc..0000000
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * xsave/xrstor support.
- *
- * Author: Suresh Siddha <suresh.b.siddha@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bootmem.h>
-#include <linux/compat.h>
-#include <linux/cpu.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
-#include <asm/sigframe.h>
-#include <asm/tlbflush.h>
-#include <asm/xcr.h>
-
-/*
- * Supported feature mask by the CPU and the kernel.
- */
-u64 pcntxt_mask;
-
-/*
- * Represents init state for the supported extended state.
- */
-struct xsave_struct *init_xstate_buf;
-
-static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
-static unsigned int *xstate_offsets, *xstate_sizes;
-static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8];
-static unsigned int xstate_features;
-
-/*
- * If a processor implementation discern that a processor state component is
- * in its initialized state it may modify the corresponding bit in the
- * xsave_hdr.xstate_bv as '0', with out modifying the corresponding memory
- * layout in the case of xsaveopt. While presenting the xstate information to
- * the user, we always ensure that the memory layout of a feature will be in
- * the init state if the corresponding header bit is zero. This is to ensure
- * that the user doesn't see some stale state in the memory layout during
- * signal handling, debugging etc.
- */
-void __sanitize_i387_state(struct task_struct *tsk)
-{
-       struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
-       int feature_bit = 0x2;
-       u64 xstate_bv;
-
-       if (!fx)
-               return;
-
-       xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
-
-       /*
-        * None of the feature bits are in init state. So nothing else
-        * to do for us, as the memory layout is up to date.
-        */
-       if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
-               return;
-
-       /*
-        * FP is in init state
-        */
-       if (!(xstate_bv & XSTATE_FP)) {
-               fx->cwd = 0x37f;
-               fx->swd = 0;
-               fx->twd = 0;
-               fx->fop = 0;
-               fx->rip = 0;
-               fx->rdp = 0;
-               memset(&fx->st_space[0], 0, 128);
-       }
-
-       /*
-        * SSE is in init state
-        */
-       if (!(xstate_bv & XSTATE_SSE))
-               memset(&fx->xmm_space[0], 0, 256);
-
-       xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;
-
-       /*
-        * Update all the other memory layouts for which the corresponding
-        * header bit is in the init state.
-        */
-       while (xstate_bv) {
-               if (xstate_bv & 0x1) {
-                       int offset = xstate_offsets[feature_bit];
-                       int size = xstate_sizes[feature_bit];
-
-                       memcpy(((void *) fx) + offset,
-                              ((void *) init_xstate_buf) + offset,
-                              size);
-               }
-
-               xstate_bv >>= 1;
-               feature_bit++;
-       }
-}
-
-/*
- * Check for the presence of extended state information in the
- * user fpstate pointer in the sigcontext.
- */
-static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
-                                  void __user *fpstate,
-                                  struct _fpx_sw_bytes *fx_sw)
-{
-       int min_xstate_size = sizeof(struct i387_fxsave_struct) +
-                             sizeof(struct xsave_hdr_struct);
-       unsigned int magic2;
-
-       if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
-               return -1;
-
-       /* Check for the first magic field and other error scenarios. */
-       if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
-           fx_sw->xstate_size < min_xstate_size ||
-           fx_sw->xstate_size > xstate_size ||
-           fx_sw->xstate_size > fx_sw->extended_size)
-               return -1;
-
-       /*
-        * Check for the presence of second magic word at the end of memory
-        * layout. This detects the case where the user just copied the legacy
-        * fpstate layout with out copying the extended state information
-        * in the memory layout.
-        */
-       if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
-           || magic2 != FP_XSTATE_MAGIC2)
-               return -1;
-
-       return 0;
-}
-
-/*
- * Signal frame handlers.
- */
-static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
-{
-       if (use_fxsr()) {
-               struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
-               struct user_i387_ia32_struct env;
-               struct _fpstate_ia32 __user *fp = buf;
-
-               convert_from_fxsr(&env, tsk);
-
-               if (__copy_to_user(buf, &env, sizeof(env)) ||
-                   __put_user(xsave->i387.swd, &fp->status) ||
-                   __put_user(X86_FXSR_MAGIC, &fp->magic))
-                       return -1;
-       } else {
-               struct i387_fsave_struct __user *fp = buf;
-               u32 swd;
-               if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
-                       return -1;
-       }
-
-       return 0;
-}
-
-static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
-{
-       struct xsave_struct __user *x = buf;
-       struct _fpx_sw_bytes *sw_bytes;
-       u32 xstate_bv;
-       int err;
-
-       /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
-       sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
-       err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
-
-       if (!use_xsave())
-               return err;
-
-       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
-
-       /*
-        * Read the xstate_bv which we copied (directly from the cpu or
-        * from the state in task struct) to the user buffers.
-        */
-       err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-
-       /*
-        * For legacy compatible, we always set FP/SSE bits in the bit
-        * vector while saving the state to the user context. This will
-        * enable us capturing any changes(during sigreturn) to
-        * the FP/SSE bits by the legacy applications which don't touch
-        * xstate_bv in the xsave header.
-        *
-        * xsave aware apps can change the xstate_bv in the xsave
-        * header as well as change any contents in the memory layout.
-        * xrestore as part of sigreturn will capture all the changes.
-        */
-       xstate_bv |= XSTATE_FPSSE;
-
-       err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-
-       return err;
-}
-
-static inline int save_user_xstate(struct xsave_struct __user *buf)
-{
-       int err;
-
-       if (use_xsave())
-               err = xsave_user(buf);
-       else if (use_fxsr())
-               err = fxsave_user((struct i387_fxsave_struct __user *) buf);
-       else
-               err = fsave_user((struct i387_fsave_struct __user *) buf);
-
-       if (unlikely(err) && __clear_user(buf, xstate_size))
-               err = -EFAULT;
-       return err;
-}
-
-/*
- * Save the fpu, extended register state to the user signal frame.
- *
- * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
- *  state is copied.
- *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
- *
- *     buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- *     buf != buf_fx for 32-bit frames with fxstate.
- *
- * If the fpu, extended register state is live, save the state directly
- * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
- * copy the thread's fpu state to the user frame starting at 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
- *
- * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
- * indicating the absence/presence of the extended state to the user.
- */
-int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
-{
-       struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
-       struct task_struct *tsk = current;
-       int ia32_fxstate = (buf != buf_fx);
-
-       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
-                        config_enabled(CONFIG_IA32_EMULATION));
-
-       if (!access_ok(VERIFY_WRITE, buf, size))
-               return -EACCES;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_get(current, NULL, 0,
-                       sizeof(struct user_i387_ia32_struct), NULL,
-                       (struct _fpstate_ia32 __user *) buf) ? -1 : 1;
-
-       if (user_has_fpu()) {
-               /* Save the live register state to the user directly. */
-               if (save_user_xstate(buf_fx))
-                       return -1;
-               /* Update the thread's fxstate to save the fsave header. */
-               if (ia32_fxstate)
-                       fpu_fxsave(&tsk->thread.fpu);
-       } else {
-               sanitize_i387_state(tsk);
-               if (__copy_to_user(buf_fx, xsave, xstate_size))
-                       return -1;
-       }
-
-       /* Save the fsave header for the 32-bit frames. */
-       if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
-               return -1;
-
-       if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
-               return -1;
-
-       return 0;
-}
-
-static inline void
-sanitize_restored_xstate(struct task_struct *tsk,
-                        struct user_i387_ia32_struct *ia32_env,
-                        u64 xstate_bv, int fx_only)
-{
-       struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
-       struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;
-
-       if (use_xsave()) {
-               /* These bits must be zero. */
-               memset(xsave_hdr->reserved, 0, 48);
-
-               /*
-                * Init the state that is not present in the memory
-                * layout and not enabled by the OS.
-                */
-               if (fx_only)
-                       xsave_hdr->xstate_bv = XSTATE_FPSSE;
-               else
-                       xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
-       }
-
-       if (use_fxsr()) {
-               /*
-                * mscsr reserved bits must be masked to zero for security
-                * reasons.
-                */
-               xsave->i387.mxcsr &= mxcsr_feature_mask;
-
-               convert_to_fxsr(tsk, ia32_env);
-       }
-}
-
-/*
- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
- */
-static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
-{
-       if (use_xsave()) {
-               if ((unsigned long)buf % 64 || fx_only) {
-                       u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
-                       xrstor_state(init_xstate_buf, init_bv);
-                       return fxrstor_user(buf);
-               } else {
-                       u64 init_bv = pcntxt_mask & ~xbv;
-                       if (unlikely(init_bv))
-                               xrstor_state(init_xstate_buf, init_bv);
-                       return xrestore_user(buf, xbv);
-               }
-       } else if (use_fxsr()) {
-               return fxrstor_user(buf);
-       } else
-               return frstor_user(buf);
-}
-
-int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
-{
-       int ia32_fxstate = (buf != buf_fx);
-       struct task_struct *tsk = current;
-       int state_size = xstate_size;
-       u64 xstate_bv = 0;
-       int fx_only = 0;
-
-       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
-                        config_enabled(CONFIG_IA32_EMULATION));
-
-       if (!buf) {
-               fpu_reset_state(tsk);
-               return 0;
-       }
-
-       if (!access_ok(VERIFY_READ, buf, size))
-               return -EACCES;
-
-       if (!used_math() && fpstate_alloc_init(tsk))
-               return -1;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_set(current, NULL,
-                                      0, sizeof(struct user_i387_ia32_struct),
-                                      NULL, buf) != 0;
-
-       if (use_xsave()) {
-               struct _fpx_sw_bytes fx_sw_user;
-               if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
-                       /*
-                        * Couldn't find the extended state information in the
-                        * memory layout. Restore just the FP/SSE and init all
-                        * the other extended state.
-                        */
-                       state_size = sizeof(struct i387_fxsave_struct);
-                       fx_only = 1;
-               } else {
-                       state_size = fx_sw_user.xstate_size;
-                       xstate_bv = fx_sw_user.xstate_bv;
-               }
-       }
-
-       if (ia32_fxstate) {
-               /*
-                * For 32-bit frames with fxstate, copy the user state to the
-                * thread's fpu state, reconstruct fxstate from the fsave
-                * header. Sanitize the copied state etc.
-                */
-               struct fpu *fpu = &tsk->thread.fpu;
-               struct user_i387_ia32_struct env;
-               int err = 0;
-
-               /*
-                * Drop the current fpu which clears used_math(). This ensures
-                * that any context-switch during the copy of the new state,
-                * avoids the intermediate state from getting restored/saved.
-                * Thus avoiding the new restored state from getting corrupted.
-                * We will be ready to restore/save the state only after
-                * set_used_math() is again set.
-                */
-               drop_fpu(tsk);
-
-               if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
-                   __copy_from_user(&env, buf, sizeof(env))) {
-                       fpstate_init(fpu);
-                       err = -1;
-               } else {
-                       sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-               }
-
-               set_used_math();
-               if (use_eager_fpu()) {
-                       preempt_disable();
-                       math_state_restore();
-                       preempt_enable();
-               }
-
-               return err;
-       } else {
-               /*
-                * For 64-bit frames and 32-bit fsave frames, restore the user
-                * state to the registers directly (with exceptions handled).
-                */
-               user_fpu_begin();
-               if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-                       fpu_reset_state(tsk);
-                       return -1;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Prepare the SW reserved portion of the fxsave memory layout, indicating
- * the presence of the extended state information in the memory layout
- * pointed by the fpstate pointer in the sigcontext.
- * This will be saved when ever the FP and extended state context is
- * saved on the user stack during the signal handler delivery to the user.
- */
-static void prepare_fx_sw_frame(void)
-{
-       int fsave_header_size = sizeof(struct i387_fsave_struct);
-       int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
-
-       if (config_enabled(CONFIG_X86_32))
-               size += fsave_header_size;
-
-       fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
-       fx_sw_reserved.extended_size = size;
-       fx_sw_reserved.xstate_bv = pcntxt_mask;
-       fx_sw_reserved.xstate_size = xstate_size;
-
-       if (config_enabled(CONFIG_IA32_EMULATION)) {
-               fx_sw_reserved_ia32 = fx_sw_reserved;
-               fx_sw_reserved_ia32.extended_size += fsave_header_size;
-       }
-}
-
-/*
- * Enable the extended processor state save/restore feature
- */
-static inline void xstate_enable(void)
-{
-       cr4_set_bits(X86_CR4_OSXSAVE);
-       xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
-}
-
-/*
- * Record the offsets and sizes of different state managed by the xsave
- * memory layout.
- */
-static void __init setup_xstate_features(void)
-{
-       int eax, ebx, ecx, edx, leaf = 0x2;
-
-       xstate_features = fls64(pcntxt_mask);
-       xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
-       xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));
-
-       do {
-               cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
-
-               if (eax == 0)
-                       break;
-
-               xstate_offsets[leaf] = ebx;
-               xstate_sizes[leaf] = eax;
-
-               leaf++;
-       } while (1);
-}
-
-/*
- * This function sets up offsets and sizes of all extended states in
- * xsave area. This supports both standard format and compacted format
- * of the xsave aread.
- *
- * Input: void
- * Output: void
- */
-void setup_xstate_comp(void)
-{
-       unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8];
-       int i;
-
-       /*
-        * The FP xstates and SSE xstates are legacy states. They are always
-        * in the fixed offsets in the xsave area in either compacted form
-        * or standard form.
-        */
-       xstate_comp_offsets[0] = 0;
-       xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);
-
-       if (!cpu_has_xsaves) {
-               for (i = 2; i < xstate_features; i++) {
-                       if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
-                               xstate_comp_offsets[i] = xstate_offsets[i];
-                               xstate_comp_sizes[i] = xstate_sizes[i];
-                       }
-               }
-               return;
-       }
-
-       xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for (i = 2; i < xstate_features; i++) {
-               if (test_bit(i, (unsigned long *)&pcntxt_mask))
-                       xstate_comp_sizes[i] = xstate_sizes[i];
-               else
-                       xstate_comp_sizes[i] = 0;
-
-               if (i > 2)
-                       xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
-                                       + xstate_comp_sizes[i-1];
-
-       }
-}
-
-/*
- * setup the xstate image representing the init state
- */
-static void __init setup_init_fpu_buf(void)
-{
-       /*
-        * Setup init_xstate_buf to represent the init state of
-        * all the features managed by the xsave
-        */
-       init_xstate_buf = alloc_bootmem_align(xstate_size,
-                                             __alignof__(struct xsave_struct));
-       fx_finit(&init_xstate_buf->i387);
-
-       if (!cpu_has_xsave)
-               return;
-
-       setup_xstate_features();
-
-       if (cpu_has_xsaves) {
-               init_xstate_buf->xsave_hdr.xcomp_bv =
-                                               (u64)1 << 63 | pcntxt_mask;
-               init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask;
-       }
-
-       /*
-        * Init all the features state with header_bv being 0x0
-        */
-       xrstor_state_booting(init_xstate_buf, -1);
-       /*
-        * Dump the init state again. This is to identify the init state
-        * of any feature which is not represented by all zero's.
-        */
-       xsave_state_booting(init_xstate_buf, -1);
-}
-
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
-static int __init eager_fpu_setup(char *s)
-{
-       if (!strcmp(s, "on"))
-               eagerfpu = ENABLE;
-       else if (!strcmp(s, "off"))
-               eagerfpu = DISABLE;
-       else if (!strcmp(s, "auto"))
-               eagerfpu = AUTO;
-       return 1;
-}
-__setup("eagerfpu=", eager_fpu_setup);
-
-
-/*
- * Calculate total size of enabled xstates in XCR0/pcntxt_mask.
- */
-static void __init init_xstate_size(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-       int i;
-
-       if (!cpu_has_xsaves) {
-               cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-               xstate_size = ebx;
-               return;
-       }
-
-       xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-       for (i = 2; i < 64; i++) {
-               if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
-                       cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
-                       xstate_size += eax;
-               }
-       }
-}
-
-/*
- * Enable and initialize the xsave feature.
- */
-static void __init xstate_enable_boot_cpu(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
-               WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
-               return;
-       }
-
-       cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-       pcntxt_mask = eax + ((u64)edx << 32);
-
-       if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
-               pr_err("FP/SSE not shown under xsave features 0x%llx\n",
-                      pcntxt_mask);
-               BUG();
-       }
-
-       /*
-        * Support only the state known to OS.
-        */
-       pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
-
-       xstate_enable();
-
-       /*
-        * Recompute the context size for enabled features
-        */
-       init_xstate_size();
-
-       update_regset_xstate_info(xstate_size, pcntxt_mask);
-       prepare_fx_sw_frame();
-       setup_init_fpu_buf();
-
-       /* Auto enable eagerfpu for xsaveopt */
-       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
-               eagerfpu = ENABLE;
-
-       if (pcntxt_mask & XSTATE_EAGER) {
-               if (eagerfpu == DISABLE) {
-                       pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
-                                       pcntxt_mask & XSTATE_EAGER);
-                       pcntxt_mask &= ~XSTATE_EAGER;
-               } else {
-                       eagerfpu = ENABLE;
-               }
-       }
-
-       pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x using %s\n",
-               pcntxt_mask, xstate_size,
-               cpu_has_xsaves ? "compacted form" : "standard form");
-}
-
-/*
- * For the very first instance, this calls xstate_enable_boot_cpu();
- * for all subsequent instances, this calls xstate_enable().
- *
- * This is somewhat obfuscated due to the lack of powerful enough
- * overrides for the section checks.
- */
-void xsave_init(void)
-{
-       static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
-       void (*this_func)(void);
-
-       if (!cpu_has_xsave)
-               return;
-
-       this_func = next_func;
-       next_func = xstate_enable;
-       this_func();
-}
-
-/*
- * setup_init_fpu_buf() is __init and it is OK to call it here because
- * init_xstate_buf will be unset only once during boot.
- */
-void __init_refok eager_fpu_init(void)
-{
-       WARN_ON(used_math());
-       current_thread_info()->status = 0;
-
-       if (eagerfpu == ENABLE)
-               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-
-       if (!cpu_has_eager_fpu) {
-               stts();
-               return;
-       }
-
-       if (!init_xstate_buf)
-               setup_init_fpu_buf();
-}
-
-/*
- * Given the xsave area and a state inside, this function returns the
- * address of the state.
- *
- * This is the API that is called to get xstate address in either
- * standard format or compacted format of xsave area.
- *
- * Inputs:
- *     xsave: base address of the xsave area;
- *     xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
- *     etc.)
- * Output:
- *     address of the state in the xsave area.
- */
-void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
-{
-       int feature = fls64(xstate) - 1;
-       if (!test_bit(feature, (unsigned long *)&pcntxt_mask))
-               return NULL;
-
-       return (void *)xsave + xstate_comp_offsets[feature];
-}
-EXPORT_SYMBOL_GPL(get_xsave_addr);