x86/fpu: Move math_state_restore() to fpu/core.c
Author: Ingo Molnar <mingo@kernel.org>
Wed, 22 Apr 2015 10:50:13 +0000 (12:50 +0200)
Committer: Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:17 +0000 (15:47 +0200)
It's another piece of FPU internals that is better off close to
the other FPU internals.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/fpu/core.c
arch/x86/kernel/traps.c

index 787bf57b842209259c394d568923ed918872b3d2..7add2fb7369e2d143b0eb55e4b868327efbb7ac4 100644 (file)
@@ -227,6 +227,48 @@ static int fpu__unlazy_stopped(struct task_struct *child)
        return 0;
 }
 
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (eg with local
+ * local interrupts as in the case of do_device_not_available).
+ */
+void math_state_restore(void)
+{
+       struct task_struct *tsk = current;
+
+       if (!tsk_used_math(tsk)) {
+               local_irq_enable();
+               /*
+                * does a slab alloc which can sleep
+                */
+               if (fpstate_alloc_init(tsk)) {
+                       /*
+                        * ran out of memory!
+                        */
+                       do_group_exit(SIGKILL);
+                       return;
+               }
+               local_irq_disable();
+       }
+
+       /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+       kernel_fpu_disable();
+       __thread_fpu_begin(tsk);
+       if (unlikely(restore_fpu_checking(tsk))) {
+               fpu_reset_state(tsk);
+               force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+       } else {
+               tsk->thread.fpu.counter++;
+       }
+       kernel_fpu_enable();
+}
+EXPORT_SYMBOL_GPL(math_state_restore);
+
 void fpu__flush_thread(struct task_struct *tsk)
 {
        if (!use_eager_fpu()) {
index 465b335e749104e89b0b56492ff5bebf1567ac47..63c7fc3677b46c13f9e86575c12b3084195ed526 100644 (file)
@@ -826,48 +826,6 @@ asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
 {
 }
 
-/*
- * 'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- *
- * Must be called with kernel preemption disabled (eg with local
- * local interrupts as in the case of do_device_not_available).
- */
-void math_state_restore(void)
-{
-       struct task_struct *tsk = current;
-
-       if (!tsk_used_math(tsk)) {
-               local_irq_enable();
-               /*
-                * does a slab alloc which can sleep
-                */
-               if (fpstate_alloc_init(tsk)) {
-                       /*
-                        * ran out of memory!
-                        */
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-               local_irq_disable();
-       }
-
-       /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
-       kernel_fpu_disable();
-       __thread_fpu_begin(tsk);
-       if (unlikely(restore_fpu_checking(tsk))) {
-               fpu_reset_state(tsk);
-               force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-       } else {
-               tsk->thread.fpu.counter++;
-       }
-       kernel_fpu_enable();
-}
-EXPORT_SYMBOL_GPL(math_state_restore);
-
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {