x86/fpu: Use 'struct fpu' in fpu_reset_state()
author Ingo Molnar <mingo@kernel.org>
Thu, 23 Apr 2015 15:34:20 +0000 (17:34 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:26 +0000 (15:47 +0200)
Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/fpu-internal.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xsave.c
arch/x86/kernel/signal.c

index 2d7934e4e394057007e15e77c62f1935a9a9257f..579f7d0a399ded66ad202af93f65209307a5826b 100644 (file)
@@ -380,10 +380,8 @@ static inline void restore_init_xstate(void)
  * Reset the FPU state in the eager case and drop it in the lazy case (later use
  * will reinit it).
  */
-static inline void fpu_reset_state(struct task_struct *tsk)
+static inline void fpu_reset_state(struct fpu *fpu)
 {
-       struct fpu *fpu = &tsk->thread.fpu;
-
        if (!use_eager_fpu())
                drop_fpu(fpu);
        else
@@ -460,7 +458,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 
        if (fpu.preload) {
                if (unlikely(restore_fpu_checking(new_fpu)))
-                       fpu_reset_state(new);
+                       fpu_reset_state(new_fpu);
        }
 }
 
index 1ecd25028079079cd7fee724c3cb20d04b9022cd..41c92897f5749ea5d7c7b0dd4bbabde3afbaf4d2 100644 (file)
@@ -112,12 +112,11 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-       struct task_struct *me = current;
-       struct fpu *fpu = &me->thread.fpu;
+       struct fpu *fpu = &current->thread.fpu;
 
        if (fpu->has_fpu) {
                if (WARN_ON(restore_fpu_checking(fpu)))
-                       fpu_reset_state(me);
+                       fpu_reset_state(fpu);
        } else if (!use_eager_fpu()) {
                stts();
        }
@@ -371,7 +370,7 @@ void fpu__restore(void)
        kernel_fpu_disable();
        __thread_fpu_begin(fpu);
        if (unlikely(restore_fpu_checking(fpu))) {
-               fpu_reset_state(tsk);
+               fpu_reset_state(fpu);
                force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
        } else {
                tsk->thread.fpu.counter++;
index 049dc619481de42c46c3c5205acbd12f225b4d73..3953cbf8d7e7820bdca4dc956e3ca70e41630ee5 100644 (file)
@@ -343,7 +343,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                         config_enabled(CONFIG_IA32_EMULATION));
 
        if (!buf) {
-               fpu_reset_state(tsk);
+               fpu_reset_state(fpu);
                return 0;
        }
 
@@ -417,7 +417,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                 */
                user_fpu_begin();
                if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-                       fpu_reset_state(tsk);
+                       fpu_reset_state(fpu);
                        return -1;
                }
        }
index 20a9d355af590eeb7b763abca42acbeebd62f2ee..bcb853e44d30289bf92345a4cfcfa1f63aafb9ab 100644 (file)
@@ -667,7 +667,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                 * Ensure the signal handler starts with the new fpu state.
                 */
                if (fpu->fpstate_active)
-                       fpu_reset_state(current);
+                       fpu_reset_state(fpu);
        }
        signal_setup_done(failed, ksig, stepping);
 }