x86/fpu: Remove failure return from fpstate_alloc_init()
Author:     Ingo Molnar <mingo@kernel.org>
AuthorDate: Mon, 27 Apr 2015 04:46:52 +0000 (06:46 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 19 May 2015 13:47:50 +0000 (15:47 +0200)
Remove the failure return and propagate this change down to the callers.

Note that this function still has an 'init' aspect, so it must still be
called.
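
A call site thus simplifies from an error-handling pattern to an
unconditional call, as in the kvm_arch_vcpu_ioctl_run() hunk below
(shown here as a minimal sketch):

	/* Before: allocation could fail, so callers had to bail out: */
	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
		return -ENOMEM;

	/* After: the call cannot fail, so no error path is needed: */
	if (!fpu->fpstate_active)
		fpstate_alloc_init(fpu);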

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xsave.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 9454f21f0edf6538d5ebed5ccc5a2280b45788b5..1d0c5cee29ebdc6ea66d09495a6bcde979909613 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 
-extern int fpstate_alloc_init(struct fpu *fpu);
+extern void fpstate_alloc_init(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 6b8d3e1b6ef889dbb024be52752deb7bcb7e783d..b44ac509064123fc0cce9b47db48be85c7757dd0 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -259,26 +259,17 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 
 /*
- * Allocate the backing store for the current task's FPU registers
- * and initialize the registers themselves as well.
- *
- * Can fail.
+ * Initialize the current task's in-memory FPU context:
  */
-int fpstate_alloc_init(struct fpu *fpu)
+void fpstate_alloc_init(struct fpu *fpu)
 {
-       int ret;
-
-       if (WARN_ON_ONCE(fpu != &current->thread.fpu))
-               return -EINVAL;
-       if (WARN_ON_ONCE(fpu->fpstate_active))
-               return -EINVAL;
+       WARN_ON_ONCE(fpu != &current->thread.fpu);
+       WARN_ON_ONCE(fpu->fpstate_active);
 
        fpstate_init(fpu);
 
        /* Safe to do for the current task: */
        fpu->fpstate_active = 1;
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(fpstate_alloc_init);
 
@@ -340,20 +331,8 @@ void fpu__restore(void)
        struct task_struct *tsk = current;
        struct fpu *fpu = &tsk->thread.fpu;
 
-       if (!fpu->fpstate_active) {
-               local_irq_enable();
-               /*
-                * does a slab alloc which can sleep
-                */
-               if (fpstate_alloc_init(fpu)) {
-                       /*
-                        * ran out of memory!
-                        */
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-               local_irq_disable();
-       }
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
@@ -379,9 +358,7 @@ void fpu__clear(struct task_struct *tsk)
                drop_fpu(fpu);
        } else {
                if (!fpu->fpstate_active) {
-                       /* kthread execs. TODO: cleanup this horror. */
-                       if (WARN_ON(fpstate_alloc_init(fpu)))
-                               force_sig(SIGKILL, tsk);
+                       fpstate_alloc_init(fpu);
                        user_fpu_begin();
                }
                restore_init_xstate();
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index c7d48eb0a19480207136acddf1212ccdb70ea101..dd2cef08a1a48d044caccd8ea63ce846d9131414 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -358,8 +358,8 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
        if (!access_ok(VERIFY_READ, buf, size))
                return -EACCES;
 
-       if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-               return -1;
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(current, NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 68529251e8978ea7bb87aacb235c2bc56f91d602..707f4e27ee91e1a3a1214a7d0dc8b79a6986ee7c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6601,8 +6601,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int r;
        sigset_t sigsaved;
 
-       if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-               return -ENOMEM;
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5e003704ebfaa0985684ed83befb816abc258f99..99ddfc274df371071ccf7c23b193a758eff0f25a 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -149,12 +149,8 @@ void math_emulate(struct math_emu_info *info)
        struct desc_struct code_descriptor;
        struct fpu *fpu = &current->thread.fpu;
 
-       if (!fpu->fpstate_active) {
-               if (fpstate_alloc_init(fpu)) {
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-       }
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
        if (emulating) {