/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
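
/*
 * Example usage (illustrative only, not part of this file): a driver
 * that wants to execute SSE/AVX code in the kernel first checks
 * irq_fpu_usable() and brackets the FPU section with
 * kernel_fpu_begin()/kernel_fpu_end(). The function name below is
 * hypothetical:
 *
 *	static void my_simd_copy(void *dst, const void *src, size_t len)
 *	{
 *		if (!irq_fpu_usable()) {
 *			memcpy(dst, src, len);	// integer-only fallback
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		// ... SIMD instructions may be used here; no sleeping
 *		// and no preemption inside this section ...
 *		kernel_fpu_end();
 *	}
 */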
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);
void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
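
/*
 * Example usage (illustrative only): code that must execute an
 * FPU-touching instruction in atomic context saves and restores
 * CR0::TS around it, so that no device-not-available fault is taken:
 *
 *	int ts_state = irq_ts_save();
 *	// ... instruction(s) that would fault if CR0::TS were set ...
 *	irq_ts_restore(ts_state);
 */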
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct i387_fsave_struct *fp)
{
	fp->cwd = 0xffff037fu;	/* FNINIT control word: all exceptions masked,
				   extended precision, round to nearest */
	fp->swd = 0xffff0000u;	/* status word: no exceptions pending, TOS=0 */
	fp->twd = 0xffffffffu;	/* tag word: all eight registers empty */
	fp->fos = 0xffff0000u;	/* last operand selector */
}
void fpstate_init(union thread_xstate *state)
{
	if (!cpu_has_fpu) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, xstate_size);

	if (cpu_has_fxsr)
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}
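
/*
 * Note (editorial): fpu__copy() below is the fork()-time entry point;
 * it is expected to be called while duplicating a task (via
 * arch_dup_task_struct()), with dst_fpu being the new child's context.
 */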
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(&child_fpu->state);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}
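
/*
 * Note (editorial): the callers of fpu__activate_stopped() are the
 * ptrace regset handlers later in this file - xfpregs_get()/set(),
 * xstateregs_get()/set() and fpregs_get()/set() - all of which operate
 * on a stopped tracee.
 */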
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpu__clear(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
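
/*
 * Worked example (editorial): with the i387 tag word 0xff00 - registers
 * 0-3 tagged 00 (valid), registers 4-7 tagged 11 (empty):
 *
 *	tmp = ~0xff00			= 0x00ff
 *	(tmp | (tmp >> 1)) & 0x5555	= 0x0055	01 per non-empty pair
 *	(tmp | (tmp >> 1)) & 0x3333	= 0x0033
 *	(tmp | (tmp >> 2)) & 0x0f0f	= 0x000f
 *	(tmp | (tmp >> 4)) & 0x00ff	= 0x000f
 *
 * i.e. the FXSR tag byte 0x0f: one bit per register, set for each of
 * the four non-empty registers.
 */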
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
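
/*
 * Classification example (editorial): an 80-bit register holding 1.0
 * has exponent 0x3fff and the explicit integer bit set in
 * significand[3] (bit 15), so it falls into the default case above and
 * is tagged FP_EXP_TAG_VALID; an unnormal (integer bit clear) becomes
 * FP_EXP_TAG_SPECIAL; exponent 0x7fff (NaN/infinity) is always special.
 */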
/*
 * FXSR floating point environment conversions.
 */
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
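
/*
 * Layout note (editorial): the register-copy loops above and below rely
 * on struct _fpreg being the packed 10-byte legacy x87 register format
 * while struct _fpxreg is the padded 16-byte FXSR slot; copying
 * sizeof() of the 10-byte type in each direction converts between the
 * two layouts.
 */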
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.cwd;
	} else {
		return (unsigned short)fpu->state.fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr) {
		return fpu->state.fxsave.swd;
	} else {
		return (unsigned short)fpu->state.fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm) {
		return fpu->state.fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
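
/*
 * Worked example (editorial): an unmasked SSE divide-by-zero, with all
 * other exceptions masked, leaves mxcsr = 0x1d84 (the default mask bits
 * 0x1f80 with ZM=0x0200 cleared, plus the ZE status bit 0x0004). Then:
 *
 *	mxcsr >> 7	= 0x3b	mask bits shifted onto the status bits
 *	~(mxcsr >> 7)	= ..c4	1 where an exception is unmasked
 *	err & 0x3f	= 0x04	only the unmasked ZE status bit remains
 *
 * so the (err & 0x004) test above maps it to FPE_FLTDIV.
 */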