/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
			fpu__clear(fpu);
	} else {
		__fpregs_deactivate_hw();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
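
/*
 * Example (an illustrative sketch, not part of this file): a driver
 * that wants to run SIMD code from a context that may be atomic would
 * bracket the FPU-touching section and fall back to a scalar path when
 * the FPU is not usable. All names below other than the
 * kernel_fpu_*()/irq_fpu_usable() APIs are hypothetical:
 *
 *	static void chacha_do_block(void *dst, const void *src)
 *	{
 *		if (irq_fpu_usable()) {
 *			kernel_fpu_begin();
 *			chacha_do_block_simd(dst, src);
 *			kernel_fpu_end();
 *		} else {
 *			chacha_do_block_generic(dst, src);
 *		}
 *	}
 *
 * Nothing may sleep between kernel_fpu_begin() and kernel_fpu_end():
 * kernel_fpu_begin() disables preemption.
 */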
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA
	 * fault. Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);
void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
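
/*
 * Example (an illustrative sketch): atomic-context code that executes a
 * stray FPU/SSE instruction without a full kernel_fpu_begin()/end()
 * pair brackets the access like this:
 *
 *	int ts_state;
 *
 *	ts_state = irq_ts_save();
 *	... execute the FPU-touching instruction ...
 *	irq_ts_restore(ts_state);
 */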
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu))
			fpregs_deactivate(fpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;
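		/*
		 * Legacy FNSAVE init values (the upper 16 bits of each
		 * 32-bit word read back as all-ones on real hardware):
		 * CWD 0x037f masks all x87 exceptions and selects 64-bit
		 * precision with round-to-nearest; TWD 0xffff tags all
		 * eight stack registers as empty.
		 */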
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);
	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, xstate_size);
	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 *
	 * If the FPU context got destroyed in the process (FNSAVE
	 * done on old CPUs) then copy it back into the source
	 * context and mark the current task for lazy restore.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
		fpregs_deactivate(src_fpu);
	}
	preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
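
/*
 * fpu__copy() runs in the fork path (arch_dup_task_struct() in this
 * kernel series): dst_fpu belongs to the freshly allocated child task,
 * src_fpu to current.
 */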
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(fpu);

		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we modify a stopped child's
 * fpstate.
 *
 * If the child has not used the FPU before then initialize its
 * fpstate.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after registers in the fpstate are
 *   modified and the child task has woken up, the child task will
 *   restore the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, corrupting
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized if the child has
 * no active FPU state.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static void fpu__activate_stopped(struct fpu *child_fpu)
{
	WARN_ON_ONCE(child_fpu == &current->thread.fpu);

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
	} else {
		fpstate_init(child_fpu);

		/* Safe to do for stopped child tasks: */
		child_fpu->fpstate_active = 1;
	}
}
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
		fpregs_deactivate(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
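
/*
 * The main caller is the #NM (device-not-available) trap handler: in
 * this kernel series do_device_not_available() invokes fpu__restore()
 * with interrupts disabled.
 */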
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	preempt_enable();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		fpu__drop(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpu__activate_curr(fpu);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by the xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * Update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software into the xstate
	 * memory layout in the thread struct first, so that we can copy
	 * the entire xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	fpu__activate_stopped(fpu);

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;

	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
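
/*
 * Worked example (illustrative): an i387 tag word of 0xfffc - st0
 * tagged valid (00), all other registers empty (11) - converts as:
 *
 *	tmp = ~0xfffc                 -> low bits 0x0003
 *	(tmp | (tmp >> 1)) & 0x5555   -> 0x0001
 *	(tmp | (tmp >> 1)) & 0x3333   -> 0x0001
 *	(tmp | (tmp >> 2)) & 0x0f0f   -> 0x0001
 *	(tmp | (tmp >> 4)) & 0x00ff   -> 0x0001
 *
 * i.e. only bit 0 of the FXSR tag byte is set, marking st0 as in use.
 */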
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);
			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
/*
 * FXSR floating point environment conversions.
 */
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;

	fpu__activate_stopped(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0, -1);

	fpstate_sanitize_xstate(fpu);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	fpu__activate_stopped(fpu);
	fpstate_sanitize_xstate(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0, -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);
#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
	if (cpu_has_fxsr)
		return fpu->state.fxsave.cwd;
	else
		return (unsigned short)fpu->state.fsave.cwd;
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
	if (cpu_has_fxsr)
		return fpu->state.fxsave.swd;
	else
		return (unsigned short)fpu->state.fsave.swd;
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
	if (cpu_has_xmm)
		return fpu->state.fxsave.mxcsr;
	else
		return MXCSR_DEFAULT;
}
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(fpu);
		swd = get_fpu_swd(fpu);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(fpu);

		err = ~(mxcsr >> 7) & mxcsr;
	}
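
	/*
	 * Worked example (illustrative): with only the divide-by-zero
	 * exception unmasked (ZM cleared) and the ZE flag raised,
	 * mxcsr = 0x1d84:
	 *
	 *	mxcsr >> 7    = 0x3b
	 *	~(mxcsr >> 7) = 0x..c4	(bit 2 set among the low six bits)
	 *
	 * so err & 0x004 is set and the decode below yields FPE_FLTDIV.
	 */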
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}
	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}