/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu-internal.h>

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->has_fpu) {
		fpu_save_init(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct task_struct *me = current;
	struct fpu *fpu = &me->thread.fpu;

	if (fpu->has_fpu) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

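/*
 * Illustrative sketch (added, not part of the original file): how a
 * driver would typically use the API above. kernel_fpu_begin()/end()
 * and irq_fpu_usable() are the real interfaces; the helper
 * my_simd_copy() and its SIMD body are hypothetical.
 */
#if 0
static void my_simd_copy(void *dst, const void *src, size_t len)
{
	if (!irq_fpu_usable()) {
		memcpy(dst, src, len);	/* integer fallback */
		return;
	}

	kernel_fpu_begin();	/* preemption off, FPU regs now usable */
	/* ... SSE/AVX accelerated copy of 'len' bytes ... */
	kernel_fpu_end();	/* state invalidated/restored, preemption on */
}
#endif
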
/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON(tsk != current);

	preempt_disable();
	if (fpu->has_fpu) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			fpu_save_init(fpu);
			__thread_fpu_end(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);

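/*
 * Background note (added): the fsave defaults above are the i387
 * power-on values. A cwd of 0x037f masks all exceptions and selects
 * 64-bit precision with round-to-nearest; a twd of 0xffff tags every
 * register as empty. The 0xffff upper halves match the legacy FNSAVE
 * image, which stores these 16-bit fields in 32-bit slots.
 */
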
/*
 * FPU state allocation:
 */
static struct kmem_cache *task_xstate_cachep;

void fpstate_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
	setup_xstate_comp();
}

int fpstate_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;

	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;

	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
	WARN_ON((unsigned long)fpu->state & 15);

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc);

void fpstate_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
EXPORT_SYMBOL_GPL(fpstate_free);

/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	WARN_ON(src != current);

	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		fpu__save(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}

int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	dst->thread.fpu.counter = 0;
	dst->thread.fpu.has_fpu = 0;
	dst->thread.fpu.state = NULL;

	task_disable_lazy_fpu_restore(dst);

	if (tsk_used_math(src)) {
		int err = fpstate_alloc(&dst->thread.fpu);

		if (err)
			return err;

		fpu_copy(dst, src);
	}
	return 0;
}

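/*
 * Note (added): fpu__copy() runs on the fork() path (via
 * arch_dup_task_struct() in this kernel era), which is why the source
 * task must be 'current' and why the child starts out with
 * has_fpu == 0 and an explicitly invalidated lazy-restore state.
 */
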
/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct task_struct *curr)
{
	int ret;

	if (WARN_ON_ONCE(curr != current))
		return -EINVAL;
	if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&curr->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&curr->thread.fpu);

	/* Safe to do for the current task: */
	curr->flags |= PF_USED_MATH;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);

/*
 * A stopped child task is about to have its FPU state accessed
 * (e.g. via the ptrace regset interface below), so make sure it has
 * an initialized FPU context: allocate and init the fpstate on first
 * use and remember that the task has used the FPU.
 */
static int fpu__unlazy_stopped(struct task_struct *child)
{
	int ret;

	if (WARN_ON_ONCE(child == current))
		return -EINVAL;

	if (child->flags & PF_USED_MATH) {
		task_disable_lazy_fpu_restore(child);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&child->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&child->thread.fpu);

	/* Safe to do for stopped child tasks: */
	child->flags |= PF_USED_MATH;

	return 0;
}

/*
 * 'fpu__restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (fpstate_alloc_init(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

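/*
 * Note (added): in the lazy-switching model this is the #NM
 * (device-not-available) trap path: the first FPU instruction after a
 * context switch faults because CR0.TS is set, and
 * do_device_not_available() ends up here to allocate (if needed) and
 * restore this task's FPU state.
 */
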
void fpu__flush_thread(struct task_struct *tsk)
{
	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(tsk);
		fpstate_free(&tsk->thread.fpu);
	} else {
		if (!tsk_used_math(tsk)) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(fpstate_alloc_init(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

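/*
 * Note (added): the MXCSR masking above is not cosmetic - FXRSTOR
 * raises #GP if any reserved MXCSR bit is set, so an unfiltered
 * ptrace write could otherwise trigger a fault in the kernel when
 * the state is next restored.
 */
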
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);

	return ret;
}

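/*
 * Note (added): this is the XRSTOR analogue of the MXCSR filtering
 * above - the hardware checks the xsave header and faults if
 * xstate_bv requests features outside the enabled set or if the
 * reserved bytes are non-zero, hence the pcntxt_mask filter and the
 * memset().
 */
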
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

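/*
 * Worked example (added): for an i387 tag word of 0x3fff (ST7 tagged
 * 00/valid, ST0-ST6 tagged 11/empty), ~twd = 0xc000 and the pack
 * sequence above yields 0x4000 -> 0x2000 -> 0x0800 -> 0x0080: bit 7
 * set in the FXSR tag byte, i.e. only register 7 is in use.
 */
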
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

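/*
 * Note (added): FXSR tag bits are indexed by physical register number,
 * while st_space[] stores registers in stack order, so the
 * (i - tos) & 7 lookup above rotates physical register i back to its
 * ST() slot before its exponent/significand can be classified.
 */
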
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */