/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu-internal.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
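/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * checks irq_fpu_usable() before touching FPU/SIMD state from a context
 * that may be in interrupt, using the preempt-safe wrappers sketched
 * after __kernel_fpu_end() below:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX instructions ...
 *		kernel_fpu_end();
 *	} else {
 *		... fall back to a scalar code path ...
 *	}
 */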
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->has_fpu) {
		fpu_save_init(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct task_struct *me = current;
	struct fpu *fpu = &me->thread.fpu;

	if (fpu->has_fpu) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
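/*
 * A minimal sketch of the preempt-safe wrappers around the two
 * primitives above (assumed to live elsewhere in this tree; shown here
 * only to document the expected calling convention - preemption must
 * stay disabled across the whole begin/end section):
 *
 *	static inline void kernel_fpu_begin(void)
 *	{
 *		preempt_disable();
 *		WARN_ON_ONCE(!irq_fpu_usable());
 *		__kernel_fpu_begin();
 *	}
 *
 *	static inline void kernel_fpu_end(void)
 *	{
 *		__kernel_fpu_end();
 *		preempt_enable();
 *	}
 */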
/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON(tsk != current);

	preempt_disable();
	if (fpu->has_fpu) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			fpu_save_init(fpu);
			__thread_fpu_end(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
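/*
 * Note (added): the constants above appear to mimic the register state
 * after FNINIT: a control word of 0x037f (all exceptions masked,
 * extended precision, round-to-nearest), a cleared status word, an
 * all-ones tag word (every stack register tagged empty), and 0xffff in
 * the unused high halves of the 32-bit fields, as FSAVE leaves them.
 */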
/*
 * FPU state allocation:
 */
static struct kmem_cache *task_xstate_cachep;

void fpstate_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
int fpstate_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;

	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;

	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
	WARN_ON((unsigned long)fpu->state & 15);

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc);
void fpstate_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
EXPORT_SYMBOL_GPL(fpstate_free);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct task_struct *dst, struct task_struct *src)
{
	WARN_ON(src != current);

	if (use_eager_fpu()) {
		memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
		__save_fpu(dst);
	} else {
		struct fpu *dfpu = &dst->thread.fpu;
		struct fpu *sfpu = &src->thread.fpu;

		fpu__save(src);
		memcpy(dfpu->state, sfpu->state, xstate_size);
	}
}
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	struct fpu *dst_fpu = &dst->thread.fpu;
	struct fpu *src_fpu = &src->thread.fpu;

	dst->thread.fpu.counter = 0;
	dst->thread.fpu.has_fpu = 0;
	dst->thread.fpu.state = NULL;

	task_disable_lazy_fpu_restore(dst);

	if (src_fpu->fpstate_active) {
		int err = fpstate_alloc(dst_fpu);

		if (err)
			return err;

		fpu_copy(dst, src);
	}

	return 0;
}
/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct task_struct *curr)
{
	struct fpu *fpu = &curr->thread.fpu;
	int ret;

	if (WARN_ON_ONCE(curr != current))
		return -EINVAL;
	if (WARN_ON_ONCE(fpu->fpstate_active))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&curr->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&curr->thread.fpu);

	/* Safe to do for the current task: */
	fpu->fpstate_active = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);
/*
 * This function is called before we modify a stopped child's
 * FPU state context.
 *
 * If the child has not used the FPU before then initialize its
 * FPU context.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after the context is modified and
 *   the child task is woken up, the child task will restore
 *   the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, losing
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure its context is allocated and initialized.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static int fpu__unlazy_stopped(struct task_struct *child)
{
	struct fpu *child_fpu = &child->thread.fpu;
	int ret;

	if (WARN_ON_ONCE(child == current))
		return -EINVAL;

	if (child_fpu->fpstate_active) {
		task_disable_lazy_fpu_restore(child);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&child->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&child->thread.fpu);

	/* Safe to do for stopped child tasks: */
	child_fpu->fpstate_active = 1;

	return 0;
}
/*
 * 'fpu__restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as is the case in do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	if (!fpu->fpstate_active) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (fpstate_alloc_init(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(fpu);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
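/*
 * For reference, a sketch of the expected caller - the #NM
 * device-not-available trap handler in traps.c. This is reconstructed
 * from the comment above, so treat the details as illustrative:
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		...
 *		fpu__restore();		(interrupts remain disabled)
 *		...
 *	}
 */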
void fpu__flush_thread(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON(tsk != current);

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(tsk);
		fpstate_free(&tsk->thread.fpu);
	} else {
		if (!fpu->fpstate_active) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(fpstate_alloc_init(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);

	return ret;
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
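/*
 * Worked example (added for illustration): an i387 tag word of 0xfff0
 * (ST0/ST1 tagged 00 == valid, all other registers tagged 11 == empty)
 * inverts to 0x000f and compresses step by step - 0x0005, 0x0003,
 * 0x0003, 0x0003 - down to the FXSR tag byte 0x03, i.e. only bits 0
 * and 1 ("register in use") set.
 */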
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}

	return ret;
}
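/*
 * Note (added commentary): FXSAVE only records full/empty per register,
 * so the valid/zero/special distinction is reconstructed above from the
 * saved register image itself - an all-zero exponent and significand
 * yields FP_EXP_TAG_ZERO, while an exponent of 0x7fff (NaN/infinity)
 * or a clear integer bit (an unnormal) yields FP_EXP_TAG_SPECIAL.
 */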
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
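/*
 * Added note: the register-copy loops in convert_from_fxsr() and
 * convert_to_fxsr() translate between the two register-image layouts -
 * the legacy i387 env packs each st(i) as a 10-byte struct _fpreg,
 * while the FXSR image stores each register in a 16-byte struct
 * _fpxreg slot; only the low 10 bytes of each slot are copied, since
 * the rest is padding.
 */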
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */