/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu-internal.h>
static DEFINE_PER_CPU(bool, in_kernel_fpu);
void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}
void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * With lazy FPU switching, we can do a kernel_fpu_begin/end() pair
 * *ONLY* if that pair does nothing at all: the thread must not have
 * fpu (so that we don't try to save the FPU state), and TS must be
 * set (so that the clts/stts pair does nothing that is visible in
 * the interrupted kernel thread).
 *
 * The exception is the eagerfpu case, where we return true: the
 * thread likely does have FPU state, but we never set or clear TS
 * in eager mode, so the begin/end pair stays invisible anyway.
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return true;

	return !__thread_has_fpu(current) &&
		(read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
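/*
 * Usage sketch (illustrative, not part of the original file): code
 * that may run in interrupt context is expected to check
 * irq_fpu_usable() before entering a kernel_fpu_begin()/end()
 * section, and to fall back to an integer-only path otherwise:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use FPU/SSE/AVX registers ...
 *		kernel_fpu_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */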
void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else {
		this_cpu_write(fpu_owner_task, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);
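/*
 * Note (paraphrased, an assumption about this era's <asm/i387.h>):
 * the __kernel_fpu_begin()/__kernel_fpu_end() primitives above are
 * meant to be used through wrappers that also disable preemption,
 * roughly:
 *
 *	static inline void kernel_fpu_begin(void)
 *	{
 *		preempt_disable();
 *		WARN_ON_ONCE(!irq_fpu_usable());
 *		__kernel_fpu_begin();
 *	}
 *
 *	static inline void kernel_fpu_end(void)
 *	{
 *		__kernel_fpu_end();
 *		preempt_enable();
 *	}
 */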
/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct task_struct *tsk)
{
	WARN_ON(tsk != current);

	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			__save_init_fpu(tsk);
			__thread_fpu_end(tsk);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
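/*
 * For reference, decoding the legacy FSAVE defaults above (my reading
 * of the architectural reset state): cwd 0x037f masks all x87
 * exceptions and selects 64-bit precision with round-to-nearest,
 * swd 0x0000 clears the status flags and the stack top, and
 * twd 0xffff tags all eight registers as empty (tag value 11).
 * The 0xffff upper halves match the all-ones filler that FNSAVE
 * writes into the high bits of these 16-bit fields.
 */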
/*
 * FPU state allocation:
 */
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);
void fpstate_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
	setup_xstate_comp();
}
int fpstate_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;

	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;

	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
	WARN_ON((unsigned long)fpu->state & 15);

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc);
void fpstate_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
EXPORT_SYMBOL_GPL(fpstate_free);
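/*
 * Lifecycle sketch (illustrative): users of the helpers above pair
 * them roughly like this for a task 'tsk':
 *
 *	struct fpu *fpu = &tsk->thread.fpu;
 *
 *	if (fpstate_alloc(fpu))		(may sleep: GFP_KERNEL)
 *		return -ENOMEM;
 *	fpstate_init(fpu);		(registers in their reset state)
 *	...
 *	fpstate_free(fpu);		(on task teardown)
 */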
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	dst->thread.fpu.counter = 0;
	dst->thread.fpu.has_fpu = 0;
	dst->thread.fpu.state = NULL;

	task_disable_lazy_fpu_restore(dst);

	if (tsk_used_math(src)) {
		int err = fpstate_alloc(&dst->thread.fpu);

		if (err)
			return err;

		fpu_copy(dst, src);
	}
	return 0;
}
/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct task_struct *curr)
{
	int ret;

	if (WARN_ON_ONCE(curr != current))
		return -EINVAL;
	if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&curr->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&curr->thread.fpu);

	/* Safe to do for the current task: */
	curr->flags |= PF_USED_MATH;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);
/*
 * A stopped child task is about to have its FPU state accessed (e.g.
 * via ptrace), so initialize its FPU state area if this is the first
 * use and remember that the task has used math. If the task already
 * used math, just invalidate any lazily-cached FPU state for it.
 */
static int fpu__unlazy_stopped(struct task_struct *child)
{
	int ret;

	if (WARN_ON_ONCE(child == current))
		return -EINVAL;

	if (child->flags & PF_USED_MATH) {
		task_disable_lazy_fpu_restore(child);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(&child->thread.fpu);
	if (ret)
		return ret;

	fpstate_init(&child->thread.fpu);

	/* Safe to do for stopped child tasks: */
	child->flags |= PF_USED_MATH;

	return 0;
}
/*
 * 'fpu__restore()' restores the math state of the current task into
 * the FPU registers, allocating and initializing that state first if
 * the task has not used math yet.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as is the case with do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (fpstate_alloc_init(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
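/*
 * Call-path sketch (illustrative, an assumption about traps.c in this
 * era): with lazy FPU switching, the first FPU instruction after a
 * context switch raises #NM ("device not available"), whose handler
 * ends up here with interrupts disabled, roughly:
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		...
 *		fpu__restore();		(clears CR0.TS, reloads state)
 *		...
 *	}
 */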
void fpu__flush_thread(struct task_struct *tsk)
{
	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(tsk);
		fpstate_free(&tsk->thread.fpu);
	} else {
		if (!tsk_used_math(tsk)) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(fpstate_alloc_init(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated
 * based on the feature capabilities supported by XSAVE.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	/*
	 * First copy the 48 software-defined bytes into the xstate
	 * memory layout in the thread struct, so that the entire
	 * xstateregs can then be copied to userspace with a single
	 * user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	xsave = &target->thread.fpu.state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	/*
	 * Only features known to the kernel may be set in xstate_bv:
	 */
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);
	return ret;
}
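/*
 * Userspace view (illustrative): a debugger reaches the regset
 * handlers above via ptrace. For example, for a stopped tracee,
 * reading the NT_X86_XSTATE regset routes to xstateregs_get():
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned char buf[4096];	(must cover the xstate size)
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
 */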
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
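/*
 * Worked example: an i387 tag word of 0xfffe (ST0 tagged 10/special,
 * i.e. in use, all other registers tagged 11/empty) converts as:
 *
 *	~0xfffe				-> 0x0001
 *	(tmp | (tmp >> 1)) & 0x5555	-> 0x0001
 *	...remaining folds...		-> 0x0001
 *
 * i.e. an FXSR tag byte of 0x01, with only the ST0 bit set.
 */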
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
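/*
 * Worked example: an FXSR tag byte of 0x01 with TOS == 0 and ST0
 * holding 1.0 (exponent 0x3fff, integer bit set in significand[3])
 * yields tag 00 (valid) for ST0 and 11 (empty) for ST1..ST7, i.e.
 * a returned i387 tag word of 0xfffffffc.
 */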
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should actually be ds/cs at fpu exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
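/*
 * Note on the copy loop above: the fxsave image keeps each st(i)
 * register in a padded 16-byte slot (struct _fpxreg), while the
 * legacy i387 environment packs them as raw 10-byte extended
 * precision values (struct _fpreg), so only sizeof(to[0]) == 10
 * bytes are copied per register.
 */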
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);
#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */