/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
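/*
 * Illustrative caller pattern (a sketch, not code from this file): users
 * of in-kernel SIMD bracket FPU-touching code with kernel_fpu_begin()/
 * kernel_fpu_end() - the preempt-safe wrappers around the __ variants
 * below - and check irq_fpu_usable() first when they might run in IRQ
 * context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX accelerated loop ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer/scalar fallback path ...
 *	}
 */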
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->has_fpu) {
		/* Save the user's register state into its context: */
		fpu_save_init(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->has_fpu) {
		if (WARN_ON(restore_fpu_checking(fpu)))
			fpu_reset_state(fpu);
	} else if (!use_eager_fpu()) {
		stts();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
static void __save_fpu(struct fpu *fpu)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&fpu->state->xsave);
		else
			xsave_state(&fpu->state->xsave);
	} else {
		fpu_fxsave(fpu);
	}
}
/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->has_fpu) {
		if (use_eager_fpu()) {
			__save_fpu(fpu);
		} else {
			fpu_save_init(fpu);
			__thread_fpu_end(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;

		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
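/*
 * Decoding of the legacy FSAVE defaults above (added note, my reading
 * of the x87 encoding): the low word of cwd, 0x037f, masks all FPU
 * exceptions and selects 64-bit (extended) precision with round-to-
 * nearest; a low swd word of 0 means no pending exceptions and
 * top-of-stack 0; a low twd word of 0xffff tags all eight registers
 * as empty (tag value 11 per register).
 */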
/*
 * FPU state allocation:
 */
static struct kmem_cache *task_xstate_cachep;

void fpstate_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
int fpstate_alloc(struct fpu *fpu)
{
	if (fpu->state)
		return 0;

	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;

	/* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
	WARN_ON((unsigned long)fpu->state & 15);

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc);
void fpstate_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
EXPORT_SYMBOL_GPL(fpstate_free);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	if (use_eager_fpu()) {
		memset(&dst_fpu->state->xsave, 0, xstate_size);
		__save_fpu(dst_fpu);
	} else {
		fpu__save(src_fpu);
		memcpy(dst_fpu->state, src_fpu->state, xstate_size);
	}
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->has_fpu = 0;
	dst_fpu->state = NULL;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active) {
		int err = fpstate_alloc(dst_fpu);

		if (err)
			return err;

		fpu_copy(dst_fpu, src_fpu);
	}

	return 0;
}
/*
 * Allocate the backing store for the current task's FPU registers
 * and initialize the registers themselves as well.
 *
 * Can fail.
 */
int fpstate_alloc_init(struct fpu *fpu)
{
	int ret;

	if (WARN_ON_ONCE(fpu != &current->thread.fpu))
		return -EINVAL;
	if (WARN_ON_ONCE(fpu->fpstate_active))
		return -EINVAL;

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(fpu);
	if (ret)
		return ret;

	fpstate_init(fpu);

	/* Safe to do for the current task: */
	fpu->fpstate_active = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(fpstate_alloc_init);
/*
 * This function is called before we modify a stopped child's
 * FPU context.
 *
 * If the child has not used the FPU before then initialize its
 * FPU context.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after the context is modified and
 *   the child task is woken up, the child task will restore
 *   the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, losing
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure it's initialized.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static int fpu__unlazy_stopped(struct fpu *child_fpu)
{
	int ret;

	if (WARN_ON_ONCE(child_fpu == &current->thread.fpu))
		return -EINVAL;

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpstate_alloc(child_fpu);
	if (ret)
		return ret;

	fpstate_init(child_fpu);

	/* Safe to do for stopped child tasks: */
	child_fpu->fpstate_active = 1;

	return 0;
}
/*
 * 'fpu__restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as it is in the case of do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	if (!fpu->fpstate_active) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (fpstate_alloc_init(fpu)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
	kernel_fpu_disable();
	__thread_fpu_begin(fpu);
	if (unlikely(restore_fpu_checking(fpu))) {
		fpu_reset_state(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__clear(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON_ONCE(tsk != current); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(fpu);
		fpstate_free(fpu);
	} else {
		if (!fpu->fpstate_active) {
			/* kthread execs. TODO: cleanup this horror. */
			if (WARN_ON(fpstate_alloc_init(fpu)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		restore_init_xstate();
	}
}
/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	xsave = &fpu->state->xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	xsave = &fpu->state->xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= xfeatures_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);
	return ret;
}
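/*
 * Background note on the sanitizing above (added, not from this file):
 * XRSTOR raises #GP if reserved bits in the xsave header are set, and
 * xstate_bv bits outside xfeatures_mask would request restoration of
 * state components the kernel does not manage. Hence both the header
 * mask and the memset() of the reserved area before the state can be
 * handed back to the hardware.
 */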
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
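/*
 * Worked example for the bit tricks above (added illustration): an i387
 * tag word of 0xfff0 - st(0) and st(1) tagged 00 (valid), all other
 * registers tagged 11 (empty) - converts as follows: ~twd = 0x000f,
 * the pair-OR step reduces that to 0x0005, and the packing steps
 * squeeze it into 0x03: exactly the FXSR tag byte with bits 0 and 1
 * (st0/st1 in use) set.
 */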
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag, ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);
			switch (st->exponent & 0x7fff) {
			case 0x7fff: /* Inf/NaN */
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] && !st->significand[1] &&
				    !st->significand[2] && !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else	/* denormal */
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000) /* integer bit */
					tag = FP_EXP_TAG_VALID;
				else	/* unnormal */
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
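/*
 * Added note on the (i - tos) & 7 rotation above: the i387 tag word
 * being rebuilt indexes physical registers R0..R7, while FXSAVE stores
 * the registers in top-of-stack order (st_space[0] is st(0)). For
 * physical register i the corresponding stack register is
 * st((i - tos) & 7), which is the slot FPREG_ADDR() fetches when
 * classifying the register's contents.
 */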
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
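/*
 * Layout note for the two conversion loops above (added): struct _fpreg
 * is the packed 10-byte i387 register image, while struct _fpxreg is
 * the 16-byte FXSAVE slot (10 data bytes plus padding). Both memcpy()s
 * therefore copy only the 10 data bytes - sizeof(to[0]) in one
 * direction, sizeof(from[0]) in the other - leaving the FXSAVE padding
 * bytes untouched.
 */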
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state->fsave, 0, -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state->fsave, 0, -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);
	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		fpu->state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */