/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>

#include <linux/hardirq.h>
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
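
/*
 * Together with fpu->last_cpu, this per-CPU owner pointer lets the
 * context-switch code detect whether a task's FPU registers are still
 * live in this CPU's registers, so a lazy restore can be skipped.
 */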
static void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}
static void kernel_fpu_enable(void)
{
	WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}
static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * With lazy FPU switching we can do a kernel_fpu_begin/end() pair
 * *ONLY* if that pair does nothing at all: the thread must not have
 * an active FPU context (so that we don't try to save the FPU state),
 * and TS must be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case, when we return true: in the likely case
 * the thread has an FPU context but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active) {
		if (WARN_ON(restore_fpu_checking(fpu)))
			fpu_reset_state(fpu);
	} else if (!use_eager_fpu()) {
		stts();
	}

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
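
/*
 * Example usage (an illustrative sketch, not code from this file):
 * kernel code that wants to execute FP/SIMD instructions brackets them
 * with kernel_fpu_begin()/kernel_fpu_end(), and falls back to an
 * integer path when the FPU is not usable in the current context. The
 * helpers simd_xor_blocks() and generic_xor_blocks() are made-up names:
 *
 *	static void xor_blocks(unsigned long *a, unsigned long *b, int n)
 *	{
 *		if (!irq_fpu_usable()) {
 *			generic_xor_blocks(a, b, n);
 *			return;
 *		}
 *		kernel_fpu_begin();
 *		simd_xor_blocks(a, b, n);	(SSE/AVX instructions)
 *		kernel_fpu_end();
 *	}
 */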
/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious
	 * DNA fault. Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);
void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
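
/*
 * Example usage (an illustrative sketch): a driver executing an
 * instruction that faults when CR0::TS is set (e.g. the VIA PadLock
 * crypto instructions) can clear and restore TS around it, even from
 * atomic context:
 *
 *	int ts_state = irq_ts_save();
 *	... execute the TS-sensitive instruction ...
 *	irq_ts_restore(ts_state);
 */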
static void __save_fpu(struct fpu *fpu)
{
	if (use_xsave()) {
		if (unlikely(system_state == SYSTEM_BOOTING))
			xsave_state_booting(&fpu->state.xsave);
		else
			xsave_state(&fpu->state.xsave);
	} else {
		fpu_fxsave(fpu);
	}
}
/*
 * Save the FPU state (initialize it if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON(fpu != &current->thread.fpu);

	preempt_disable();
	if (fpu->fpregs_active) {
		if (use_eager_fpu()) {
			__save_fpu(fpu);
		} else {
			copy_fpregs_to_fpstate(fpu);
			fpregs_deactivate(fpu);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
void fpstate_init(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state.soft);
		return;
	}

	memset(&fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state.fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state.fsave;

		fp->cwd = 0xffff037fu;	/* all exceptions masked, 64-bit precision */
		fp->swd = 0xffff0000u;	/* status word cleared */
		fp->twd = 0xffffffffu;	/* all registers tagged empty */
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpstate_init);
/*
 * Copy the current task's FPU state to a new task's FPU context.
 *
 * In the 'eager' case we just save to the destination context.
 *
 * In the 'lazy' case we save to the source context, mark the FPU lazy
 * via stts() and copy the source context into the destination context.
 */
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	WARN_ON(src_fpu != &current->thread.fpu);

	if (use_eager_fpu()) {
		memset(&dst_fpu->state.xsave, 0, xstate_size);
		__save_fpu(dst_fpu);
	} else {
		fpu__save(src_fpu);
		memcpy(&dst_fpu->state, &src_fpu->state, xstate_size);
	}
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (src_fpu->fpstate_active)
		fpu_copy(dst_fpu, src_fpu);

	return 0;
}
/*
 * Initialize the current task's in-memory FPU context:
 */
void fpstate_init_curr(struct fpu *fpu)
{
	WARN_ON_ONCE(fpu != &current->thread.fpu);
	WARN_ON_ONCE(fpu->fpstate_active);

	fpstate_init(fpu);

	/* Safe to do for the current task: */
	fpu->fpstate_active = 1;
}
EXPORT_SYMBOL_GPL(fpstate_init_curr);
/*
 * This function is called before we modify a stopped child's
 * FPU state context.
 *
 * If the child has not used the FPU before then initialize its
 * FPU context.
 *
 * If the child has used the FPU before then unlazy it.
 *
 * [ After this function call, after the context is modified and
 *   the child task is woken up, the child task will restore
 *   the modified FPU state from the modified context. If we
 *   didn't clear its lazy status here then the lazy in-registers
 *   state pending on its former CPU could be restored, losing
 *   the modifications. ]
 *
 * This function is also called before we read a stopped child's
 * FPU state - to make sure the in-memory context is up to date.
 *
 * TODO: A future optimization would be to skip the unlazying in
 *       the read-only case, it's not strictly necessary for
 *       read-only access to the context.
 */
static int fpu__unlazy_stopped(struct fpu *child_fpu)
{
	if (WARN_ON_ONCE(child_fpu == &current->thread.fpu))
		return -EINVAL;

	if (child_fpu->fpstate_active) {
		child_fpu->last_cpu = -1;
		return 0;
	}

	fpstate_init(child_fpu);

	/* Safe to do for stopped child tasks: */
	child_fpu->fpstate_active = 1;

	return 0;
}
/*
 * 'fpu__restore()' installs the current task's FPU state into the
 * registers: it runs when a task that has an FPU context takes a
 * device-not-available fault.
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available()).
 */
void fpu__restore(void)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;

	if (!fpu->fpstate_active)
		fpstate_init_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	fpregs_activate(fpu);
	if (unlikely(restore_fpu_checking(fpu))) {
		fpu_reset_state(fpu);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu.counter++;
	}
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
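
/*
 * Call-path sketch (for orientation; the trap entry itself lives
 * outside this file): a task executes an FPU instruction while CR0::TS
 * is set, the CPU raises #NM (device-not-available),
 * do_device_not_available() runs and calls fpu__restore(), which
 * activates the task's FPU context in the registers.
 */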
void fpu__clear(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	WARN_ON_ONCE(tsk != current); /* Almost certainly an anomaly */

	if (!use_eager_fpu()) {
		/* FPU state will be reallocated lazily at the first use. */
		drop_fpu(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init_curr(fpu);
		} else {
			restore_init_xstate();
		}
	}
}
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by xsave.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return target_fpu->fpstate_active ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	struct fpu *target_fpu = &target->thread.fpu;

	return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &fpu->state.fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * Update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;

	return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	xsave = &fpu->state.xsave;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct xsave_struct *xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	xsave = &fpu->state.xsave;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->header.xfeatures &= xfeatures_mask;

	/*
	 * These bits must be zero.
	 */
	memset(&xsave->header.reserved, 0, 48);

	return ret;
}
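
/*
 * These regset handlers back the ptrace(PTRACE_GETREGSET/SETREGSET)
 * interface. A minimal user-space sketch (illustrative, not kernel
 * code) that reads a stopped child's xstate via xstateregs_get():
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned char buf[4096];	(size is an arbitrary upper bound)
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
 *
 * On success, iov.iov_len is updated to the number of bytes the kernel
 * actually copied out.
 */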
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
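
/*
 * Worked example for the transformation above (16-bit view): an i387
 * tag word of 0xffc0 - ST0..ST2 tagged 00 (valid), ST3..ST7 tagged 11
 * (empty) - converts as:
 *
 *	~0xffc0                           = 0x003f
 *	(0x003f | (0x003f >> 1)) & 0x5555 = 0x0015
 *	(0x0015 | (0x0015 >> 1)) & 0x3333 = 0x0013
 *	(0x0013 | (0x0013 >> 2)) & 0x0f0f = 0x0007
 *	(0x0007 | (0x0007 >> 4)) & 0x00ff = 0x0007
 *
 * i.e. the resulting FXSR tag byte has bits 0-2 set: ST0..ST2 are
 * non-empty, the rest are empty.
 */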
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
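
/*
 * The FP_EXP_TAG_* values match the i387 tag register encoding:
 * 00 - valid, 01 - zero, 10 - special (NaN, infinity, denormal),
 * 11 - empty. FXSR keeps only one bit per register (non-empty/empty),
 * so twd_fxsr_to_i387() below has to reconstruct the full 2-bit tag by
 * classifying each register's actual contents.
 */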
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
/*
 * FXSR floating point environment conversions.
 */
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be ds/cs at FPU exception time, but
	 * that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
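
/*
 * Note on the register copies above and in convert_to_fxsr() below:
 * the legacy i387 'env' layout packs the eight ST registers as
 * consecutive 10-byte (struct _fpreg) entries, while the FXSR layout
 * stores each register in a 16-byte (struct _fpxreg) slot. Only ten
 * bytes per register carry data, which is what the memcpy()s copy in
 * each direction.
 */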
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state.fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &fpu->state.fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = &target->thread.fpu;
	struct user_i387_ia32_struct env;
	int ret;

	ret = fpu__unlazy_stopped(fpu);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &fpu->state.fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP state.
	 */
	if (cpu_has_xsave)
		fpu->state.xsave.header.xfeatures |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int fpvalid;

	fpvalid = fpu->fpstate_active;
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      ufpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */