If unsure, say N.
+config X86_DEBUG_FPU
+ bool "Debug the x86 FPU code"
+ depends on DEBUG_KERNEL
+ default y
+ ---help---
+ If this option is enabled then there will be extra sanity
+ checks and (boot time) debug printouts added to the kernel.
+ This debugging adds a small amount of runtime overhead
+ to the kernel.
+
+ If unsure, say N.
+
endmenu
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
+/*
+ * Debugging facility:
+ */
+#ifdef CONFIG_X86_DEBUG_FPU
+# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
+#else
+# define WARN_ON_FPU(x) ({ (void)(x); 0; })
+#endif
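+
+/*
+ * Note that the !CONFIG_X86_DEBUG_FPU stub still evaluates its
+ * argument, because callers rely on the condition's side effects,
+ * e.g.:
+ *
+ *   if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
+ *           fpu__clear(fpu);
+ */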
+
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
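+ /* Deactivating registers that are not active is a bug: */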
+ WARN_ON_FPU(!fpu->fpregs_active);
+
fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
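+ /* Activating registers that are already active is a bug: */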
+ WARN_ON_FPU(fpu->fpregs_active);
+
fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
if (fpu_switch.preload) {
- if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
+ if (unlikely(copy_fpstate_to_fpregs(new_fpu))) {
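+ /* Restoring the new task's FPU registers failed, reset its state: */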
+ WARN_ON_FPU(1);
fpu__clear(new_fpu);
+ }
}
}
static void kernel_fpu_disable(void)
{
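+ /* Disabling while already disabled indicates nested kernel FPU use: */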
- WARN_ON(this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
}
static void kernel_fpu_enable(void)
{
- WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, false);
}
{
struct fpu *fpu = &current->thread.fpu;
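+ /* Warn if the FPU is used in a context where that is not safe: */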
- WARN_ON_ONCE(!irq_fpu_usable());
+ WARN_ON_FPU(!irq_fpu_usable());
kernel_fpu_disable();
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active) {
- if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
+ if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
fpu__clear(fpu);
} else {
__fpregs_deactivate_hw();
*/
void fpu__save(struct fpu *fpu)
{
- WARN_ON(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
preempt_disable();
if (fpu->fpregs_active) {
*/
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- WARN_ON(src_fpu != &current->thread.fpu);
+ WARN_ON_FPU(src_fpu != &current->thread.fpu);
/*
* Don't let 'init optimized' areas of the XSAVE area
*/
void fpu__activate_curr(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);
if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
*/
void fpu__activate_stopped(struct fpu *child_fpu)
{
- WARN_ON_ONCE(child_fpu == &current->thread.fpu);
+ WARN_ON_FPU(child_fpu == &current->thread.fpu);
if (child_fpu->fpstate_active) {
child_fpu->last_cpu = -1;
*/
void fpu__clear(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
if (!use_eager_fpu()) {
/* FPU state will be reallocated lazily at the first use. */
*/
static void __init fpu__init_system_xstate_size_legacy(void)
{
+ static int on_boot_cpu = 1;
+
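+ /* This function should run only once, on the boot CPU: */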
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
/*
* Note that xstate_size might be overwritten later during
* fpu__init_system_xstate().
*/
static void __init fpu__init_system_ctx_switch(void)
{
- WARN_ON(current->thread.fpu.fpstate_active);
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
/* Auto enable eagerfpu for xsaveopt */
*/
static void __init setup_init_fpu_buf(void)
{
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
if (!cpu_has_xsave)
return;
void __init fpu__init_system_xstate(void)
{
unsigned int eax, ebx, ecx, edx;
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
if (!cpu_has_xsave) {
pr_info("x86/fpu: Legacy x87 FPU detected.\n");
}
if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
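+ /* The CPUID level is too low to query XSTATE_CPUID: */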
- WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
+ WARN_ON_FPU(1);
return;
}