x86: add NOPL as a synthetic CPU feature bit
author     H. Peter Anvin <hpa@zytor.com>
Tue, 19 Aug 2008 00:39:32 +0000 (17:39 -0700)
committer  H. Peter Anvin <hpa@zytor.com>
Fri, 5 Sep 2008 23:13:52 +0000 (16:13 -0700)
The long NOPs ("NOPL") are supposed to be present on all CPUs of
family >= 6, and until now they were detected on that basis alone.
Unfortunately, several non-Intel x86 implementations, both hardware
and software, don't obey this dictum.  Instead, probe for NOPL
directly: execute a NOPL instruction and see whether we get #UD.
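
For illustration, the same trick can be sketched from userspace, where
the #UD surfaces as SIGILL rather than going through the kernel's
exception table.  A minimal standalone test (an analogue of the patch
below, not part of it; x86-only, gcc-style inline asm assumed):

#include <stdio.h>
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf nopl_jmp;

static void sigill_handler(int sig)
{
	/* NOPL decoded as an invalid opcode: bail out of the probe */
	siglongjmp(nopl_jmp, 1);
}

int main(void)
{
	signal(SIGILL, sigill_handler);
	if (sigsetjmp(nopl_jmp, 1)) {
		puts("NOPL: #UD raised, not supported");
		return 1;
	}
	asm volatile(".byte 0x0f,0x1f,0xc0");	/* nopl %eax */
	puts("NOPL: supported");
	return 0;
}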

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/common_64.c
arch/x86/kernel/cpu/feature_names.c
include/asm-x86/cpufeature.h
include/asm-x86/required-features.h

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39913ce2a7b4ea5676883bb6f9d96f..0785b3c8d043434109861d8326374be04fa7e8e1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -341,6 +342,35 @@ static void __init early_cpu_detect(void)
        early_get_cap(c);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+       const u32 nopl_signature = 0x888c53b1; /* Random number */
+       u32 has_nopl = nopl_signature;
+
+       clear_cpu_cap(c, X86_FEATURE_NOPL);
+       if (c->x86 >= 6) {
+               asm volatile("\n"
+                            "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+                            "2:\n"
+                            "        .section .fixup,\"ax\"\n"
+                            "3:      xor %0,%0\n"
+                            "        jmp 2b\n"
+                            "        .previous\n"
+                            _ASM_EXTABLE(1b,3b)
+                            : "+a" (has_nopl));
+
+               if (has_nopl == nopl_signature)
+                       set_cpu_cap(c, X86_FEATURE_NOPL);
+       }
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
        u32 tfms, xlvl;
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                }
 
                init_scattered_cpuid_features(c);
+               detect_nopl(c);
        }
-
 }
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
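
A note on the probe above: _ASM_EXTABLE(1b,3b) records a (fault address,
fixup address) pair in the kernel's __ex_table section, which on 32-bit
expands to roughly ".section __ex_table,\"a\"; .balign 4; .long 1b,3b;
.previous" (the exact macro definition of the era may differ).  A #UD
taken at label 1 thus resumes at label 3, which zeroes the output
register.  Preloading a random signature rather than a plain 0/1 flag
presumably guards against a broken implementation that decodes
0F 1F C0 as some register-clobbering instruction without faulting:
such a CPU would still fail the signature comparison.
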
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index dd6e3f15017eb87b04885fd5164681bbdd9a3073..c3afba5a81a7bfb1780e715c153349de42106df2 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #include <asm/numa.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
        }
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6; unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect.  Hence, probe for it based on first
+ * principles.
+ *
+ * Note: no 64-bit chip is known to lack these, but put the code here
+ * for consistency with 32 bits, and to make it utterly trivial to
+ * diagnose the problem should it ever surface.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+{
+       const u32 nopl_signature = 0x888c53b1; /* Random number */
+       u32 has_nopl = nopl_signature;
+
+       clear_cpu_cap(c, X86_FEATURE_NOPL);
+       if (c->x86 >= 6) {
+               asm volatile("\n"
+                            "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+                            "2:\n"
+                            "        .section .fixup,\"ax\"\n"
+                            "3:      xor %0,%0\n"
+                            "        jmp 2b\n"
+                            "        .previous\n"
+                            _ASM_EXTABLE(1b,3b)
+                            : "+a" (has_nopl));
+
+               if (has_nopl == nopl_signature)
+                       set_cpu_cap(c, X86_FEATURE_NOPL);
+       }
+}
+
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
                c->x86_phys_bits = eax & 0xff;
        }
 
+       detect_nopl(c);
+
        if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
            cpu_devs[c->x86_vendor]->c_early_init)
                cpu_devs[c->x86_vendor]->c_early_init(c);
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index e43ad4ad4cbae8b75561f67ec1da51628c568491..c9017799497c5f99db3df1ce96d010b6ea5e3230 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
        NULL, NULL, NULL, NULL,
        "constant_tsc", "up", NULL, "arch_perfmon",
        "pebs", "bts", NULL, NULL,
-       "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+       "rep_good", NULL, NULL, NULL,
+       "nopl", NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
        /* Intel-defined (#2) */
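
Note that this table is indexed by bit number, so splitting the old
eight-entry row keeps "nopl" at index 20 of the word-3 table, i.e. bit
(3*32+20), which must line up with the X86_FEATURE_NOPL definition in
cpufeature.h below; this table also supplies the flag names printed in
/proc/cpuinfo.
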
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 762f6a6bc707cedfcabc15db59a2977a54decab2..9489283a4bcfbc6bbb4f9e8fd5c25dbb8ed547f8 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
 #define X86_FEATURE_UP         (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS       (3*32+12)  /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS                (3*32+13)  /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32  (3*32+14)  /* syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 (3*32+15)  /* sysenter in ia32 userspace */
+#define X86_FEATURE_PEBS       (3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                (3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32  (3*32+14) /* syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
 #define X86_FEATURE_REP_GOOD   (3*32+16) /* rep microcode works well on this CPU */
 #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
 #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
-#define X86_FEATURE_11AP       (3*32+19)  /* Bad local APIC aka 11AP */
+#define X86_FEATURE_11AP       (3*32+19) /* Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL       (3*32+20) /* The NOPL (0F 1F) instructions */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index adec887dd7cd5f07c1148f8c233e033e05160e40..5c2ff4bc2980b0c21d4973f52c123ab1cca45e7f 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
 # define NEED_3DNOW    0
 #endif
 
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL     (1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL     0
+#endif
+
 #ifdef CONFIG_X86_64
 #define NEED_PSE       0
 #define NEED_MSR       (1<<(X86_FEATURE_MSR & 31))
@@ -67,7 +73,7 @@
 #define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
 
 #define REQUIRED_MASK2 0
-#define REQUIRED_MASK3 0
+#define REQUIRED_MASK3 (NEED_NOPL)
 #define REQUIRED_MASK4 0
 #define REQUIRED_MASK5 0
 #define REQUIRED_MASK6 0
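
As a sanity check on the mask arithmetic: a feature number decomposes
into a word index (bits / 32) and a bit within that word (bits & 31),
so REQUIRED_MASK3 picks up bit 20.  A tiny standalone sketch (not part
of the patch) that mirrors the NEED_NOPL computation:

#include <stdio.h>

#define X86_FEATURE_NOPL (3*32+20)	/* as in cpufeature.h above */

int main(void)
{
	/* prints "word 3, mask 0x00100000", i.e. 1 << 20 */
	printf("word %d, mask 0x%08x\n",
	       X86_FEATURE_NOPL / 32, 1u << (X86_FEATURE_NOPL & 31));
	return 0;
}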