/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
void kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= bit(X86_FEATURE_OSXSAVE);
	}

	if (apic) {
		/* the timer mode field is bits 17-18 of the LVT timer register */
		if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best)
		vcpu->arch.guest_supported_xcr0 = 0;
	else
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			host_xcr0 & KVM_SUPPORTED_XCR0;

	kvm_pmu_cpuid_update(vcpu);
}
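/*
 * Added note (sketch, not from the original file): guest_supported_xcr0 is
 * the mask the XSETBV emulation is expected to enforce, along the lines of
 *
 *	if (xcr0 & ~(vcpu->arch.guest_supported_xcr0 | XSTATE_FP))
 *		return 1;	(inject #GP: component not in CPUID 0xD)
 *
 * so a guest can only enable xstate components that leaf 0xD advertised.
 */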
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
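/*
 * Added note: efer is pre-initialized to 0 and the _safe MSR read is used
 * because MSR_EFER may not exist on every CPU this can run on; a failed
 * read then simply makes NX report as unsupported rather than faulting.
 */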
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
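/*
 * Added note: bit 20 of CPUID.80000001H:EDX is the NX feature flag. The
 * fixup above hides NX from the guest when the host runs with EFER.NX
 * clear, since guest no-execute mappings could not be honored then.
 */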
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	kvm_update_cpuid(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
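/*
 * Illustrative userspace sketch (hypothetical, not from this file): the
 * legacy KVM_SET_CPUID ioctl that ends up here takes the old fixed-size
 * entry format:
 *
 *	struct kvm_cpuid *c = malloc(sizeof(*c) +
 *				     n * sizeof(struct kvm_cpuid_entry));
 *	c->nent = n;
 *	(fill c->entries[0..n-1], then:)
 *	ioctl(vcpu_fd, KVM_SET_CPUID, c);
 */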
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	kvm_update_cpuid(vcpu);
	return 0;

out:
	return r;
}
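/*
 * Typical flow (sketch, assuming the standard KVM ioctl sequence): userspace
 * queries the supported set once and feeds a possibly trimmed copy back for
 * each vcpu:
 *
 *	c->nent = KVM_MAX_CPUID_ENTRIES;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c);
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, c);
 */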
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	r = 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}
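/*
 * Added example: word 0 of x86_capability[] is the leaf 1 EDX feature word,
 * so cpuid_mask(&entry->edx, 0) below clears any guest-visible leaf 1 EDX
 * bit that the booting host CPU does not have itself.
 */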
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}
static bool supported_xcr0_bit(unsigned bit)
{
	u64 mask = ((u64)1 << bit);

	return mask & KVM_SUPPORTED_XCR0 & host_xcr0;
}
#define F(x) bit(X86_FEATURE_##x)
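/*
 * Added example: F(NX) expands to bit(X86_FEATURE_NX), a single-bit mask at
 * that feature's position inside its 32-bit capability word, which is what
 * lets the kvm_supported_word*_x86_features constants below mirror the
 * register layout of the matching CPUID leaves.
 */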
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_supported_word9_x86_features;
			cpuid_mask(&entry->ebx, 9);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
		} else
			entry->ebx = 0;
		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;

		entry->eax &= host_xcr0 & KVM_SUPPORTED_XCR0;
		entry->edx &= (host_xcr0 & KVM_SUPPORTED_XCR0) >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		for (idx = 1, i = 1; idx < 64; ++idx) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
				continue;
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	/*Add support for Centaur's CPUID instruction*/
	case 0xC0000000:
		/*Just support up to 0xC0000004 now*/
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 6: /* Thermal management */
	case 0x80000007: /* Advanced power management */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}
struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
				     &nent, cpuid->nent);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
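/*
 * Usage note (sketch): with nent clamped to KVM_MAX_CPUID_ENTRIES above, a
 * caller that allocates room for KVM_MAX_CPUID_ENTRIES entries never sees
 * -E2BIG; smaller buffers may, and are expected to be retried larger:
 *
 *	c->nent = KVM_MAX_CPUID_ENTRIES;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, c) == 0)
 *		(c->nent now holds the number of filled entries)
 */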
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}
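/*
 * Added worked example (hypothetical entries): with leaf 2 copies stored at
 * i = 3 and i = 4, successive guest CPUID(2) reads select 3, then 4, then
 * wrap back to 3 via the modulo walk above; READ_NEXT always marks the
 * entry the following lookup should return.
 */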
/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
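/*
 * Added example: leaf 0x80000008 EAX[7:0] is the physical address width, so
 * a guest advertising 40 there gets MAXPHYADDR = 40; 36 is the classic PAE
 * default used when the extended leaf is absent.
 */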
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
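/*
 * Added example: with a basic limit of 0xd in leaf 0 EAX, a guest CPUID
 * with EAX = 0x1234 falls through to this helper and returns the contents
 * of leaf 0xd, mirroring what hardware does for out-of-range leaves.
 */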
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best)
		best = check_cpuid_limit(vcpu, function, index);

	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, eax, ebx, ecx, edx;

	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	trace_kvm_cpuid(function, eax, ebx, ecx, edx);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
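/*
 * Guest-side view (illustrative sketch): the exit handled above is what a
 * guest triggers with a bare CPUID instruction, e.g.
 *
 *	asm volatile("cpuid"
 *		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *		     : "0" (function), "2" (index));
 */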