#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
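
/*
 * Illustrative note (not in the original source): the three macros above
 * convert between the containing objects, so the round trips are
 * identities:
 *
 *	pmu_to_vcpu(vcpu_to_pmu(vcpu)) == vcpu
 *	pmc_to_pmu(pmc) == vcpu_to_pmu(pmc->vcpu)
 */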

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
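
/*
 * Worked example (illustrative): with IA32_FIXED_CTR_CTRL = 0xb0
 * (binary 1011 0000), fixed_ctrl_field(0xb0, 1) == 0xb, i.e. fixed
 * counter 1 has EN = 3 (count in both ring 0 and ring 3) and its PMI
 * bit set, while fixed_ctrl_field(0xb0, 0) == 0 leaves counter 0
 * fully disabled.
 */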

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};
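
/*
 * Sketch (illustrative; the intel_* callback names are assumptions, not
 * part of this header): each vendor backend supplies its own ops table,
 * declared extern at the bottom of this header and wired up in the
 * backend file roughly like:
 *
 *	struct kvm_pmu_ops intel_pmu_ops = {
 *		.find_arch_event = intel_find_arch_event,
 *		.pmc_is_enabled  = intel_pmc_is_enabled,
 *		.msr_idx_to_pmc  = intel_msr_idx_to_pmc,
 *		.set_msr         = intel_pmu_set_msr,
 *		...
 *	};
 */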

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Mask that clamps a raw count to the counter's architectural width. */
	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	/* Start from the saved count and add the live perf event's delta. */
	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		/* Fold the final hardware count into pmc->counter first. */
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];

	return NULL;
}
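
/*
 * Usage sketch (illustrative): a caller resolving an MSR passes the base
 * matching the register family it is decoding, e.g. on Intel:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	// PERFCTRn
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	// EVNTSELn
 *
 * Either call returns NULL when msr falls outside the respective range.
 */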

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];

	return NULL;
}
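
/*
 * Example (illustrative): MSR_CORE_PERF_FIXED_CTR0 + 1 resolves to
 * fixed_counters[1] when the vCPU exposes at least two fixed counters,
 * and to NULL otherwise.
 */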

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;

#endif /* __KVM_X86_PMU_H */