arch/x86/kvm/cpuid.c  +1 −1

@@ -1338,7 +1338,7 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
 	if (sanity_check_entries(entries, cpuid->nent, type))
 		return -EINVAL;

-	array.entries = kvcalloc(sizeof(struct kvm_cpuid_entry2), cpuid->nent, GFP_KERNEL);
+	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
 	if (!array.entries)
 		return -ENOMEM;
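A note on the fix above: kvcalloc() follows the calloc() convention, taking the element count first and the element size second (void *kvcalloc(size_t n, size_t size, gfp_t flags)). The overflow-checked product is the same either way, so the change is about matching the documented argument order rather than changing the allocation size. A minimal sketch of the intended call pattern (illustrative only, with nent standing in for cpuid->nent):

	struct kvm_cpuid_entry2 *entries;

	/* Element count first, element size second, then the GFP flags. */
	entries = kvcalloc(nent, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;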
arch/x86/kvm/vmx/capabilities.h  +3 −16

@@ -24,8 +24,6 @@ extern int __read_mostly pt_mode;
 #define PMU_CAP_FW_WRITES	(1ULL << 13)
 #define PMU_CAP_LBR_FMT		0x3f

-#define DEBUGCTLMSR_LBR_MASK		(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI)
-
 struct nested_vmx_msrs {
 	/*
 	 * We only store the "true" versions of the VMX capability MSRs. We

@@ -400,6 +398,7 @@ static inline bool vmx_pebs_supported(void)
 static inline u64 vmx_get_perf_capabilities(void)
 {
 	u64 perf_cap = PMU_CAP_FW_WRITES;
+	struct x86_pmu_lbr lbr;
 	u64 host_perf_cap = 0;

 	if (!enable_pmu)

@@ -408,6 +407,7 @@ static inline u64 vmx_get_perf_capabilities(void)
 	if (boot_cpu_has(X86_FEATURE_PDCM))
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);

-	perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+	if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
+		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;

 	if (vmx_pebs_supported()) {

@@ -419,19 +419,6 @@ static inline u64 vmx_get_perf_capabilities(void)
 	return perf_cap;
 }

-static inline u64 vmx_supported_debugctl(void)
-{
-	u64 debugctl = 0;
-
-	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
-		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
-
-	if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
-		debugctl |= DEBUGCTLMSR_LBR_MASK;
-
-	return debugctl;
-}
-
 static inline bool cpu_has_notify_vmexit(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
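Net effect of the capabilities.h hunks: the DEBUGCTL policy (DEBUGCTLMSR_LBR_MASK and vmx_supported_debugctl()) moves out of this header and into vmx.c, and the LBR format field of the guest's IA32_PERF_CAPABILITIES is only advertised when host perf actually exposes LBRs. A commented sketch of the new check, with the interpretation of the fields stated as assumptions (x86_perf_get_lbr() and struct x86_pmu_lbr come from arch/x86/include/asm/perf_event.h):

	struct x86_pmu_lbr lbr;

	/*
	 * x86_perf_get_lbr() fills in the host's LBR description; a
	 * non-negative return with lbr.nr != 0 means the host PMU driver
	 * exposes at least one LBR record.  Only then are the LBR format
	 * bits (bits 5:0 of IA32_PERF_CAPABILITIES, i.e. PMU_CAP_LBR_FMT)
	 * passed through from the host value read above.
	 */
	if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;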
arch/x86/kvm/vmx/vmx.c  +11 −7

@@ -2021,15 +2021,17 @@ static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
 	return (unsigned long)data;
 }

-static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
+static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
 {
-	u64 debugctl = vmx_supported_debugctl();
+	u64 debugctl = 0;

-	if (!intel_pmu_lbr_is_enabled(vcpu))
-		debugctl &= ~DEBUGCTLMSR_LBR_MASK;
+	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
+	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
+		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;

-	if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
-		debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+	if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
+	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
+		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;

 	return debugctl;
 }

@@ -2103,7 +2105,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_DEBUGCTLMSR: {
-		u64 invalid = data & ~vcpu_supported_debugctl(vcpu);
+		u64 invalid;
+
+		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
 		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
 			if (report_ignored_msrs)
 				vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",
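The new host_initiated parameter is taken from msr_data and distinguishes writes made by userspace (e.g. via KVM_SET_MSRS) from guest WRMSR exits: a host-initiated write is allowed to set BUS_LOCK_DETECT or LBR bits even if the vCPU's CPUID/PMU configuration does not advertise them, so userspace can save and restore the MSR independently of the order in which it configures the vCPU. The tail of the MSR_IA32_DEBUGCTLMSR case is cut off in the diff above; as a sketch of the usual pattern (a reconstruction, not necessarily the verbatim tree), unsupported BTF/LBR bits are warned about and dropped, while any other unsupported bit rejects the write:

		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
		if (invalid & (DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) {
			/* Legacy guests set BTF/LBR blindly; drop the bits instead of failing. */
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu, "%s: BTF|LBR in IA32_DEBUGCTLMSR 0x%llx, nop\n",
					    __func__, data);
			data &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);
			invalid &= ~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR);
		}

		/* Any remaining unsupported bit fails the write. */
		if (invalid)
			return 1;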
arch/x86/kvm/x86.c  +3 −0

@@ -10404,7 +10404,10 @@ void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
 		kvm->arch.apicv_inhibit_reasons = new;
 		if (new) {
 			unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
+			int idx = srcu_read_lock(&kvm->srcu);
+
 			kvm_zap_gfn_range(kvm, gfn, gfn+1);
+			srcu_read_unlock(&kvm->srcu, idx);
 		}
 	} else {
 		kvm->arch.apicv_inhibit_reasons = new;
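Context for the x86.c hunk: kvm_zap_gfn_range() walks the memslots, which KVM protects with kvm->srcu, so zapping the APIC-access page GFN from this path needs an SRCU read-side critical section when the caller does not already hold one. The general reader pattern, as a sketch:

	int idx;

	/* Enter a read-side critical section on kvm->srcu. */
	idx = srcu_read_lock(&kvm->srcu);

	/* ... access SRCU-protected state, e.g. the memslots ... */

	/* The index returned by srcu_read_lock() must be handed back. */
	srcu_read_unlock(&kvm->srcu, idx);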