Commit 48b1893a authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-x86-pmu-6.4' of https://github.com/kvm-x86/linux into HEAD

KVM x86 PMU changes for 6.4:

 - Disallow virtualizing legacy LBRs if architectural LBRs are available;
   the two are mutually exclusive in hardware

 - Disallow writes to immutable feature MSRs (notably PERF_CAPABILITIES)
   after KVM_RUN, and overhaul the vmx_pmu_caps selftest to better
   validate PERF_CAPABILITIES

 - Apply PMU filters to emulated events and add test coverage to the
   pmu_event_filter selftest

 - Misc cleanups and fixes
parents 807b7584 457bd7af
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -513,6 +513,7 @@ struct kvm_pmc {
#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
#define KVM_AMD_PMC_MAX_GENERIC	6
struct kvm_pmu {
	u8 version;
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
@@ -525,7 +526,6 @@ struct kvm_pmu {
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u64 raw_event_mask;
	u8 version;
	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
	struct irq_work irq_work;
+1 −1
Original line number Diff line number Diff line
@@ -414,7 +414,7 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
	 * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
	 * whether the supplied CPUID data is equal to what's already set.
	 */
	if (vcpu->arch.last_vmentry_cpu != -1) {
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;
+1 −1
Original line number Diff line number Diff line
@@ -5476,7 +5476,7 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
	 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
	 * kvm_arch_vcpu_ioctl().
	 */
	KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
	KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm);
}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+14 −7
Original line number Diff line number Diff line
@@ -93,7 +93,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
/*
 * Return whether @pmc is enabled at the "global" level.  Thin wrapper that
 * dispatches to the vendor (Intel/AMD) pmc_is_enabled implementation via
 * static call; presumably this reflects the global enable control (e.g.
 * PERF_GLOBAL_CTRL) — the vendor op defines the exact semantics.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
}
@@ -400,6 +400,12 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
	return is_fixed_event_allowed(filter, pmc->idx);
}

/*
 * Return whether the event programmed on @pmc may actually be counted:
 * the counter must be globally enabled, marked in use by the guest
 * (pmc_speculative_in_use()), and permitted by the userspace-installed
 * PMU event filter.  All three checks must pass.
 */
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
	       check_pmu_event_filter(pmc);
}

static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -409,10 +415,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)

	pmc_pause_counter(pmc);

	if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
		goto reprogram_complete;

	if (!check_pmu_event_filter(pmc))
	if (!pmc_event_is_allowed(pmc))
		goto reprogram_complete;

	if (pmc->counter < pmc->prev_counter)
@@ -589,6 +592,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
		return;

	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
	static_call(kvm_x86_pmu_refresh)(vcpu);
}

@@ -646,7 +653,7 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->prev_counter = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
	kvm_pmu_request_counter_reprogam(pmc);
	kvm_pmu_request_counter_reprogram(pmc);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
@@ -684,7 +691,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
		if (!pmc || !pmc_event_is_allowed(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
+1 −1
Original line number Diff line number Diff line
@@ -195,7 +195,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
					     KVM_PMC_MAX_FIXED);
}

static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc)
/*
 * Queue @pmc for reprogramming: set the counter's bit in the PMU's
 * reprogram_pmi bitmap and post a KVM_REQ_PMU request to the owning vCPU,
 * which services the reprogram request asynchronously.
 */
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
Loading