Commit 545feb96 authored by Sean Christopherson, committed by Paolo Bonzini

Revert "KVM: x86: always allow host-initiated writes to PMU MSRs"

Revert the hack to allow host-initiated accesses to all "PMU" MSRs,
as it makes intel_is_valid_msr() return true for _all_ host-initiated MSR
accesses, regardless of whether a given MSR has a snowball's chance in
hell of actually being a PMU MSR.

That mostly gets papered over by the actual get/set helpers only handling
MSRs that they know about, except for the minor detail that
kvm_pmu_{g,s}et_msr() eat reads and writes when the PMU is disabled.
I.e. KVM will happily allow reads and writes to _any_ MSR if the PMU is
disabled, either via module param or capability.

This reverts commit d1c88a40.

Fixes: d1c88a40 ("KVM: x86: always allow host-initiated writes to PMU MSRs")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220611005755.753273-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5d4283df
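
To see the reverted bug concretely, here is a standalone C model of the pre-revert logic. This is not the kernel code: struct vcpu, pmu_set_msr(), set_msr_common() and MSR_BOGUS are simplified, made-up stand-ins. It only illustrates how the host_initiated short-circuit combines with a disabled PMU to silently eat an access to a nonexistent MSR.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_BOGUS                 0xdead  /* not a PMU MSR at all */

struct vcpu {
	bool pmu_enabled;  /* module param / per-VM capability */
	int pmu_version;
};

/* Pre-revert behavior: host-initiated means "valid" for _any_ MSR. */
static bool intel_is_valid_msr(struct vcpu *v, uint32_t msr, bool host_initiated)
{
	if (host_initiated)
		return true;  /* the hack being reverted */

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_CTRL:
		return v->pmu_version > 1;
	default:
		return false;
	}
}

/* Models kvm_pmu_set_msr() eating writes when the PMU is disabled. */
static int pmu_set_msr(struct vcpu *v, uint32_t msr, uint64_t data)
{
	if (!v->pmu_enabled)
		return 0;  /* "success", but the write went nowhere */

	/* ... per-MSR handling of real PMU MSRs would go here ... */
	return 0;
}

static int set_msr_common(struct vcpu *v, uint32_t msr, uint64_t data,
			  bool host_initiated)
{
	if (intel_is_valid_msr(v, msr, host_initiated))
		return pmu_set_msr(v, msr, data);

	return 1;  /* reject: unknown MSR */
}

int main(void)
{
	struct vcpu v = { .pmu_enabled = false, .pmu_version = 0 };

	/* Host-initiated write to a bogus MSR is silently eaten (prints 0). */
	printf("bogus MSR, host-initiated: %d\n",
	       set_msr_common(&v, MSR_BOGUS, 0, true));

	/* The same write from the guest is correctly rejected (prints 1). */
	printf("bogus MSR, guest-initiated: %d\n",
	       set_msr_common(&v, MSR_BOGUS, 0, false));

	return 0;
}

With the revert applied, the host_initiated short-circuit is gone, so in this model the bogus write would be rejected on both paths.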
arch/x86/kvm/pmu.c (+2 −2)
@@ -425,10 +425,10 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 	}
 }
 
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr, host_initiated);
+		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
arch/x86/kvm/pmu.h (+2 −2)
@@ -32,7 +32,7 @@ struct kvm_pmu_ops {
 		unsigned int idx, u64 *mask);
 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
 	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
-	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
+	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	void (*refresh)(struct kvm_vcpu *vcpu);
@@ -189,7 +189,7 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm/pmu.c (+1 −1)
@@ -179,7 +179,7 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[idx];
 }
 
-static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
+static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
 	return false;
arch/x86/kvm/vmx/pmu_intel.c (+10 −17)
@@ -196,45 +196,38 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 	return ret;
 }
 
-static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
+static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	u64 perf_capabilities = vcpu->arch.perf_capabilities;
+	int ret;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (host_initiated)
-			return true;
-		return pmu->version > 1;
+		ret = pmu->version > 1;
+		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (host_initiated)
-			return true;
-		return perf_capabilities & PERF_CAP_PEBS_FORMAT;
+		ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
+		break;
 	case MSR_IA32_DS_AREA:
-		if (host_initiated)
-			return true;
-		return guest_cpuid_has(vcpu, X86_FEATURE_DS);
+		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
+		break;
 	case MSR_PEBS_DATA_CFG:
-		if (host_initiated)
-			return true;
-		return (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
 			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
+		break;
 	default:
-		if (host_initiated)
-			return true;
-		return get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
 			intel_pmu_is_valid_lbr_msr(vcpu, msr);
+		break;
 	}
+
+	return ret;
 }
 
 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -596,7 +589,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
 	nested_vmx_pmu_refresh(vcpu,
-			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, false));
+			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));
 
 	if (cpuid_model_is_consistent(vcpu))
 		x86_perf_get_lbr(&lbr_desc->records);
arch/x86/kvm/x86.c (+5 −5)
@@ -3704,7 +3704,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		fallthrough;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
-		if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
+		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
@@ -3787,7 +3787,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 #endif
 	default:
-		if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
+		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		return KVM_MSR_RET_INVALID;
 	}
@@ -3867,7 +3867,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		if (!msr_info->host_initiated)
 			return 1;
@@ -3877,7 +3877,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		msr_info->data = 0;
 		break;
@@ -4123,7 +4123,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 #endif
 	default:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		return KVM_MSR_RET_INVALID;
 	}
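
For background, an MSR access is "host-initiated" when it comes from userspace through the KVM_GET_MSRS/KVM_SET_MSRS ioctls (e.g. during VM save/restore or live migration), rather than from the guest executing RDMSR/WRMSR. A minimal sketch of such a write, with error handling elided (requires /dev/kvm at runtime; the target MSR is merely illustrative):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* One-entry buffer for KVM_SET_MSRS (common userspace idiom:
	 * a header followed by the entry array). */
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry[1];
	} buf;

	memset(&buf, 0, sizeof(buf));
	buf.hdr.nmsrs = 1;
	buf.entry[0].index = 0x38f;	/* MSR_CORE_PERF_GLOBAL_CTRL */
	buf.entry[0].data = 0;

	/* Returns the number of MSRs processed: 1 if the write was
	 * accepted, 0 if KVM rejected the index as invalid. */
	int ret = ioctl(vcpu, KVM_SET_MSRS, &buf);
	printf("KVM_SET_MSRS processed %d MSR(s)\n", ret);

	return 0;
}

The kvm_set_msr_common() path in the x86.c hunks above handles both this ioctl and guest WRMSR; the host_initiated flag being removed here is what used to distinguish the two.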