Commit d1c88a40 authored by Paolo Bonzini

KVM: x86: always allow host-initiated writes to PMU MSRs



Whenever an MSR is part of KVM_GET_MSR_INDEX_LIST, it must always be
retrievable and settable with KVM_GET_MSR and KVM_SET_MSR.  Accept
the PMU MSRs unconditionally in intel_is_valid_msr if the access is
host-initiated.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bfb088d9
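
Note: a "host-initiated" access is one performed by userspace through the
KVM_GET_MSRS/KVM_SET_MSRS ioctls, as opposed to a guest RDMSR/WRMSR.  A
minimal userspace sketch of the kind of access this commit guarantees to
succeed (read_global_ctrl is a hypothetical helper invented for this
sketch; vcpu_fd creation and error handling are omitted):

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* Host-initiated read of MSR_CORE_PERF_GLOBAL_CTRL (0x38f): after this
 * commit it is accepted even if the guest's CPUID hides the PMU. */
static int read_global_ctrl(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = 0x38f;	/* MSR_CORE_PERF_GLOBAL_CTRL */

	/* KVM_GET_MSRS returns the number of MSRs actually read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
		return -1;

	printf("MSR 0x38f = 0x%llx\n", (unsigned long long)req.entry.data);
	return 0;
}

Before this change, the same call could fail on a vCPU whose CPUID hides
the PMU, violating the KVM_GET_MSR_INDEX_LIST contract stated above.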
arch/x86/kvm/pmu.c (+2 −2)
@@ -458,10 +458,10 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 	}
 }
 
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
 {
 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr, host_initiated);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
arch/x86/kvm/pmu.h (+2 −2)
@@ -29,7 +29,7 @@ struct kvm_pmu_ops {
 		unsigned int idx, u64 *mask);
 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
 	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
-	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 	void (*refresh)(struct kvm_vcpu *vcpu);
@@ -181,7 +181,7 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
-bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
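
The is_valid_msr slot above is reached from the generic code via KVM's
static_call() machinery (static_call(kvm_x86_pmu_is_valid_msr) in pmu.c).
A self-contained sketch of the underlying ops-table pattern, using plain
function pointers for illustration (all names here are invented for the
sketch, not kernel identifiers):

#include <stdbool.h>
#include <stdint.h>

struct vcpu;					/* opaque in this sketch */

/* Vendor-specific PMU callbacks, mirroring struct kvm_pmu_ops. */
struct pmu_ops_sketch {
	bool (*is_valid_msr)(struct vcpu *vcpu, uint32_t msr,
			     bool host_initiated);
};

static const struct pmu_ops_sketch *pmu_ops;	/* set once at vendor init */

/* Generic entry point: forwards to whichever vendor (Intel/AMD)
 * implementation was registered, now passing host_initiated through. */
static bool pmu_is_valid_msr(struct vcpu *vcpu, uint32_t msr,
			     bool host_initiated)
{
	return pmu_ops->is_valid_msr(vcpu, msr, host_initiated);
}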
arch/x86/kvm/svm/pmu.c (+1 −1)
@@ -229,7 +229,7 @@ static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[idx];
 }
 
-static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
 {
 	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
 	return false;
arch/x86/kvm/vmx/pmu_intel.c (+17 −10)
@@ -195,38 +195,45 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 	return ret;
 }
 
-static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr, bool host_initiated)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	u64 perf_capabilities = vcpu->arch.perf_capabilities;
-	int ret;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 	case MSR_CORE_PERF_GLOBAL_STATUS:
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		ret = pmu->version > 1;
+		if (host_initiated)
+			return true;
+		return pmu->version > 1;
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		ret = perf_capabilities & PERF_CAP_PEBS_FORMAT;
+		if (host_initiated)
+			return true;
+		return perf_capabilities & PERF_CAP_PEBS_FORMAT;
 		break;
 	case MSR_IA32_DS_AREA:
-		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
+		if (host_initiated)
+			return true;
+		return guest_cpuid_has(vcpu, X86_FEATURE_DS);
 		break;
 	case MSR_PEBS_DATA_CFG:
-		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
+		if (host_initiated)
+			return true;
+		return (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
 			((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
 		break;
 	default:
-		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+		if (host_initiated)
+			return true;
+		return get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
 			intel_pmu_is_valid_lbr_msr(vcpu, msr);
 		break;
 	}
 
-	return ret;
 }
 
 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -589,7 +596,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
 
 	nested_vmx_pmu_refresh(vcpu,
-			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL));
+			       intel_is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, false));
 
 	if (cpuid_model_is_consistent(vcpu))
 		x86_perf_get_lbr(&lbr_desc->records);
arch/x86/kvm/x86.c (+5 −5)
@@ -3719,7 +3719,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		fallthrough;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
-		if (kvm_pmu_is_valid_msr(vcpu, msr))
+		if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
@@ -3802,7 +3802,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 #endif
 	default:
-		if (kvm_pmu_is_valid_msr(vcpu, msr))
+		if (kvm_pmu_is_valid_msr(vcpu, msr, msr_info->host_initiated))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		return KVM_MSR_RET_INVALID;
 	}
@@ -3882,7 +3882,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		if (!msr_info->host_initiated)
 			return 1;
@@ -3892,7 +3892,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		msr_info->data = 0;
 		break;
@@ -4138,7 +4138,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 #endif
 	default:
-		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index, msr_info->host_initiated))
 			return kvm_pmu_get_msr(vcpu, msr_info);
 		return KVM_MSR_RET_INVALID;
 	}
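
For reference, the msr_info->host_initiated flag consumed above lives in
struct msr_data and is set by the caller: the KVM_GET_MSRS/KVM_SET_MSRS
ioctl paths pass true, while guest RDMSR/WRMSR emulation passes false.
An abridged sketch of that plumbing as it exists around this commit
(simplified; not part of this diff):

/* arch/x86/include/asm/kvm_host.h (abridged) */
struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

/* arch/x86/kvm/x86.c (abridged): guest WRMSR emulation -> false ... */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return kvm_set_msr_ignored_check(vcpu, index, data, false);
}

/* ... while the ioctl(KVM_SET_MSRS) path -> true. */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
}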