Commit 6e5d8c71 authored by Marc Zyngier

Merge branch 'kvm-arm64/pmu-undef' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 8c38602f 7521c3a9
arch/arm64/include/asm/kvm_host.h +3 −0
@@ -733,4 +733,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

+#define kvm_vcpu_has_pmu(vcpu)					\
+	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
#endif /* __ARM64_KVM_HOST_H__ */
arch/arm64/kvm/pmu-emul.c +8 −11
@@ -384,7 +384,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
+	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
@@ -825,9 +825,12 @@ bool kvm_arm_support_pmu_v3(void)

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
-	if (!vcpu->arch.pmu.created)
+	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

+	if (!vcpu->arch.pmu.created)
+		return -EINVAL;
+
	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
@@ -835,9 +838,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
-		if (!kvm_arm_pmu_irq_initialized(vcpu))
-			return -EINVAL;
-
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
@@ -851,7 +851,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
	}

	kvm_pmu_vcpu_reset(vcpu);
-	vcpu->arch.pmu.ready = true;

	return 0;
}
@@ -913,8 +912,7 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
-	if (!kvm_arm_support_pmu_v3() ||
-	    !test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
@@ -1015,7 +1013,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

-		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
@@ -1035,8 +1033,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
-		if (kvm_arm_support_pmu_v3() &&
-		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

arch/arm64/kvm/reset.c +4 −0
@@ -285,6 +285,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
			pstate = VCPU_RESET_PSTATE_EL1;
		}

+		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+			ret = -EINVAL;
+			goto out;
+		}
		break;
	}

arch/arm64/kvm/sys_regs.c +17 −39
@@ -609,8 +609,9 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
+	bool enabled = kvm_vcpu_has_pmu(vcpu);

+	enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
	if (!enabled)
		kvm_inject_undefined(vcpu);

@@ -642,9 +643,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 val;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (pmu_access_el0_disabled(vcpu))
		return false;

@@ -671,9 +669,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

@@ -692,9 +687,6 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 pmceid;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
@@ -725,10 +717,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
-	u64 idx;
-
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
+	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
@@ -744,8 +733,6 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
-		} else {
-			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
@@ -759,10 +746,11 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
-	} else {
-		return false;
	}

+	/* Catch any decoding mistake */
+	WARN_ON(idx == ~0UL);
+
	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

@@ -783,9 +771,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 idx, reg;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (pmu_access_el0_disabled(vcpu))
		return false;

@@ -823,9 +808,6 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 val, mask;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (pmu_access_el0_disabled(vcpu))
		return false;

@@ -854,13 +836,8 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
-	if (!vcpu_mode_priv(vcpu)) {
-		kvm_inject_undefined(vcpu);
+	if (check_pmu_access_disabled(vcpu, 0))
		return false;
-	}

	if (p->is_write) {
		u64 val = p->regval & mask;
@@ -883,9 +860,6 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (pmu_access_el0_disabled(vcpu))
		return false;

@@ -908,9 +882,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{
	u64 mask;

-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

@@ -925,8 +896,10 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
+	if (!kvm_vcpu_has_pmu(vcpu)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
@@ -1061,10 +1034,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	} else if (id == SYS_ID_AA64DFR0_EL1) {
+		u64 cap = 0;
+
		/* Limit guests to PMUv3 for ARMv8.1 */
+		if (kvm_vcpu_has_pmu(vcpu))
+			cap = ID_AA64DFR0_PMUVER_8_1;
+
		val = cpuid_feature_cap_perfmon_field(val,
						ID_AA64DFR0_PMUVER_SHIFT,
-						ID_AA64DFR0_PMUVER_8_1);
+						cap);
	} else if (id == SYS_ID_DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
		val = cpuid_feature_cap_perfmon_field(val,
include/kvm/arm_pmu.h +0 −3
@@ -24,13 +24,11 @@ struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
-	bool ready;
	bool created;
	bool irq_level;
	struct irq_work overflow_work;
};

-#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -61,7 +59,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu {
};

-#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)