Commit 0cb9c3c8 authored by Marc Zyngier

KVM: arm64: PMU: Add counter_index_to_*reg() helpers

In order to reduce the boilerplate code, add two helpers that map a
counter index to the corresponding counter register (resp. event
register) in the vcpu register file.

Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-8-maz@kernel.org
parent 0f1e172b
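
For illustration, here is a minimal standalone sketch of the mapping the two
helpers implement. The numeric register indices are hypothetical placeholders
(the real PMCCNTR_EL0, PMEVCNTR0_EL0, PMCCFILTR_EL0 and PMEVTYPER0_EL0 values
come from KVM's vcpu sysreg definitions); ARMV8_PMU_CYCLE_IDX is 31 in the
kernel, i.e. the cycle counter sits just past the 31 event counters.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;	/* stand-ins for the kernel's u32/u64 */
typedef uint64_t u64;

#define ARMV8_PMU_CYCLE_IDX	31	/* cycle counter index, as in the kernel */

enum {
	PMEVCNTR0_EL0	= 100,	/* placeholder values; the real indices */
	PMCCNTR_EL0	= 200,	/* come from KVM's vcpu sysreg table    */
	PMEVTYPER0_EL0	= 300,
	PMCCFILTR_EL0	= 400,
};

/* counter index -> counter value register in the vcpu register file */
static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

/* counter index -> event type/filter register in the vcpu register file */
static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

int main(void)
{
	/* event counter 3 -> PMEVCNTR3_EL0 / PMEVTYPER3_EL0 */
	printf("counter 3: reg=%" PRIu32 " evtreg=%" PRIu32 "\n",
	       counter_index_to_reg(3), counter_index_to_evtreg(3));
	/* cycle counter -> the dedicated PMCCNTR_EL0 / PMCCFILTR_EL0 */
	printf("cycle:     reg=%" PRIu32 " evtreg=%" PRIu32 "\n",
	       counter_index_to_reg(ARMV8_PMU_CYCLE_IDX),
	       counter_index_to_evtreg(ARMV8_PMU_CYCLE_IDX));
	return 0;
}

Each call site in the diff below then collapses its open-coded ternary or
if/else into a single helper call.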
+18 −15
@@ -77,6 +77,16 @@ static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
 }

+static u32 counter_index_to_reg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
+}
+
+static u32 counter_index_to_evtreg(u64 idx)
+{
+	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -91,8 +101,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return 0;

-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(select_idx);
 	counter = __vcpu_sys_reg(vcpu, reg);

 	/*
@@ -122,8 +131,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;

-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+	reg = counter_index_to_reg(select_idx);
 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

 	/* Recreate the perf event to reflect the updated sample_period */
@@ -158,10 +166,7 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)

 	val = kvm_pmu_get_counter_value(vcpu, pmc->idx);

-	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		reg = PMCCNTR_EL0;
-	else
-		reg = PMEVCNTR0_EL0 + pmc->idx;
+	reg = counter_index_to_reg(pmc->idx);

 	__vcpu_sys_reg(vcpu, reg) = val;

@@ -404,16 +409,16 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 		u64 type, reg;

 		/* Filter on event type */
-		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
 		type &= kvm_pmu_event_mask(vcpu->kvm);
 		if (type != event)
 			continue;

 		/* Increment this counter */
-		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
 		if (!kvm_pmu_idx_is_64bit(vcpu, i))
 			reg = lower_32_bits(reg);
-		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

 		/* No overflow? move on */
 		if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
@@ -549,8 +554,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 	struct perf_event_attr attr;
 	u64 eventsel, counter, reg, data;

-	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+	reg = counter_index_to_evtreg(select_idx);
 	data = __vcpu_sys_reg(vcpu, reg);

 	kvm_pmu_stop_counter(vcpu, pmc);
@@ -632,8 +636,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);

-	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
-	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+	reg = counter_index_to_evtreg(select_idx);

 	__vcpu_sys_reg(vcpu, reg) = data & mask;