Commit 49661a52 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.4, take #3

- Fix the reported address of a watchpoint forwarded to userspace

- Fix the freeing of the root of stage-2 page tables

- Stop creating spurious PMU events to perform detection of the
  default PMU and use the existing PMU list instead.
parents 26f31498 40e54cad

arch/arm64/kvm/hyp/include/hyp/switch.h (+6 −2)
@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	if (!__populate_fault_info(vcpu))
 		return true;
 
 	return false;
 }
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
 
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (!__populate_fault_info(vcpu))
+	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
 		return true;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
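
A note on the mechanism: __alias() is the kernel's wrapper (from include/linux/compiler_attributes.h) around the compiler's alias attribute, so kvm_hyp_handle_iabt_low and kvm_hyp_handle_watchpt_low become alternate symbol names for kvm_hyp_handle_memory_fault rather than separate function bodies. A minimal standalone sketch with hypothetical handler names:

#include <stdio.h>

/* The kernel's definition, from include/linux/compiler_attributes.h: */
#define __alias(symbol) __attribute__((__alias__(#symbol)))

/* Hypothetical shared handler body, standing in for
 * kvm_hyp_handle_memory_fault. */
static int handle_fault(int code)
{
	return code != 0;
}

/* Alternate names for the same code, as in the hunk above. */
static int handle_iabt(int code) __alias(handle_fault);
static int handle_watchpt(int code) __alias(handle_fault);

int main(void)
{
	printf("%d %d\n", handle_iabt(1), handle_watchpt(0)); /* prints: 1 0 */
	return 0;
}

Built with gcc or clang, both calls run the single handle_fault body; the extra names cost nothing at run time.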
arch/arm64/kvm/hyp/nvhe/switch.c (+2 −0)
@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };

@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
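
Both tables dispatch guest exits by exception class: the EC field of the syndrome register (ESR_ELx, bits [31:26]) indexes the array, and before this fix the ESR_ELx_EC_WATCHPT_LOW slot was empty, so watchpoint exits bypassed the fault-info path that records the faulting address. A rough standalone sketch of the dispatch idea, with simplified stand-in types (not the kernel's code):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's types; illustration only. */
struct vcpu;
typedef bool (*exit_handler_fn)(struct vcpu *vcpu, uint64_t *exit_code);

#define EC_SHIFT 26   /* ESR_ELx[31:26] holds the exception class */
#define EC_MASK  0x3f

static bool dispatch_exit(const exit_handler_fn table[],
			  struct vcpu *vcpu, uint64_t esr, uint64_t *exit_code)
{
	exit_handler_fn fn = table[(esr >> EC_SHIFT) & EC_MASK];

	/*
	 * An empty slot -- as ESR_ELx_EC_WATCHPT_LOW was before this
	 * fix -- falls through to generic handling with no fault info.
	 */
	return fn ? fn(vcpu, exit_code) : false;
}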

arch/arm64/kvm/hyp/pgtable.c (+3 −0)
@@ -1332,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
 	};
 
 	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+
+	WARN_ON(mm_ops->page_count(pgtable) != 1);
+	mm_ops->put_page(pgtable);
 }
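
This hunk plugs a leak: the __kvm_pgtable_walk() starting at level + 1 tears down everything below the detached root, but the root page itself still held its final reference. The added lines assert that exactly one reference remains and then drop it. The same pattern on a toy refcounted tree, purely for illustration (hypothetical types, not kernel code):

#include <assert.h>
#include <stdlib.h>

/* Toy refcounted table node, for illustration only. */
struct table {
	int refcount;
	struct table *children[4];
};

static void put_table(struct table *t)
{
	if (--t->refcount == 0)
		free(t);
}

/* Recursively drop a node and everything below it. */
static void free_subtree(struct table *t)
{
	for (int i = 0; i < 4; i++)
		if (t->children[i])
			free_subtree(t->children[i]);
	put_table(t);
}

/*
 * Mirror of the fixed teardown: free the children via the walk, then
 * assert the root's last reference and drop it explicitly. The pre-fix
 * version stopped after the loop and leaked the root page.
 */
static void free_removed_table(struct table *root)
{
	for (int i = 0; i < 4; i++)
		if (root->children[i])
			free_subtree(root->children[i]);

	assert(root->refcount == 1);
	put_table(root);
}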
arch/arm64/kvm/hyp/vhe/switch.c (+1 −0)
@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
 	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };

arch/arm64/kvm/pmu-emul.c (+23 −35)
@@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)

 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	mutex_lock(&arm_pmus_lock);
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
 	}
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
-	}
-
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
 	return pmu;
 }
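
The rewritten probe no longer creates a throwaway perf event just to discover a PMU; it takes arm_pmus_lock and returns the first registered arm_pmu whose supported_cpus mask covers the probing CPU. The same lookup, sketched standalone over a plain array with hypothetical types (the kernel iterates its intrusive list via list_for_each_entry() instead):

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for arm_pmu and its CPU mask; illustration only. */
struct pmu_desc {
	const char *name;
	unsigned long supported_cpus;	/* bit n set => CPU n supported */
};

static bool cpu_supported(const struct pmu_desc *p, int cpu)
{
	return p->supported_cpus & (1UL << cpu);
}

/* First PMU that supports the probing CPU wins; NULL maps to -ENODEV. */
static const struct pmu_desc *probe_default_pmu(const struct pmu_desc *pmus,
						size_t n, int cpu)
{
	for (size_t i = 0; i < n; i++)
		if (cpu_supported(&pmus[i], cpu))
			return &pmus[i];
	return NULL;
}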
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		return -EBUSY;
 
 	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
+		/*
+		 * No PMU set, get the default one.
+		 *
+		 * The observant among you will notice that the supported_cpus
+		 * mask does not get updated for the default PMU even though it
+		 * is quite possible the selected instance supports only a
+		 * subset of cores in the system. This is intentional, and
+		 * upholds the preexisting behavior on heterogeneous systems
+		 * where vCPUs can be scheduled on any core but the guest
+		 * counters could stop working.
+		 */
 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
 		if (!kvm->arch.arm_pmu)
 			return -ENODEV;