Commit 39428f6e authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.4, take #1

- Plug a race in the stage-2 mapping code where the IPA and the PA
  would end up being out of sync

- Make better use of the bitmap API (bitmap_zero, bitmap_zalloc...);
  see the sketch after this list

- FP/SVE/SME documentation update, in the hope that this field
  becomes clearer...

- Add workaround for the usual Apple SEIS brokenness

- Random comment fixes
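
The bitmap conversion mentioned above follows the standard kernel idiom: allocation, clearing and freeing go through the dedicated helpers rather than open-coded kcalloc()/memset() arithmetic. A minimal sketch of that idiom, illustrative only and not the actual patch in this merge (alloc_id_map() and nr_ids are made-up names):

	#include <linux/bitmap.h>
	#include <linux/gfp.h>

	/* Hypothetical helper: allocate a zeroed bitmap of nr_ids bits. */
	static unsigned long *alloc_id_map(unsigned int nr_ids)
	{
		return bitmap_zalloc(nr_ids, GFP_KERNEL);
	}

	/* Later: bitmap_zero(map, nr_ids) resets it, bitmap_free(map) frees it. */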
parents f1fcbaa1 c3a62df4
arch/arm64/include/asm/cputype.h +8 −0
@@ -126,6 +126,10 @@
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX	0x029
 #define APPLE_CPU_PART_M2_BLIZZARD	0x032
 #define APPLE_CPU_PART_M2_AVALANCHE	0x033
+#define APPLE_CPU_PART_M2_BLIZZARD_PRO	0x034
+#define APPLE_CPU_PART_M2_AVALANCHE_PRO	0x035
+#define APPLE_CPU_PART_M2_BLIZZARD_MAX	0x038
+#define APPLE_CPU_PART_M2_AVALANCHE_MAX	0x039
 
 #define AMPERE_CPU_PART_AMPERE1		0xAC3
 
@@ -181,6 +185,10 @@
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
 #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO)
+#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO)
+#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
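
These new MIDRs exist so that the SEIS workaround can match the M2 Pro/Max parts. A hedged sketch of how such parts are typically matched (the array name here is illustrative; the real list lives in the vgic code):

	#include <asm/cputype.h>

	/* Illustrative only: the actual workaround keeps its own table. */
	static const struct midr_range broken_seis[] = {
		MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
		MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
		MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
		MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
		{},
	};

is_midr_in_range_list(read_cpuid_id(), broken_seis) then decides whether the workaround applies on the running CPU.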
arch/arm64/include/asm/kvm_pgtable.h +1 −0
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
 	kvm_pte_t				old;
 	void					*arg;
 	struct kvm_pgtable_mm_ops		*mm_ops;
+	u64					start;
 	u64					addr;
 	u64					end;
 	u32					level;
arch/arm64/kvm/fpsimd.c +17 −9
@@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 
 	fpsimd_kvm_prepare();
 
+	/*
+	 * We will check TIF_FOREIGN_FPSTATE just before entering the
+	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+	 * FP_STATE_FREE if the flag set.
+	 */
 	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
 
 	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
 		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
 
-	/*
-	 * We don't currently support SME guests but if we leave
-	 * things in streaming mode then when the guest starts running
-	 * FPSIMD or SVE code it may generate SME traps so as a
-	 * special case if we are in streaming mode we force the host
-	 * state to be saved now and exit streaming mode so that we
-	 * don't have to handle any SME traps for valid guest
-	 * operations. Do this for ZA as well for now for simplicity.
-	 */
 	if (system_supports_sme()) {
 		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
 		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
 			vcpu_set_flag(vcpu, HOST_SME_ENABLED);
 
+		/*
+		 * If PSTATE.SM is enabled then save any pending FP
+		 * state and disable PSTATE.SM. If we leave PSTATE.SM
+		 * enabled and the guest does not enable SME via
+		 * CPACR_EL1.SMEN then operations that should be valid
+		 * may generate SME traps from EL1 to EL1 which we
+		 * can't intercept and which would confuse the guest.
+		 *
+		 * Do the same for PSTATE.ZA in the case where there
+		 * is state in the registers which has not already
+		 * been saved, this is very unlikely to happen.
+		 */
		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
 			vcpu->arch.fp_state = FP_STATE_FREE;
 			fpsimd_save_and_flush_cpu_state();
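
For context: the HOST_SVE_ENABLED/HOST_SME_ENABLED flags captured above are consumed on the put side, where the host's EL0 enable bits in CPACR_EL1 are restored after the guest has run. Roughly, for SVE (a paraphrase of kvm_arch_vcpu_put_fp(), not part of this diff):

	/* Paraphrase, not part of this diff. */
	if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
		sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
	else
		sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);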
arch/arm64/kvm/hyp/include/hyp/switch.h +10 −2
@@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	sve_guest = vcpu_has_sve(vcpu);
 	esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-	/* Don't handle SVE traps for non-SVE vcpus here: */
-	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+	/* Only handle traps the vCPU can support here: */
+	switch (esr_ec) {
+	case ESR_ELx_EC_FP_ASIMD:
+		break;
+	case ESR_ELx_EC_SVE:
+		if (!sve_guest)
+			return false;
+		break;
+	default:
 		return false;
+	}
 
 	/* Valid trap.  Switch the context: */

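kvm_hyp_handle_fpsimd() sees both EC classes because it is registered for each of them in the hyp exit-handler tables in the VHE and nVHE switch code; schematically, something like:

	/* Simplified sketch of the handler tables; other entries elided. */
	static const exit_handler_fn hyp_exit_handlers[] = {
		[ESR_ELx_EC_FP_ASIMD]	= kvm_hyp_handle_fpsimd,
		[ESR_ELx_EC_SVE]	= kvm_hyp_handle_fpsimd,
	};

That is why the function itself must reject SVE traps for vCPUs without SVE, which the switch above now does per EC class.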
arch/arm64/kvm/hyp/pgtable.c +32 −9
@@ -58,8 +58,9 @@
 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable_walker	*walker;
 
+	const u64			start;
 	u64				addr;
-	u64				end;
+	const u64			end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
 		.old	= READ_ONCE(*ptep),
 		.arg	= data->walker->arg,
 		.mm_ops	= mm_ops,
+		.start	= data->start,
 		.addr	= data->addr,
 		.end	= data->end,
 		.level	= level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
 		     struct kvm_pgtable_walker *walker)
 {
 	struct kvm_pgtable_walk_data walk_data = {
+		.start	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
 		.end	= PAGE_ALIGN(walk_data.addr + size),
 		.walker	= walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-	u64				phys;
+	const u64			phys;
 	kvm_pte_t			attr;
 };

@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				    struct hyp_map_data *data)
 {
+	u64 phys = data->phys + (ctx->addr - ctx->start);
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
 	if (!kvm_block_mapping_supported(ctx, phys))
 		return false;
 
-	data->phys += granule;
 	new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 	if (ctx->old == new)
 		return true;
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-	u64				phys;
+	const u64			phys;
 	kvm_pte_t			attr;
 	u8				owner_id;

@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
 	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+				       const struct stage2_map_data *data)
+{
+	u64 phys = data->phys;
+
+	/*
+	 * Stage-2 walks to update ownership data are communicated to the map
+	 * walker using an invalid PA. Avoid offsetting an already invalid PA,
+	 * which could overflow and make the address valid again.
+	 */
+	if (!kvm_phys_is_valid(phys))
+		return phys;
+
+	/*
+	 * Otherwise, work out the correct PA based on how far the walk has
+	 * gotten.
+	 */
+	return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
 					struct stage2_map_data *data)
 {
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
 	if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
 		return false;
 
-	return kvm_block_mapping_supported(ctx, data->phys);
+	return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 				      struct stage2_map_data *data)
 {
 	kvm_pte_t new;
-	u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+	u64 phys = stage2_map_walker_phys_addr(ctx, data);
+	u64 granule = kvm_granule_size(ctx->level);
 	struct kvm_pgtable *pgt = data->mmu->pgt;
 	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
 	stage2_make_pte(ctx, new);
 
-	if (kvm_phys_is_valid(phys))
-		data->phys += granule;
 	return 0;
 }
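
Taken together, this is the race fix from the summary above: the old code advanced a mutable data->phys by one granule after each installed leaf, so once stage-2 faults were handled in parallel under the read lock, the PA bookkeeping could advance without the IPA, leaving the two out of sync. The new code derives the PA purely from the walk position; for a mapping of IPA start backed by PA base, the effect is (illustrative arithmetic mirroring stage2_map_walker_phys_addr()):

	/* Illustrative only: PA for the IPA currently being visited. */
	static u64 pa_for_ipa(u64 base, u64 start, u64 addr)
	{
		return base + (addr - start);
	}

Being stateless, the result stays correct however many times a given address is revisited.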
