Commit b0803ba7 authored by Marc Zyngier
Browse files

KVM: arm64: Convert FSC_* over to ESR_ELx_FSC_*



The former is an AArch32 legacy, so let's move over to the
verbose (and strictly identical) version.

This involves moving some of the #defines that were private
to KVM into the more generic esr.h.

Signed-off-by: Marc Zyngier <maz@kernel.org>
parent b8f8d190
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -114,6 +114,15 @@
#define ESR_ELx_FSC_ACCESS	(0x08)
#define ESR_ELx_FSC_FAULT	(0x04)
#define ESR_ELx_FSC_PERM	(0x0C)
#define ESR_ELx_FSC_SEA_TTW0	(0x14)
#define ESR_ELx_FSC_SEA_TTW1	(0x15)
#define ESR_ELx_FSC_SEA_TTW2	(0x16)
#define ESR_ELx_FSC_SEA_TTW3	(0x17)
#define ESR_ELx_FSC_SECC	(0x18)
#define ESR_ELx_FSC_SECC_TTW0	(0x1c)
#define ESR_ELx_FSC_SECC_TTW1	(0x1d)
#define ESR_ELx_FSC_SECC_TTW2	(0x1e)
#define ESR_ELx_FSC_SECC_TTW3	(0x1f)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT	(24)
+0 −15
Original line number Diff line number Diff line
@@ -319,21 +319,6 @@
				 BIT(18) |		\
				 GENMASK(16, 15))

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT	ESR_ELx_FSC_FAULT
#define FSC_ACCESS	ESR_ELx_FSC_ACCESS
#define FSC_PERM	ESR_ELx_FSC_PERM
#define FSC_SEA		ESR_ELx_FSC_EXTABT
#define FSC_SEA_TTW0	(0x14)
#define FSC_SEA_TTW1	(0x15)
#define FSC_SEA_TTW2	(0x16)
#define FSC_SEA_TTW3	(0x17)
#define FSC_SECC	(0x18)
#define FSC_SECC_TTW0	(0x1c)
#define FSC_SECC_TTW1	(0x1d)
#define FSC_SECC_TTW2	(0x1e)
#define FSC_SECC_TTW3	(0x1f)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK	(~UL(0xf))
/*
+10 −10
Original line number Diff line number Diff line
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
+1 −1
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
	     (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
+1 −1
Original line number Diff line number Diff line
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);
Loading