Commit 7c7b363d authored by Marc Zyngier
Browse files

Merge branch kvm-arm64/pkvm-fixed-features-prologue into kvmarm-master/next



* kvm-arm64/pkvm-fixed-features-prologue:
  : Rework a bunch of common infrastructure as a prologue
  : to Fuad Tabba's protected VM fixed feature series.
  KVM: arm64: Upgrade trace_kvm_arm_set_dreg32() to 64bit
  KVM: arm64: Add config register bit definitions
  KVM: arm64: Add feature register flag definitions
  KVM: arm64: Track value of cptr_el2 in struct kvm_vcpu_arch
  KVM: arm64: Keep mdcr_el2's value as set by __init_el2_debug
  KVM: arm64: Restore mdcr_el2 from vcpu
  KVM: arm64: Refactor sys_regs.h,c for nVHE reuse
  KVM: arm64: Fix names of config register fields
  KVM: arm64: MDCR_EL2 is a 64-bit register
  KVM: arm64: Remove trailing whitespace in comment
  KVM: arm64: placeholder to check if VM is protected

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents deb151a5 411d63d8
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -602,14 +602,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_32BIT_64BIT;
	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

	return val == ID_AA64PFR0_EL0_32BIT_64BIT;
	return val == ID_AA64PFR0_ELx_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
+38 −16
Original line number Diff line number Diff line
@@ -12,8 +12,13 @@
#include <asm/types.h>

/* Hyp Configuration Register (HCR) bits */

#define HCR_TID5	(UL(1) << 58)
#define HCR_DCT		(UL(1) << 57)
#define HCR_ATA_SHIFT	56
#define HCR_ATA		(UL(1) << HCR_ATA_SHIFT)
#define HCR_AMVOFFEN	(UL(1) << 51)
#define HCR_FIEN	(UL(1) << 47)
#define HCR_FWB		(UL(1) << 46)
#define HCR_API		(UL(1) << 41)
#define HCR_APK		(UL(1) << 40)
@@ -32,9 +37,9 @@
#define HCR_TVM		(UL(1) << 26)
#define HCR_TTLB	(UL(1) << 25)
#define HCR_TPU		(UL(1) << 24)
#define HCR_TPC		(UL(1) << 23)
#define HCR_TPC		(UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
#define HCR_TSW		(UL(1) << 22)
#define HCR_TAC		(UL(1) << 21)
#define HCR_TACR	(UL(1) << 21)
#define HCR_TIDCP	(UL(1) << 20)
#define HCR_TSC		(UL(1) << 19)
#define HCR_TID3	(UL(1) << 18)
@@ -56,12 +61,13 @@
#define HCR_PTW		(UL(1) << 2)
#define HCR_SWIO	(UL(1) << 1)
#define HCR_VM		(UL(1) << 0)
#define HCR_RES0	((UL(1) << 48) | (UL(1) << 39))

/*
 * The bits we set in HCR:
 * TLOR:	Trap LORegion register accesses
 * RW:		64bit by default, can be overridden for 32bit VMs
 * TAC:		Trap ACTLR
 * TACR:	Trap ACTLR
 * TSC:		Trap SMC
 * TSW:		Trap cache operations by set/way
 * TWE:		Trap WFE
@@ -76,7 +82,7 @@
 * PTW:		Take a stage2 fault if a stage1 walk steps in device memory
 */
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
			 HCR_BSU_IS | HCR_FB | HCR_TACR | \
			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
			 HCR_FMO | HCR_IMO | HCR_PTW )
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
@@ -275,24 +281,40 @@
#define CPTR_EL2_TTA	(1 << 20)
#define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ	(1 << 8)
#define CPTR_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 */
#define CPTR_EL2_DEFAULT	CPTR_EL2_RES1
#define CPTR_NVHE_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
#define CPTR_EL2_DEFAULT	CPTR_NVHE_EL2_RES1
#define CPTR_NVHE_EL2_RES0	(GENMASK(63, 32) |	\
				 GENMASK(29, 21) |	\
				 GENMASK(19, 14) |	\
				 BIT(11))

/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK	(UL(0x3))
#define MDCR_EL2_E2TB_SHIFT	(UL(24))
#define MDCR_EL2_TTRF		(1 << 19)
#define MDCR_EL2_TPMS		(1 << 14)
#define MDCR_EL2_HPMFZS		(UL(1) << 36)
#define MDCR_EL2_HPMFZO		(UL(1) << 29)
#define MDCR_EL2_MTPME		(UL(1) << 28)
#define MDCR_EL2_TDCC		(UL(1) << 27)
#define MDCR_EL2_HCCD		(UL(1) << 23)
#define MDCR_EL2_TTRF		(UL(1) << 19)
#define MDCR_EL2_HPMD		(UL(1) << 17)
#define MDCR_EL2_TPMS		(UL(1) << 14)
#define MDCR_EL2_E2PB_MASK	(UL(0x3))
#define MDCR_EL2_E2PB_SHIFT	(UL(12))
#define MDCR_EL2_TDRA		(1 << 11)
#define MDCR_EL2_TDOSA		(1 << 10)
#define MDCR_EL2_TDA		(1 << 9)
#define MDCR_EL2_TDE		(1 << 8)
#define MDCR_EL2_HPME		(1 << 7)
#define MDCR_EL2_TPM		(1 << 6)
#define MDCR_EL2_TPMCR		(1 << 5)
#define MDCR_EL2_HPMN_MASK	(0x1F)
#define MDCR_EL2_TDRA		(UL(1) << 11)
#define MDCR_EL2_TDOSA		(UL(1) << 10)
#define MDCR_EL2_TDA		(UL(1) << 9)
#define MDCR_EL2_TDE		(UL(1) << 8)
#define MDCR_EL2_HPME		(UL(1) << 7)
#define MDCR_EL2_TPM		(UL(1) << 6)
#define MDCR_EL2_TPMCR		(UL(1) << 5)
#define MDCR_EL2_HPMN_MASK	(UL(0x1F))
#define MDCR_EL2_RES0		(GENMASK(63, 37) |	\
				 GENMASK(35, 30) |	\
				 GENMASK(25, 24) |	\
				 GENMASK(22, 20) |	\
				 BIT(18) |		\
				 GENMASK(16, 15))

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT	ESR_ELx_FSC_FAULT
+1 −1
Original line number Diff line number Diff line
@@ -209,7 +209,7 @@ extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);
extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
+11 −2
Original line number Diff line number Diff line
@@ -286,9 +286,13 @@ struct kvm_vcpu_arch {
	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u32 mdcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;
@@ -771,6 +775,11 @@ void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

+1 −1
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ void __sve_restore_state(void *sve_pffr, u32 *fpsr);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);
Loading