Commit 20a30430 authored by Marc Zyngier
Browse files

Merge branch kvm-arm64/vgic-fixes-5.16 into kvmarm-master/next



* kvm-arm64/vgic-fixes-5.16:
  : .
  : Multiple updates to the GICv3 emulation in order to better support
  : the dreadful Apple M1 that only implements half of it, and in a
  : broken way...
  : .
  KVM: arm64: vgic-v3: Align emulated cpuif LPI state machine with the pseudocode
  KVM: arm64: vgic-v3: Don't advertise ICC_CTLR_EL1.SEIS
  KVM: arm64: vgic-v3: Reduce common group trapping to ICV_DIR_EL1 when possible
  KVM: arm64: vgic-v3: Work around GICv3 locally generated SErrors
  KVM: arm64: Force ID_AA64PFR0_EL1.GIC=1 when exposing a virtual GICv3

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 2a6bf513 9d449c71
+3 −0
Original line number Diff line number Diff line
@@ -1152,6 +1152,7 @@
#define ICH_HCR_TC		(1 << 10)
#define ICH_HCR_TALL0		(1 << 11)
#define ICH_HCR_TALL1		(1 << 12)
#define ICH_HCR_TDIR		(1 << 14)
#define ICH_HCR_EOIcount_SHIFT	27
#define ICH_HCR_EOIcount_MASK	(0x1f << ICH_HCR_EOIcount_SHIFT)

@@ -1184,6 +1185,8 @@
#define ICH_VTR_SEIS_MASK	(1 << ICH_VTR_SEIS_SHIFT)
#define ICH_VTR_A3V_SHIFT	21
#define ICH_VTR_A3V_MASK	(1 << ICH_VTR_A3V_SHIFT)
#define ICH_VTR_TDS_SHIFT	19
#define ICH_VTR_TDS_MASK	(1 << ICH_VTR_TDS_SHIFT)

#define ARM64_FEATURE_FIELD_BITS	4

+8 −14
Original line number Diff line number Diff line
@@ -695,8 +695,6 @@ static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
	lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
@@ -764,20 +762,18 @@ static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		/* Do not bump EOIcount for LPIs that aren't in the LRs */
		if (!(vid >= VGIC_MIN_LPI))
			__vgic_v3_bump_eoicount();
		return;
	}

	/* EOImode == 1 and not an LPI, nothing to be done here */
	if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
		return;

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
@@ -987,8 +983,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
+5 −0
Original line number Diff line number Diff line
@@ -1080,6 +1080,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (irqchip_in_kernel(vcpu->kvm) &&
		    vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+18 −3
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
@@ -296,6 +297,8 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}

int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
@@ -673,11 +676,23 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
		group1_trap = true;
	}

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
		kvm_info("GICv3 with locally generated SEI\n");

		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap | dir_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "");
			 common_trap ? "C"  : "",
			 dir_trap    ? "D"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}