Commit 3cd7cd8a authored by Linus Torvalds
Browse files
Pull kvm fixes from Paolo Bonzini:
 "Two larger x86 series:

   - Redo incorrect fix for SEV/SMAP erratum

   - Windows 11 Hyper-V workaround

  Other x86 changes:

   - Various x86 cleanups

   - Re-enable access_tracking_perf_test

   - Fix for #GP handling on SVM

   - Fix for CPUID leaf 0Dh in KVM_GET_SUPPORTED_CPUID

   - Fix for ICEBP in interrupt shadow

   - Avoid false-positive RCU splat

   - Enable Enlightened MSR-Bitmap support for real

  ARM:

   - Correctly update the shadow register on exception injection when
     running in nVHE mode

   - Correctly use the mm_ops indirection when performing cache
     invalidation from the page-table walker

   - Restrict the vgic-v3 workaround for SEIS to the two known broken
     implementations

  Generic code changes:

   - Dead code cleanup"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (43 commits)
  KVM: eventfd: Fix false positive RCU usage warning
  KVM: nVMX: Allow VMREAD when Enlightened VMCS is in use
  KVM: nVMX: Implement evmcs_field_offset() suitable for handle_vmread()
  KVM: nVMX: Rename vmcs_to_field_offset{,_table}
  KVM: nVMX: eVMCS: Filter out VM_EXIT_SAVE_VMX_PREEMPTION_TIMER
  KVM: nVMX: Also filter MSR_IA32_VMX_TRUE_PINBASED_CTLS when eVMCS
  selftests: kvm: check dynamic bits against KVM_X86_XCOMP_GUEST_SUPP
  KVM: x86: add system attribute to retrieve full set of supported xsave states
  KVM: x86: Add a helper to retrieve userspace address from kvm_device_attr
  selftests: kvm: move vm_xsave_req_perm call to amx_test
  KVM: x86: Sync the states size with the XCR0/IA32_XSS at, any time
  KVM: x86: Update vCPU's runtime CPUID on write to MSR_IA32_XSS
  KVM: x86: Keep MSR_IA32_XSS unchanged for INIT
  KVM: x86: Free kvm_cpuid_entry2 array on post-KVM_RUN KVM_SET_CPUID{,2}
  KVM: nVMX: WARN on any attempt to allocate shadow VMCS for vmcs02
  KVM: selftests: Don't skip L2's VMCALL in SMM test for SVM guest
  KVM: x86: Check .flags in kvm_cpuid_check_equal() too
  KVM: x86: Forcibly leave nested virt when SMM state is toggled
  KVM: SVM: drop unnecessary code in svm_hv_vmcb_dirty_nested_enlightenments()
  KVM: SVM: hyper-v: Enable Enlightened MSR-Bitmap support for real
  ...
parents e0152705 17179d00
Loading
Loading
Loading
Loading
+3 −1
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -3268,6 +3268,7 @@ number.

 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set)
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error
@@ -3303,6 +3304,7 @@ transferred is defined by the particular attribute.

 :Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
              KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+             KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device
 :Type: device ioctl, vm ioctl, vcpu ioctl
 :Parameters: struct kvm_device_attr
 :Returns: 0 on success, -1 on error
+4 −1
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)

 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
 {
-	write_sysreg_el1(val, SYS_SPSR);
+	if (has_vhe())
+		write_sysreg_el1(val, SYS_SPSR);
+	else
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
 }

 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
+6 −12
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -983,13 +983,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	 */
 	stage2_put_pte(ptep, mmu, addr, level, mm_ops);

-	if (need_flush) {
-		kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
-
-		dcache_clean_inval_poc((unsigned long)pte_follow,
-				    (unsigned long)pte_follow +
-					       kvm_granule_size(level));
-	}
+	if (need_flush && mm_ops->dcache_clean_inval_poc)
+		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+					       kvm_granule_size(level));

 	if (childp)
 		mm_ops->put_page(childp);
@@ -1151,14 +1147,12 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	struct kvm_pgtable *pgt = arg;
 	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
 	kvm_pte_t pte = *ptep;
-	kvm_pte_t *pte_follow;

 	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
 		return 0;

-	pte_follow = kvm_pte_follow(pte, mm_ops);
-	dcache_clean_inval_poc((unsigned long)pte_follow,
-			    (unsigned long)pte_follow +
-					       kvm_granule_size(level));
+	if (mm_ops->dcache_clean_inval_poc)
+		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+					       kvm_granule_size(level));
 	return 0;
 }
+3 −0
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -983,6 +983,9 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
 	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
 	/* IDbits */
 	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
+	/* SEIS */
+	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
+		val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
 	/* A3V */
 	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
 	/* EOImode */
+15 −2
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -609,6 +609,18 @@ static int __init early_gicv4_enable(char *buf)
 }
 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

+static const struct midr_range broken_seis[] = {
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+	{},
+};
+
+static bool vgic_v3_broken_seis(void)
+{
+	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
+		is_midr_in_range_list(read_cpuid_id(), broken_seis));
+}
+
 /**
  * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
  * @info:	pointer to the GIC description
@@ -676,9 +688,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 		group1_trap = true;
 	}

-	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
-		kvm_info("GICv3 with locally generated SEI\n");
+	if (vgic_v3_broken_seis()) {
+		kvm_info("GICv3 with broken locally generated SEI\n");

+		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
 		group0_trap = true;
 		group1_trap = true;
 		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
Loading