Commit f211b450 authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-x86-fixes-6.4' of https://github.com/kvm-x86/linux into HEAD

KVM x86 fixes for 6.4

 - Fix a memslot lookup bug in the NX recovery thread that could
   theoretically let userspace bypass the NX hugepage mitigation

 - Fix a s/BLOCKING/PENDING bug in SVM's vNMI support

 - Account exit stats for fastpath VM-Exits that never leave the super
   tight run-loop

 - Fix an out-of-bounds bug in the optimized APIC map code, and add a
   regression test for the underlying race between APIC map recalculation
   and vCPU creation that triggers it.
parents 49661a52 47d2804b
Loading
Loading
Loading
Loading
+18 −2
Original line number Diff line number Diff line
@@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;

	/*
	 * For simplicity, KVM always allocates enough space for all possible
	 * xAPIC IDs.  Yell, but don't kill the VM, as KVM can continue on
	 * without the optimized map.
	 */
	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
		return -EINVAL;

	/*
	 * Bail if a vCPU was added and/or enabled its APIC between allocating
	 * the map and doing the actual calculations for the map.  Note, KVM
	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
	 * the compiler decides to reload x2apic_id after this check.
	 */
	if (x2apic_id > new->max_apic_id)
		return -E2BIG;

	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
@@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
			x2apic_id <= new->max_apic_id)
		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;

		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
+4 −1
Original line number Diff line number Diff line
@@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
		 */
		slot = NULL;
		if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
			slot = gfn_to_memslot(kvm, sp->gfn);
			struct kvm_memslots *slots;

			slots = kvm_memslots_for_spte_role(kvm, sp->role);
			slot = __gfn_to_memslot(slots, sp->gfn);
			WARN_ON_ONCE(!slot);
		}

+1 −1
Original line number Diff line number Diff line
@@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
	if (!is_vnmi_enabled(svm))
		return false;

	return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
}

static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
+3 −0
Original line number Diff line number Diff line
@@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}

		/* Note, VM-Exits that go down the "slow" path are accounted below. */
		++vcpu->stat.exits;
	}

	/*
+1 −0
Original line number Diff line number Diff line
@@ -116,6 +116,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
Loading