Commit 458e9874 authored by David Matlack's avatar David Matlack Committed by Paolo Bonzini
Browse files

KVM: selftests: Fix nx_huge_pages_test on TDP-disabled hosts



Map the test's huge page region with 2MiB virtual mappings when TDP is
disabled so that KVM can shadow the region with huge pages. This fixes
nx_huge_pages_test on hosts where TDP hardware support is disabled.

Purposely do not skip this test on TDP-disabled hosts. While we don't
care about NX Huge Pages on TDP-disabled hosts from a security
perspective, KVM does support it, and so we should test it.

For TDP-enabled hosts, continue mapping the region with 4KiB pages to
ensure that KVM can map it with huge pages irrespective of the guest
mappings.

Fixes: 8448ec59 ("KVM: selftests: Add NX huge pages test")
Signed-off-by: default avatarDavid Matlack <dmatlack@google.com>
Message-Id: <20220929181207.2281449-4-dmatlack@google.com>
Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
parent 4d2bd143
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -825,6 +825,8 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}

bool kvm_is_tdp_enabled(void);

uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
				 uint64_t vaddr);
void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
@@ -855,6 +857,8 @@ enum pg_level {
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
+27 −0
Original line number Diff line number Diff line
@@ -111,6 +111,14 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
	}
}

/*
 * Return true if KVM has two-dimensional paging (TDP) enabled, i.e. EPT on
 * Intel or NPT on AMD, by reading the relevant vendor module parameter.
 */
bool kvm_is_tdp_enabled(void)
{
	return is_intel_cpu() ? get_kvm_intel_param_bool("ept")
			      : get_kvm_amd_param_bool("npt");
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -214,6 +222,25 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}

/*
 * Map nr_bytes of guest virtual address space at vaddr to guest physical
 * address paddr using pages of the given level (e.g. PG_LEVEL_4K or
 * PG_LEVEL_2M).  Asserts that nr_bytes is a multiple of the page size for
 * the requested level.
 */
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level)
{
	uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t nr_pages = nr_bytes / pg_size;
	/*
	 * Use a 64-bit counter: an "int" compared against the uint64_t page
	 * count is promoted to unsigned, and overflows (UB) before reaching
	 * nr_pages for huge regions.
	 */
	uint64_t i;

	TEST_ASSERT(nr_bytes % pg_size == 0,
		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
		    nr_bytes, pg_size);

	/* Install one mapping per page, advancing both GVA and GPA. */
	for (i = 0; i < nr_pages; i++) {
		__virt_pg_map(vm, vaddr, paddr, level);

		vaddr += pg_size;
		paddr += pg_size;
	}
}

static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
					  struct kvm_vcpu *vcpu,
					  uint64_t vaddr)
+17 −2
Original line number Diff line number Diff line
@@ -112,6 +112,7 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t nr_bytes;
	void *hva;
	int r;

@@ -141,10 +142,24 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
				    HPAGE_GPA, HPAGE_SLOT,
				    HPAGE_SLOT_NPAGES, 0);

	virt_map(vm, HPAGE_GVA, HPAGE_GPA, HPAGE_SLOT_NPAGES);
	nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size;

	/*
	 * Ensure that KVM can map HPAGE_SLOT with huge pages by mapping the
	 * region into the guest with 2MiB pages whenever TDP is disabled (i.e.
	 * whenever KVM is shadowing the guest page tables).
	 *
	 * When TDP is enabled, KVM should be able to map HPAGE_SLOT with huge
	 * pages irrespective of the guest page size, so map with 4KiB pages
	 * to test that that is the case.
	 */
	if (kvm_is_tdp_enabled())
		virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K);
	else
		virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M);

	hva = addr_gpa2hva(vm, HPAGE_GPA);
	memset(hva, RETURN_OPCODE, HPAGE_SLOT_NPAGES * PAGE_SIZE);
	memset(hva, RETURN_OPCODE, nr_bytes);

	check_2m_page_count(vm, 0);
	check_split_count(vm, 0);