Commit ea7fc1bb authored by Steven Price, committed by Marc Zyngier

KVM: arm64: Introduce MTE VM feature



Add a new VM feature 'KVM_ARM_CAP_MTE' which enables memory tagging
for a VM. This exposes the feature to the guest and automatically
marks memory pages touched by the VM as PG_mte_tagged (clearing their
tag storage first) to ensure that the guest cannot see stale tags, and
so that the tags are correctly saved/restored across swap.

Actually exposing the new capability to user space happens in a later
patch.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
[maz: move VM_SHARED sampling into the critical section]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210621111716.37157-3-steven.price@arm.com
parent 69e3b846
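
For context, the VMM opts in via KVM_ENABLE_CAP on the VM file descriptor
before any vCPU runs. A minimal sketch, assuming the capability is exposed
to userspace as KVM_CAP_ARM_MTE by the later patch mentioned above; error
handling is trimmed:

/*
 * Hedged sketch (not part of this patch): enabling MTE for a guest.
 * KVM_CAP_ARM_MTE is the capability number a later patch in this
 * series exposes to userspace.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_guest_mte(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_MTE,
	};

	/* Must happen before guest memory is faulted into stage 2. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}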
arch/arm64/include/asm/kvm_emulate.h +3 −0
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
 	    vcpu_el1_is_32bit(vcpu))
 		vcpu->arch.hcr_el2 |= HCR_TID2;
+
+	if (kvm_has_mte(vcpu->kvm))
+		vcpu->arch.hcr_el2 |= HCR_ATA;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
arch/arm64/include/asm/kvm_host.h +4 −0
@@ -132,6 +132,9 @@ struct kvm_arch {

 	u8 pfr0_csv2;
 	u8 pfr0_csv3;
+
+	/* Memory Tagging Extension enabled for the guest */
+	bool mte_enabled;
 };
 
 struct kvm_vcpu_fault_info {
@@ -769,6 +772,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_arm_vcpu_sve_finalized(vcpu) \
 	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
 
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
 #define kvm_vcpu_has_pmu(vcpu)					\
 	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

arch/arm64/kvm/hyp/exception.c +2 −1
@@ -112,7 +112,8 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
 	new |= (old & PSR_C_BIT);
 	new |= (old & PSR_V_BIT);
 
-	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+	if (kvm_has_mte(vcpu->kvm))
+		new |= PSR_TCO_BIT;
 
 	new |= (old & PSR_DIT_BIT);
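
Setting PSR_TCO_BIT in the PSTATE image mirrors the architected
ARMv8.5-MemTag behaviour on exception entry: tag checks are masked until
the guest's handler chooses to re-enable them. A guest-side sketch of
doing so, assuming an assembler with MemTag support:

/*
 * Hedged sketch (guest code, not part of this patch): clear PSTATE.TCO
 * inside an EL1 exception handler once it is safe to take tag-check
 * faults again.
 */
static inline void tag_checks_on(void)
{
	asm volatile(".arch_extension memtag\n\t"
		     "msr tco, #0" ::: "memory");
}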

arch/arm64/kvm/mmu.c +66 −1
@@ -822,6 +822,45 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
 	return PAGE_SIZE;
 }
 
+/*
+ * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
+ * able to see the page's tags and therefore they must be initialised first. If
+ * PG_mte_tagged is set, tags have already been initialised.
+ *
+ * The race in the test/set of the PG_mte_tagged flag is handled by:
+ * - preventing VM_SHARED mappings in a memslot with MTE, preventing two VMs
+ *   racing to sanitise the same page
+ * - mmap_lock protects between a VM faulting a page in and the VMM performing
+ *   an mprotect() to add VM_MTE
+ */
+static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+			     unsigned long size)
+{
+	unsigned long i, nr_pages = size >> PAGE_SHIFT;
+	struct page *page;
+
+	if (!kvm_has_mte(kvm))
+		return 0;
+
+	/*
+	 * pfn_to_online_page() is used to reject ZONE_DEVICE pages
+	 * that may not support tags.
+	 */
+	page = pfn_to_online_page(pfn);
+
+	if (!page)
+		return -EFAULT;
+
+	for (i = 0; i < nr_pages; i++, page++) {
+		if (!test_bit(PG_mte_tagged, &page->flags)) {
+			mte_clear_page_tags(page_address(page));
+			set_bit(PG_mte_tagged, &page->flags);
+		}
+	}
+
+	return 0;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -830,6 +869,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	bool write_fault, writable, force_pte = false;
 	bool exec_fault;
 	bool device = false;
+	bool shared;
 	unsigned long mmu_seq;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -873,6 +913,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		vma_shift = PAGE_SHIFT;
 	}
 
+	shared = (vma->vm_flags & VM_SHARED);
+
 	switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
 	case PUD_SHIFT:
@@ -971,8 +1013,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (writable)
 		prot |= KVM_PGTABLE_PROT_W;
 
-	if (fault_status != FSC_PERM && !device)
+	if (fault_status != FSC_PERM && !device) {
+		/* Check the VMM hasn't introduced a new VM_SHARED VMA */
+		if (kvm_has_mte(kvm) && shared) {
+			ret = -EFAULT;
+			goto out_unlock;
+		}
+		ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
+		if (ret)
+			goto out_unlock;
+
 		clean_dcache_guest_page(pfn, vma_pagesize);
+	}
 
 	if (exec_fault) {
 		prot |= KVM_PGTABLE_PROT_X;
@@ -1168,12 +1220,17 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	kvm_pfn_t pfn = pte_pfn(range->pte);
+	int ret;
 
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
 	WARN_ON(range->end - range->start != 1);
 
+	ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
+	if (ret)
+		return false;
+
 	/*
 	 * We've moved a page around, probably through CoW, so let's treat it
 	 * just like a translation fault and clean the cache to the PoC.
@@ -1381,6 +1438,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		if (!vma)
 			break;
 
+		/*
+		 * VM_SHARED mappings are not allowed with MTE to avoid races
+		 * when updating the PG_mte_tagged page flag, see
+		 * sanitise_mte_tags for more details.
+		 */
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
+			return -EINVAL;
+
 		/*
 		 * Take the intersection of this VMA with the memory region
 		 */
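
The -EINVAL above makes the VM_SHARED restriction visible to the VMM at
memslot registration time, while the -EFAULT in user_mem_abort() catches a
VMA that became shared afterwards. A hedged test sketch, assuming vm_fd
already has MTE enabled as in the earlier sketch; the slot, address, and
size values are illustrative:

/*
 * Hedged sketch (not part of this patch): a memslot backed by a
 * MAP_SHARED mapping should be rejected once MTE is enabled.
 */
#include <assert.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void expect_shared_backing_rejected(int vm_fd)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0x80000000,
		.memory_size = 0x10000,
	};
	void *mem = mmap(NULL, region.memory_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	assert(mem != MAP_FAILED);
	region.userspace_addr = (unsigned long)mem;

	/* kvm_arch_prepare_memory_region() should return -EINVAL. */
	assert(ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) == -1);
	assert(errno == EINVAL);
	munmap(mem, region.memory_size);
}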
arch/arm64/kvm/sys_regs.c +7 −0
@@ -1047,6 +1047,13 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		break;
 	case SYS_ID_AA64PFR1_EL1:
 		val &= ~FEATURE(ID_AA64PFR1_MTE);
+		if (kvm_has_mte(vcpu->kvm)) {
+			u64 pfr, mte;
+
+			pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+			mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
+			val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+		}
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
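
With read_id_reg() populating the field, the guest discovers MTE the usual
way, from the sanitised ID register. A guest-side sketch:

/*
 * Hedged sketch (guest code, not part of this patch): extract
 * ID_AA64PFR1_EL1.MTE, bits [11:8]. 0b0000 means not implemented;
 * 0b0010 (MTE2) provides full tag storage and checking.
 */
static inline unsigned int guest_mte_level(void)
{
	unsigned long pfr1;

	asm volatile("mrs %0, ID_AA64PFR1_EL1" : "=r"(pfr1));
	return (pfr1 >> 8) & 0xf;
}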