Commit 9f03db66 authored by Marc Zyngier

Merge branch kvm-arm64/mmu/mte into kvmarm-master/next



KVM/arm64 support for MTE, courtesy of Steven Price.
It allows the guest to use memory tagging, and offers
a new userspace API to save/restore the tags.

* kvm-arm64/mmu/mte:
  KVM: arm64: Document MTE capability and ioctl
  KVM: arm64: Add ioctl to fetch/store tags in a guest
  KVM: arm64: Expose KVM_ARM_CAP_MTE
  KVM: arm64: Save/restore MTE registers
  KVM: arm64: Introduce MTE VM feature
  arm64: mte: Sync tags for pages where PTE is untagged

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 2fea6cf7 04c02c20
Documentation/virt/kvm/api.rst (+61 −0)
@@ -5034,6 +5034,43 @@ see KVM_XEN_VCPU_SET_ATTR above.
The KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST type may not be used
with the KVM_XEN_VCPU_GET_ATTR ioctl.

4.130 KVM_ARM_MTE_COPY_TAGS
---------------------------

:Capability: KVM_CAP_ARM_MTE
:Architectures: arm64
:Type: vm ioctl
:Parameters: struct kvm_arm_copy_mte_tags
:Returns: number of bytes copied, < 0 on error (-EINVAL for incorrect
          arguments, -EFAULT if memory cannot be accessed).

::

  struct kvm_arm_copy_mte_tags {
	__u64 guest_ipa;
	__u64 length;
	void __user *addr;
	__u64 flags;
	__u64 reserved[2];
  };

Copies Memory Tagging Extension (MTE) tags to/from guest tag memory. The
``guest_ipa`` and ``length`` fields must be ``PAGE_SIZE`` aligned. The ``addr``
field must point to a buffer to or from which the tags will be copied.

``flags`` specifies the direction of copy, either ``KVM_ARM_TAGS_TO_GUEST`` or
``KVM_ARM_TAGS_FROM_GUEST``.

The size of the buffer to store the tags is ``(length / 16)`` bytes
(granules in MTE are 16 bytes long), so for example the tags covering a
single 4KiB page fit in a 256-byte buffer. Each byte contains a single tag
value. This matches the format of ``PTRACE_PEEKMTETAGS`` and
``PTRACE_POKEMTETAGS``.

If an error occurs before any data is copied then a negative error code is
returned. If some tags have been copied before an error occurs then the number
of bytes successfully copied is returned. If the call completes successfully
then ``length`` is returned.
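
For illustration, here is a minimal sketch of how a VMM might pull the tags
for one page out of the guest, assuming 4KiB pages (``vm_fd`` and the helper
name are inventions for the example, not part of the ABI)::

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Copy the MTE tags for one 4KiB page of guest IPA space into buf. */
  static int read_page_tags(int vm_fd, __u64 ipa, void *buf)
  {
	struct kvm_arm_copy_mte_tags copy;

	memset(&copy, 0, sizeof(copy));	/* reserved[] must be zero */
	copy.guest_ipa = ipa;		/* PAGE_SIZE aligned */
	copy.length = 4096;		/* PAGE_SIZE aligned */
	copy.addr = buf;		/* receives 4096 / 16 = 256 tag bytes */
	copy.flags = KVM_ARM_TAGS_FROM_GUEST;

	/* Returns length on full success, < 0 on error. */
	return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy) == 4096 ? 0 : -1;
  }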

5. The kvm_run structure
========================

@@ -6362,6 +6399,30 @@ default.

See Documentation/x86/sgx/2.Kernel-internals.rst for more details.

7.26 KVM_CAP_ARM_MTE
--------------------

:Architectures: arm64
:Parameters: none

This capability indicates that KVM (and the hardware) supports exposing the
Memory Tagging Extension (MTE) to the guest. The VMM must also enable the
capability before creating any VCPUs to allow the guest access. Note that MTE
is only available to a guest running in AArch64 mode, and enabling this
capability will cause attempts to create AArch32 VCPUs to fail.

When enabled, the guest is able to access tags associated with any memory
given to the guest. KVM will ensure that the tags are maintained during swap
or hibernation of the host; however, the VMM needs to manually save/restore
the tags as appropriate if the VM is migrated.

When this capability is enabled, all memory in memslots must be mapped as
not-shareable (no MAP_SHARED); attempts to create a memslot with a
MAP_SHARED mmap will fail with -EINVAL.

When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
perform a bulk copy of tags to/from the guest.
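
For example, the VMM could enable the capability with the ``KVM_ENABLE_CAP``
ioctl on the VM file descriptor, before any ``KVM_CREATE_VCPU`` calls (a
sketch; ``vm_fd`` is an assumption)::

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Opt the VM in to MTE; must happen before any VCPU is created. */
  static int enable_mte(int vm_fd)
  {
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_MTE;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
  }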

8. Other capabilities.
======================

arch/arm64/include/asm/kvm_arm.h (+2 −1)
@@ -12,7 +12,8 @@
#include <asm/types.h>

/* Hyp Configuration Register (HCR) bits */
-#define HCR_ATA		(UL(1) << 56)
+#define HCR_ATA_SHIFT	56
+#define HCR_ATA		(UL(1) << HCR_ATA_SHIFT)
#define HCR_FWB		(UL(1) << 46)
#define HCR_API		(UL(1) << 41)
#define HCR_APK		(UL(1) << 40)
arch/arm64/include/asm/kvm_emulate.h (+3 −0)
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

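	/* Let the guest access its memory's allocation tags (HCR_EL2.ATA) */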
	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
arch/arm64/include/asm/kvm_host.h (+13 −0)
@@ -133,6 +133,9 @@ struct kvm_arch {

	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Memory Tagging Extension enabled for the guest */
	bool mte_enabled;
};

struct kvm_vcpu_fault_info {
@@ -207,6 +210,12 @@ enum vcpu_sysreg {
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -722,6 +731,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -770,6 +782,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

arch/arm64/include/asm/kvm_mte.h (new file, +66 −0)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2021 ARM Ltd.
 */
#ifndef __ASM_KVM_MTE_H
#define __ASM_KVM_MTE_H

#ifdef __ASSEMBLY__

#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_MTE

.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
alternative_if_not ARM64_MTE
	b	.L__skip_switch\@
alternative_else_nop_endif
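	// HCR_EL2.ATA is only set when the VM has MTE enabled (see
	// vcpu_reset_hcr()), so the tag register switch can be skipped
	// for guests that are not using MTE.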
	mrs	\reg1, hcr_el2
	tbz	\reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@

	mrs_s	\reg1, SYS_RGSR_EL1
	str	\reg1, [\h_ctxt, #CPU_RGSR_EL1]
	mrs_s	\reg1, SYS_GCR_EL1
	str	\reg1, [\h_ctxt, #CPU_GCR_EL1]

	ldr	\reg1, [\g_ctxt, #CPU_RGSR_EL1]
	msr_s	SYS_RGSR_EL1, \reg1
	ldr	\reg1, [\g_ctxt, #CPU_GCR_EL1]
	msr_s	SYS_GCR_EL1, \reg1

.L__skip_switch\@:
.endm

.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
alternative_if_not ARM64_MTE
	b	.L__skip_switch\@
alternative_else_nop_endif
	mrs	\reg1, hcr_el2
	tbz	\reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@

	mrs_s	\reg1, SYS_RGSR_EL1
	str	\reg1, [\g_ctxt, #CPU_RGSR_EL1]
	mrs_s	\reg1, SYS_GCR_EL1
	str	\reg1, [\g_ctxt, #CPU_GCR_EL1]

	ldr	\reg1, [\h_ctxt, #CPU_RGSR_EL1]
	msr_s	SYS_RGSR_EL1, \reg1
	ldr	\reg1, [\h_ctxt, #CPU_GCR_EL1]
	msr_s	SYS_GCR_EL1, \reg1

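	// Presumably here so the restored host tag registers are visible
	// before host-side MTE use; the guest-entry path can rely on the
	// context-synchronising ERET instead.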
	isb

.L__skip_switch\@:
.endm

#else /* !CONFIG_ARM64_MTE */

.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
.endm

.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
.endm

#endif /* CONFIG_ARM64_MTE */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_KVM_MTE_H */