Commit 382b5b87 authored by Marc Zyngier
Browse files

Merge branch kvm-arm64/mte-map-shared into kvmarm-master/next



* kvm-arm64/mte-map-shared:
  : .
  : Update the MTE support to allow the VMM to use shared mappings
  : to back the memslots exposed to MTE-enabled guests.
  :
  : Patches courtesy of Catalin Marinas and Peter Collingbourne.
  : .
  : Fix a number of issues with MTE, such as races on the tags
  : being initialised vs the PG_mte_tagged flag as well as the
  : lack of support for VM_SHARED when KVM is involved.
  :
  : Patches from Catalin Marinas and Peter Collingbourne.
  : .
  Documentation: document the ABI changes for KVM_CAP_ARM_MTE
  KVM: arm64: permit all VM_MTE_ALLOWED mappings with MTE enabled
  KVM: arm64: unify the tests for VMAs in memslots when MTE is enabled
  arm64: mte: Lock a page for MTE tag initialisation
  mm: Add PG_arch_3 page flag
  KVM: arm64: Simplify the sanitise_mte_tags() logic
  arm64: mte: Fix/clarify the PG_mte_tagged semantics
  mm: Do not enable PG_arch_2 for all 64-bit architectures

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents cfa72993 a4baf8d2
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -7385,8 +7385,9 @@ hibernation of the host; however the VMM needs to manually save/restore the
tags as appropriate if the VM is migrated.

When this capability is enabled all memory in memslots must be mapped as
not-shareable (no MAP_SHARED), attempts to create a memslot with a
MAP_SHARED mmap will result in an -EINVAL return.
``MAP_ANONYMOUS`` or with a RAM-based file mapping (``tmpfs``, ``memfd``),
attempts to create a memslot with an invalid mmap will result in an
-EINVAL return.

When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
perform a bulk copy of tags to/from the guest.
+1 −0
Original line number Diff line number Diff line
@@ -1965,6 +1965,7 @@ config ARM64_MTE
	depends on ARM64_PAN
	select ARCH_HAS_SUBPAGE_FAULTS
	select ARCH_USES_HIGH_VMA_FLAGS
	select ARCH_USES_PG_ARCH_X
	help
	  Memory Tagging (part of the ARMv8.5 Extensions) provides
	  architectural support for run-time, always-on detection of
+64 −1
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from,
				    unsigned long n);
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
bool mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
@@ -36,6 +36,58 @@ void mte_free_tag_storage(char *storage);

/* track which pages have valid allocation tags */
#define PG_mte_tagged	PG_arch_2
/* simple lock to avoid multiple threads tagging the same page */
#define PG_mte_lock	PG_arch_3

/*
 * Mark @page as having its MTE tags initialised. The smp_wmb() here pairs
 * with the smp_rmb() in page_mte_tagged() so that a reader observing
 * PG_mte_tagged also observes the tags written before this call.
 */
static inline void set_page_mte_tagged(struct page *page)
{
	/*
	 * Ensure that the tags written prior to this function are visible
	 * before the page flags update.
	 */
	smp_wmb();
	set_bit(PG_mte_tagged, &page->flags);
}

/*
 * Return true if @page has valid MTE tags (PG_mte_tagged set). On a true
 * return, the smp_rmb() pairs with the smp_wmb() in set_page_mte_tagged()
 * so the caller can safely read the tags afterwards.
 */
static inline bool page_mte_tagged(struct page *page)
{
	bool ret = test_bit(PG_mte_tagged, &page->flags);

	/*
	 * If the page is tagged, ensure ordering with a likely subsequent
	 * read of the tags.
	 */
	if (ret)
		smp_rmb();
	return ret;
}

/*
 * Lock the page for tagging and return 'true' if the page can be tagged,
 * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the
 * locking only happens once for page initialisation.
 *
 * The page MTE lock state:
 *
 *   Locked:	PG_mte_lock && !PG_mte_tagged
 *   Unlocked:	!PG_mte_lock || PG_mte_tagged
 *
 * Acquire semantics only if the page is tagged (returning 'false').
 */
static inline bool try_page_mte_tagging(struct page *page)
{
	/* First caller wins the lock and must initialise the tags. */
	if (!test_and_set_bit(PG_mte_lock, &page->flags))
		return true;

	/*
	 * The tags are either being initialised or may have been initialised
	 * already. Check if the PG_mte_tagged flag has been set or wait
	 * otherwise.
	 */
	smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));

	/*
	 * Acquire semantics from smp_cond_load_acquire() order the
	 * caller's subsequent tag reads; the loaded flags value itself
	 * is not needed.
	 */
	return false;
}

void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
@@ -56,6 +108,17 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size);
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged	0

/* MTE disabled (!CONFIG_ARM64_MTE): tag state is never recorded. */
static inline void set_page_mte_tagged(struct page *page)
{
}
/* MTE disabled (!CONFIG_ARM64_MTE): no page can be tagged. */
static inline bool page_mte_tagged(struct page *page)
{
	return false;
}
/* MTE disabled (!CONFIG_ARM64_MTE): tagging is never permitted. */
static inline bool try_page_mte_tagging(struct page *page)
{
	return false;
}
/* MTE disabled (!CONFIG_ARM64_MTE): nothing to zero or clear. */
static inline void mte_zero_clear_page_tags(void *addr)
{
}
+2 −2
Original line number Diff line number Diff line
@@ -1049,8 +1049,8 @@ static inline void arch_swap_invalidate_area(int type)
#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
	if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
		set_bit(PG_mte_tagged, &folio->flags);
	if (system_supports_mte())
		mte_restore_tags(entry, &folio->page);
}

#endif /* CONFIG_ARM64_MTE */
+3 −1
Original line number Diff line number Diff line
@@ -2074,8 +2074,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
	 * Clear the tags in the zero page. This needs to be done via the
	 * linear map which has the Tagged attribute.
	 */
	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
	if (try_page_mte_tagging(ZERO_PAGE(0))) {
		mte_clear_page_tags(lm_alias(empty_zero_page));
		set_page_mte_tagged(ZERO_PAGE(0));
	}

	kasan_init_hw_tags_cpu();
}
Loading