Commit 013bb59d authored by Peter Collingbourne, committed by Will Deacon
Browse files

arm64: mte: handle tags zeroing at page allocation time



Currently, on an anonymous page fault, the kernel allocates a zeroed
page and maps it in user space. If the mapping is tagged (PROT_MTE),
set_pte_at() additionally clears the tags. It is, however, more
efficient to clear the tags at the same time as zeroing the data on
allocation. To avoid clearing the tags on any page (which may not be
mapped as tagged), only do this if the vma flags contain VM_MTE. This
requires introducing a new GFP flag that is used to determine whether
to clear the tags.

The DC GZVA instruction with a 0 top byte (and 0 tag) requires
top-byte-ignore. Set the TCR_EL1.{TBI1,TBID1} bits irrespective of
whether KASAN_HW is enabled.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Co-developed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://linux-review.googlesource.com/id/Id46dc94e30fe11474f7e54f5d65e7658dbdddb26


Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Link: https://lore.kernel.org/r/20210602235230.3928842-4-pcc@google.com


Signed-off-by: Will Deacon <will@kernel.org>
parent 7a3b8353
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged	PG_arch_2

void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
@@ -53,6 +54,9 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged	0

/* No-op stub when !CONFIG_ARM64_MTE: there are no tags to clear. */
static inline void mte_zero_clear_page_tags(void *addr)
{
}
/* No-op stub when !CONFIG_ARM64_MTE: no tags to sync for the new pte. */
static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
{
}
+6 −2
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__

#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <linux/types.h> /* for gfp_t */
#include <asm/pgtable-types.h>

struct page;
@@ -28,10 +29,13 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE

#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
						unsigned long vaddr);
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

void tag_clear_highpage(struct page *to);
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

+20 −0
Original line number Diff line number Diff line
@@ -36,6 +36,26 @@ SYM_FUNC_START(mte_clear_page_tags)
	ret
SYM_FUNC_END(mte_clear_page_tags)

/*
 * Zero the page and tags at the same time
 *
 * Parameters:
 *	x0 - address to the beginning of the page
 */
SYM_FUNC_START(mte_zero_clear_page_tags)
	mrs	x1, dczid_el0
	and	w1, w1, #0xf			// BS field: log2(block size in words)
	mov	x2, #4
	lsl	x1, x2, x1			// x1 = DC ZVA block size in bytes (4 << BS)
	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1	// clear the tag

	// DC GZVA zeroes one block of data and its allocation tags; with a
	// zero tag in x0 this requires TBI to be enabled at EL1.
1:	dc	gzva, x0
	add	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)		// stop at the page boundary
	b.ne	1b
	ret
SYM_FUNC_END(mte_zero_clear_page_tags)

/*
 * Copy the tags from the source page to the destination one
 *   x0 - address of the destination page
+26 −0
Original line number Diff line number Diff line
@@ -921,3 +921,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling: allocate a zeroed movable
 * highmem page for user space, initialising MTE tags in the same pass
 * when the vma is mapped PROT_MTE.
 */
struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
						unsigned long vaddr)
{
	/*
	 * For tagged mappings (VM_MTE), clearing the tags together with
	 * the data zeroing is usually faster than a separate DC ZVA
	 * followed by STGM, so request __GFP_ZEROTAGS as well.
	 */
	gfp_t gfp_mask = (vma->vm_flags & VM_MTE) ?
			 GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_ZEROTAGS :
			 GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

	return alloc_page_vma(gfp_mask, vma, vaddr);
}

/*
 * Zero a page's data and MTE allocation tags in a single pass, then mark
 * the page as tagged via PG_mte_tagged.
 *
 * NOTE(review): the ordering here — zero data/tags, reset the KASAN tag,
 * then set PG_mte_tagged — presumably matters so that observers of the
 * flag see fully initialised tags; confirm against mte_sync_tags().
 */
void tag_clear_highpage(struct page *page)
{
	mte_zero_clear_page_tags(page_address(page));
	page_kasan_tag_reset(page);
	set_bit(PG_mte_tagged, &page->flags);
}
+7 −3
Original line number Diff line number Diff line
@@ -46,9 +46,13 @@
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_HW_FLAGS 0
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif

/*
@@ -464,7 +468,7 @@ SYM_FUNC_START(__cpu_setup)
	msr_s	SYS_TFSRE0_EL1, xzr

	/* set the TCR_EL1 bits */
	mov_q	x10, TCR_KASAN_HW_FLAGS
	mov_q	x10, TCR_MTE_FLAGS
	orr	tcr, tcr, x10
1:
#endif
Loading