Commit b32f46c2 authored by Tong Tiangen, committed by Ma Wupeng

arm64: add cow to machine check safe

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5GB28
CVE: NA

-------------------------------

In copy-on-write (COW) processing, the data of a user process is copied.
When a hardware memory error is encountered during the copy, only the
relevant process is affected, so killing the user process and isolating
the user page with the hardware memory error is a more reasonable choice
than a kernel panic.

Add a new helper, copy_page_mc(), which provides a machine-check-safe page
copy implementation. At present it is only used for COW; in the future it
can be extended to more scenarios. As long as the consequences of a page
copy failure are not fatal (e.g. only a user process is affected), this
helper can be used. A sketch of the intended COW usage follows.
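As a rough illustration (not part of this patch's diff), a COW caller can
prefer the machine-check-safe copy when the architecture advertises it.
The cow_copy_user_highpage wrapper below is hypothetical; the real call
site in mm/memory.c may differ:

#ifdef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
/* Architecture provides a machine-check-safe user page copy. */
#define cow_copy_user_highpage(to, from, vaddr, vma)	\
	copy_user_highpage_mc(to, from, vaddr, vma)
#else
#define cow_copy_user_highpage(to, from, vaddr, vma)	\
	copy_user_highpage(to, from, vaddr, vma)
#endif

On a hardware memory error during the copy, the extable fixup aborts
copy_page_mc() early; the memory error handling path then kills the
affected process and isolates the poisoned page instead of panicking.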

copy_page_mc() in copy_page_mc.S largely borrows from copy_page() in
copy_page.S; the main difference is that copy_page_mc() adds an extable
entry to every load/store instruction to support machine check safety
(see the CPY_MC expansion illustrated after the first hunk below). This
is done largely to keep the patch simple; further optimizations can be
folded in if needed.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
parent 26e697df
+5 −0
@@ -168,6 +168,11 @@ alternative_endif
#define USER_MC(l, x...)			\
9999:	x;					\
	_asm_mc_extable	9999b, l

#define CPY_MC(l, x...)				\
9999:	x;					\
	_asm_mc_extable  9999b, l

/*
 * Register aliases.
 */
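For reference, an invocation such as CPY_MC(9998f, ldp x2, x3, [x1])
expands, after assembler macro processing, to roughly:

9999:	ldp	x2, x3, [x1]
	_asm_mc_extable	9999b, 9998f

_asm_mc_extable records an exception table entry mapping a machine-check
fault at the load/store (local label 9999) to the fixup label passed as
the first argument, so a poisoned access branches to the fixup instead of
escalating to a kernel panic.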
+4 −0
@@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage);

void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_copy_page_tags_mc(void *kto, const void *kfrom);
void flush_mte_state(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_exit(void);
@@ -56,6 +57,9 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline void mte_copy_page_tags_mc(void *kto, const void *kfrom)
{
}
static inline void flush_mte_state(void)
{
}
+10 −0
@@ -28,6 +28,16 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE

#ifdef CONFIG_ARCH_HAS_COPY_MC
extern void copy_page_mc(void *to, const void *from);
void copy_highpage_mc(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE_MC

void copy_user_highpage_mc(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
#endif

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
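Tying this hunk to the mte.h one above: copy_highpage_mc() can be composed
from copy_page_mc() plus mte_copy_page_tags_mc(), mirroring the existing
copy_highpage(). A minimal sketch, assuming the same MTE handling as the
non-MC variant (the actual definition is not shown in this excerpt and may
differ):

void copy_highpage_mc(struct page *to, struct page *from)
{
	void *kto = page_address(to);
	void *kfrom = page_address(from);

	copy_page_mc(kto, kfrom);

	/* Propagate MTE tags with the machine-check-safe helper. */
	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
		set_bit(PG_mte_tagged, &to->flags);
		mte_copy_page_tags_mc(kto, kfrom);
	}
}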
+2 −0
@@ -13,6 +13,8 @@ endif

lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o

lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_page_mc.o

obj-$(CONFIG_CRC32) += crc32.o

obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+80 −0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>

/*
 * Copy a page from src to dest (both are page aligned), machine-check safe.
 *
 * Parameters:
 *	x0 - dest
 *	x1 - src
 */
SYM_FUNC_START(copy_page_mc)
alternative_if ARM64_HAS_NO_HW_PREFETCH
	// Prefetch three cache lines ahead.
	prfm	pldl1strm, [x1, #128]
	prfm	pldl1strm, [x1, #256]
	prfm	pldl1strm, [x1, #384]
alternative_else_nop_endif
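	// Load the first 128 bytes; a hardware memory error in any CPY_MC()
	// access below is fixed up to label 9998 at the end of the function.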

CPY_MC(9998f, ldp	x2, x3, [x1])
CPY_MC(9998f, ldp	x4, x5, [x1, #16])
CPY_MC(9998f, ldp	x6, x7, [x1, #32])
CPY_MC(9998f, ldp	x8, x9, [x1, #48])
CPY_MC(9998f, ldp	x10, x11, [x1, #64])
CPY_MC(9998f, ldp	x12, x13, [x1, #80])
CPY_MC(9998f, ldp	x14, x15, [x1, #96])
CPY_MC(9998f, ldp	x16, x17, [x1, #112])

	add	x0, x0, #256
	add	x1, x1, #128
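	// x0 now points 256 bytes past the destination of the data already
	// loaded: the stnp stores in the loop use negative offsets, so each
	// iteration stores the previous 128 bytes while loading the next.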
1:
	tst	x0, #(PAGE_SIZE - 1)

alternative_if ARM64_HAS_NO_HW_PREFETCH
	prfm	pldl1strm, [x1, #384]
alternative_else_nop_endif

CPY_MC(9998f, stnp	x2, x3, [x0, #-256])
CPY_MC(9998f, ldp	x2, x3, [x1])
CPY_MC(9998f, stnp	x4, x5, [x0, #16 - 256])
CPY_MC(9998f, ldp	x4, x5, [x1, #16])
CPY_MC(9998f, stnp	x6, x7, [x0, #32 - 256])
CPY_MC(9998f, ldp	x6, x7, [x1, #32])
CPY_MC(9998f, stnp	x8, x9, [x0, #48 - 256])
CPY_MC(9998f, ldp	x8, x9, [x1, #48])
CPY_MC(9998f, stnp	x10, x11, [x0, #64 - 256])
CPY_MC(9998f, ldp	x10, x11, [x1, #64])
CPY_MC(9998f, stnp	x12, x13, [x0, #80 - 256])
CPY_MC(9998f, ldp	x12, x13, [x1, #80])
CPY_MC(9998f, stnp	x14, x15, [x0, #96 - 256])
CPY_MC(9998f, ldp	x14, x15, [x1, #96])
CPY_MC(9998f, stnp	x16, x17, [x0, #112 - 256])
CPY_MC(9998f, ldp	x16, x17, [x1, #112])

	add	x0, x0, #128
	add	x1, x1, #128

	b.ne	1b

CPY_MC(9998f, stnp	x2, x3, [x0, #-256])
CPY_MC(9998f, stnp	x4, x5, [x0, #16 - 256])
CPY_MC(9998f, stnp	x6, x7, [x0, #32 - 256])
CPY_MC(9998f, stnp	x8, x9, [x0, #48 - 256])
CPY_MC(9998f, stnp	x10, x11, [x0, #64 - 256])
CPY_MC(9998f, stnp	x12, x13, [x0, #80 - 256])
CPY_MC(9998f, stnp	x14, x15, [x0, #96 - 256])
CPY_MC(9998f, stnp	x16, x17, [x0, #112 - 256])
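	// Machine-check fixup target below: a poisoned load/store in any
	// CPY_MC() above branches to 9998 and returns early, leaving the
	// copy incomplete for the memory error handling path to deal with.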

9998:	ret

SYM_FUNC_END(copy_page_mc)
EXPORT_SYMBOL(copy_page_mc)