Unverified Commit e66b8996 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3214 bring mc support to page eject

Merge Pull Request from: @ci-robot 
 
PR sync from: Wupeng Ma <mawupeng1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/JWIO5QGC4XUAR23TOJ5EEKGSSW5B4XWD/ 
From: Ma Wupeng <mawupeng1@huawei.com>

During page eject, machine check safe is enhanced as follow:

During soft page offline, the page is copied to a new page in the
kernel. If the original page has a UCE (uncorrectable error), this
will lead to a kernel panic.

In order to solve this problem, use machine check safe handling to
catch this error, which can be achieved by using copy_mc_to_kernel to
replace copy_page. A SIGBUS signal will be sent to the user task if
this UCE is consumed in this situation, to avoid a kernel panic.

Changelog since v1:
 - remove unused ret in patch #4
 - add a bugfix for page eject

Jiaqi Yan (1):
  mm/hwpoison: introduce copy_mc_highpage

Ma Wupeng (6):
  mm: page_eject: Return right value during removal
  mm/hwpoison: arm64: introduce copy_mc_highpage
  mm/hwpoison: introduce copy_mc_highpages
  mm/hwpoison: add migrate_page_mc_extra()
  mm: Update PF_COREDUMP_MCS to PF_MCS
  mm: page_eject: Add mc support during offline page


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I8K5CO 
 
Link:https://gitee.com/openeuler/kernel/pulls/3214

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents fbc8debb 45dbef4c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@ void copy_highpage(struct page *to, struct page *from);
#ifdef CONFIG_ARCH_HAS_COPY_MC
extern void copy_page_mc(void *to, const void *from);
void copy_highpage_mc(struct page *to, struct page *from);
int copy_mc_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE_MC

void copy_user_highpage_mc(struct page *to, struct page *from,
+13 −0
Original line number Diff line number Diff line
@@ -61,4 +61,17 @@ void copy_user_highpage_mc(struct page *to, struct page *from,
	flush_dcache_page(to);
}
EXPORT_SYMBOL_GPL(copy_user_highpage_mc);

/*
 * Machine-check-safe highmem page copy for arm64.
 *
 * Copies @from into @to with #MC (uncorrectable memory error) in the
 * source handled. On a fully successful copy the MTE tags are carried
 * over as well.
 *
 * Returns 0 on success, or the number of bytes not copied when a #MC
 * was consumed (per copy_mc_to_kernel()'s contract).
 */
int copy_mc_highpage(struct page *to, struct page *from)
{
	void *dst = page_address(to);
	void *src = page_address(from);
	int err;

	err = copy_mc_to_kernel(dst, src, PAGE_SIZE);
	if (err == 0)
		do_mte(to, from, dst, src, true);

	return err;
}
#endif
+8 −2
Original line number Diff line number Diff line
@@ -20,18 +20,19 @@ struct ejected_pfn {

/*
 * Find and unlink the list entry for @pfn, if any.
 *
 * Walks eject_page_list under eject_page_mutex; on a match the entry is
 * removed from the list and returned (caller owns/frees it). Returns
 * NULL when @pfn is not on the list.
 *
 * Fix: this span contained both the pre- and post-patch diff lines
 * (duplicate declaration and two return statements). Reconstructed as
 * the post-patch version: a dedicated `ret` pointer is returned instead
 * of the loop cursor `item`, which after a full list walk with
 * list_for_each_entry_safe() is a bogus container_of() of the list head
 * rather than NULL.
 */
static struct ejected_pfn *page_eject_remove_pfn_locked(unsigned long pfn)
{
	struct ejected_pfn *item, *next, *ret = NULL;

	mutex_lock(&eject_page_mutex);
	list_for_each_entry_safe(item, next, &eject_page_list, list) {
		if (pfn == item->pfn) {
			list_del(&item->list);
			ret = item;
			break;
		}
	}
	mutex_unlock(&eject_page_mutex);

	return ret;
}

static void page_eject_add_pfn_locked(struct ejected_pfn *item)
@@ -76,8 +77,13 @@ static int page_eject_offline_page(unsigned long pfn)
	 * if soft_offline_page return 0 because PageHWPoison, this pfn
	 * will add to list and this add will be removed during online
	 * since it is poisoned.
	 *
	 * Update task flag with PF_MCS to enable mc support during page
	 * migration.
	 */
	current->flags |= PF_MCS;
	ret = soft_offline_page(pfn, 0);
	current->flags &= ~PF_MCS;
	if (ret) {
		pr_err("page fail to be offlined, soft_offline_page failed(%d), pfn=%#lx\n",
		       ret, pfn);
+2 −2
Original line number Diff line number Diff line
@@ -907,9 +907,9 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
		if (page) {
			void *kaddr = kmap(page);

			current->flags |= PF_COREDUMP_MCS;
			current->flags |= PF_MCS;
			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
			current->flags &= ~PF_COREDUMP_MCS;
			current->flags &= ~PF_MCS;
			kunmap(page);
			put_page(page);
		} else {
+52 −0
Original line number Diff line number Diff line
@@ -396,4 +396,56 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
	kunmap_atomic(to);
}

#ifdef copy_mc_to_kernel
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
/*
 * If architecture supports machine check exception handling, define the
 * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
 * page with #MC in source page (@from) handled, and return the number
 * of bytes not copied if there was a #MC, otherwise 0 for success.
 */
/*
 * Generic #MC-safe page copy: copy @from into @to through temporary
 * atomic kernel mappings, with a machine check in the source page
 * handled by copy_mc_to_kernel().
 *
 * Returns 0 on success, otherwise the number of bytes not copied.
 */
static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	char *src, *dst;
	int err;

	src = kmap_atomic(from);
	dst = kmap_atomic(to);
	err = copy_mc_to_kernel(dst, src, PAGE_SIZE);
	/* kmap_atomic mappings must be released in reverse (stack) order. */
	kunmap_atomic(dst);
	kunmap_atomic(src);

	return err;
}
#endif

/* Return -EFAULT if there was a #MC during copy, otherwise 0 for success. */
/*
 * #MC-safe copy of @nr_pages contiguous pages from @from to @to,
 * rescheduling between pages. Stops at the first page that hits a
 * machine check.
 *
 * Return: -EFAULT if there was a #MC during copy, otherwise 0 for success.
 */
static inline int copy_mc_highpages(struct page *to, struct page *from, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		if (copy_mc_highpage(to + i, from + i))
			return -EFAULT;
	}

	return 0;
}
#else
/*
 * Fallback when the architecture provides no #MC-safe copy
 * (copy_mc_to_kernel not defined): plain page copy, always reports
 * success since no machine check can be consumed here.
 */
static inline int copy_mc_highpage(struct page *to, struct page *from)
{
	copy_highpage(to, from);
	return 0;
}

/*
 * Fallback multi-page variant for architectures without #MC-safe copy:
 * delegates to copy_highpages() and always returns 0 (success).
 */
static inline int copy_mc_highpages(struct page *to, struct page *from, int nr_pages)
{
	copy_highpages(to, from, nr_pages);
	return 0;
}
#endif

#endif /* _LINUX_HIGHMEM_H */
Loading