Commit 17d08829 authored by Ma Wupeng's avatar Ma Wupeng Committed by Wupeng Ma
Browse files

mm/hwpoison: add migrate_page_mc_extra()

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8K5CO



--------------------------------

During page migration, the page is copied in kernel space. If the origin
page contains a UCE, this will lead to a kernel panic.

In order to solve this problem, use machine check safe to catch this
error which can be achieved by using copy_mc_to_kernel to replace
copy_page. Signal SIGBUS will be sent to the user task if this UCE
is consumed in this situation, to avoid a kernel panic.

Add a new param to copy_huge_page to support mc. If mc is set,
copy_mc_highpage will be called rather than copy_highpage during
memory copy.

Since migrate_page_move_mapping() is done before page copy, rollback is
hard due to race condition. Do copy page at the start of function
migrate_page_mc_extra() to solve this problem.

Signed-off-by: default avatarMa Wupeng <mawupeng1@huawei.com>
parent c10a520c
Loading
Loading
Loading
Loading
+61 −9
Original line number Diff line number Diff line
@@ -548,24 +548,33 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
/*
 * Copy a gigantic page one subpage at a time.
 *
 * @dst:      destination head page
 * @src:      source head page
 * @nr_pages: number of subpages to copy
 * @mc:       if true, copy with machine-check protection
 *            (copy_mc_highpage) so a UCE in @src is caught instead of
 *            causing a kernel panic
 *
 * Returns 0 on success, -EFAULT if a machine-check-protected copy hit
 * an uncorrectable error.
 *
 * NOTE(review): the visible diff interleaved the old and new versions
 * of this function; this is the reconstructed post-patch body.
 */
static int __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages, bool mc)
{
	int i, ret = 0;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		/* gigantic pages are large; don't hog the CPU */
		cond_resched();

		if (mc) {
			ret = copy_mc_highpage(dst, src);
			if (ret)
				return -EFAULT;
		} else {
			copy_highpage(dst, src);
		}

		i++;
		/* mem_map may not be contiguous across MAX_ORDER blocks */
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}

	return ret;
}

/*
 * Copy a huge page (hugetlb or THP), optionally with machine-check
 * protection.
 *
 * @dst: destination head page
 * @src: source head page
 * @mc:  if true, use the machine-check-safe copy routines so a UCE in
 *       @src is reported (-EFAULT) instead of panicking the kernel
 *
 * Returns 0 on success, -EFAULT on a consumed uncorrectable error when
 * @mc is set.
 *
 * NOTE(review): the visible diff interleaved old and new lines and the
 * hunk boundary hid the PageHuge() guard; this is the reconstructed
 * post-patch body.
 */
static int __copy_huge_page(struct page *dst, struct page *src, bool mc)
{
	int nr_pages;

	if (PageHuge(src)) {
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		/* gigantic pages need the subpage-at-a-time path */
		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
			return __copy_gigantic_page(dst, src, nr_pages, mc);
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = thp_nr_pages(src);
	}

	if (mc)
		return copy_mc_highpages(dst, src, nr_pages);

	copy_highpages(dst, src, nr_pages);
	return 0;
}

static int copy_huge_page(struct page *dst, struct page *src)
{
	return __copy_huge_page(dst, src, false);
}

static int copy_mc_huge_page(struct page *dst, struct page *src)
{
	return __copy_huge_page(dst, src, true);
}

/*
@@ -674,6 +695,37 @@ void migrate_page_copy(struct page *newpage, struct page *page)
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Machine-check-safe copy of @page into @newpage, dispatching on page
 * size. Returns 0 on success or a negative error if a UCE was consumed.
 */
static int migrate_page_copy_mc(struct page *newpage, struct page *page)
{
	/* hugetlb and THP pages take the multi-subpage path */
	if (PageHuge(page) || PageTransHuge(page))
		return copy_mc_huge_page(newpage, page);

	return copy_mc_highpage(newpage, page);
}

/*
 * Migrate @page to @newpage with machine-check-safe data copy.
 *
 * The page data is copied BEFORE migrate_page_move_mapping(): once the
 * mapping has moved, rolling back after a UCE would race, so a copy
 * failure here aborts the migration while the original mapping is
 * still intact.
 *
 * Returns MIGRATEPAGE_SUCCESS, or a negative error from the copy /
 * mapping move.
 */
static int migrate_page_mc_extra(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode, int extra_count)
{
	int ret;

	ret = migrate_page_copy_mc(newpage, page);
	if (ret)
		return ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, extra_count);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

/************************************************************
 *                    Migration functions
 ***********************************************************/