Commit d7c0e68d authored by David Hildenbrand, committed by Andrew Morton

mm/ksm: convert break_ksm() to use walk_page_range_vma()

FOLL_MIGRATION exists only for the purpose of break_ksm(), and actually,
there is not even a need to wait for the migration to finish; we only
want to know whether we're dealing with a KSM page.
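
The underlying observation can be sketched with a minimal, hypothetical
helper (illustrative only, not part of this commit): while a page is being
migrated, its PTE holds a migration swap entry that still identifies the
page, so a stable property such as PageKsm() can be tested without waiting
for migration to complete.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Illustrative sketch: resolve the page a PTE refers to, even while
 * migration of that page is in flight. Must be called with the PTE
 * stabilized (e.g., under the page-table lock). KSM pages remain KSM
 * pages until freed, so PageKsm() on the result stays meaningful.
 */
static struct page *pte_to_page_nowait(struct vm_area_struct *vma,
				       unsigned long addr, pte_t pte)
{
	if (pte_present(pte))
		return vm_normal_page(vma, addr, pte);
	if (!pte_none(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/* A migration entry encodes the PFN of the page in transit. */
		if (is_migration_entry(entry))
			return pfn_swap_entry_to_page(entry);
	}
	return NULL;
}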

Using follow_page() just to identify a KSM page overcomplicates GUP code.
Let's use walk_page_range_vma() instead, because we don't actually care
about the page itself; we only need to know a single property of it -- no
need to even grab a reference.
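
For readers unfamiliar with the pagewalk API, here is a minimal,
hypothetical sketch (names are illustrative, not from this commit) of the
contract the patch relies on: a nonzero return from a pmd_entry() callback
stops the walk and is propagated back as walk_page_range_vma()'s return
value, so a walk can answer a yes/no/error question without taking a page
reference.

#include <linux/mm.h>
#include <linux/pagewalk.h>

/*
 * Hypothetical example: is any PTE in the walked range present?
 * Callback return values: 0 keeps walking, a positive value stops the
 * walk and is returned by walk_page_range_vma(), a negative value
 * aborts with an error. The caller must hold the mmap_lock.
 */
static int any_present_pmd_entry(pmd_t *pmd, unsigned long addr,
				 unsigned long next, struct mm_walk *walk)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	int ret = 0;

	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
		return 0;

	start_pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (pte = start_pte; addr != next; pte++, addr += PAGE_SIZE) {
		if (pte_present(*pte)) {
			ret = 1;	/* found one: stop the walk */
			break;
		}
	}
	pte_unmap_unlock(start_pte, ptl);
	return ret;
}

static const struct mm_walk_ops any_present_ops = {
	.pmd_entry = any_present_pmd_entry,
};

/* 1 if some PTE in [start, end) is present, 0 if none, negative on error */
static int range_has_present_pte(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	return walk_page_range_vma(vma, start, end, &any_present_ops, NULL);
}

break_ksm() in the diff below uses exactly this shape: it walks the
single-page range [addr, addr + 1) and treats the result as a tristate
int rather than a page pointer.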

So, get rid of follow_page() usage such that we can get rid of
FOLL_MIGRATION now and eventually be able to get rid of follow_page() in
the future.

In my setup (AMD Ryzen 9 3900X), running the KSM selftest to measure
unmerge performance on 2 GiB (taskset 0x8 ./ksm_tests -D -s 2048) shows a
performance degradation of ~2% (old: ~5010 MiB/s, new: ~4900 MiB/s).  I
don't think we particularly care for now.

Interestingly, the benchmark reduction is due to the single callback alone;
adding a second callback (e.g., pud_entry()) reduces the benchmark by
another 100-200 MiB/s.

Link: https://lkml.kernel.org/r/20221021101141.84170-9-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e07cda5f
mm/ksm.c +39 −10
@@ -39,6 +39,7 @@
 #include <linux/freezer.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/pagewalk.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -419,6 +420,39 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
 	return atomic_read(&mm->mm_users) == 0;
 }
 
+static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
+			struct mm_walk *walk)
+{
+	struct page *page = NULL;
+	spinlock_t *ptl;
+	pte_t *pte;
+	int ret;
+
+	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
+		return 0;
+
+	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (pte_present(*pte)) {
+		page = vm_normal_page(walk->vma, addr, *pte);
+	} else if (!pte_none(*pte)) {
+		swp_entry_t entry = pte_to_swp_entry(*pte);
+
+		/*
+		 * As KSM pages remain KSM pages until freed, no need to wait
+		 * here for migration to end.
+		 */
+		if (is_migration_entry(entry))
+			page = pfn_swap_entry_to_page(entry);
+	}
+	ret = page && PageKsm(page);
+	pte_unmap_unlock(pte, ptl);
+	return ret;
+}
+
+static const struct mm_walk_ops break_ksm_ops = {
+	.pmd_entry = break_ksm_pmd_entry,
+};
+
 /*
  * We use break_ksm to break COW on a ksm page by triggering unsharing,
  * such that the ksm page will get replaced by an exclusive anonymous page.
@@ -434,21 +468,16 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
  */
 static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *page;
 	vm_fault_t ret = 0;
 
 	do {
-		bool ksm_page = false;
+		int ksm_page;
 
 		cond_resched();
-		page = follow_page(vma, addr,
-				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
-		if (IS_ERR_OR_NULL(page))
-			break;
-		if (PageKsm(page))
-			ksm_page = true;
-		put_page(page);
-
+		ksm_page = walk_page_range_vma(vma, addr, addr + 1,
+					       &break_ksm_ops, NULL);
+		if (WARN_ON_ONCE(ksm_page < 0))
+			return ksm_page;
 		if (!ksm_page)
 			return 0;
 		ret = handle_mm_fault(vma, addr,