Commit e07cda5f authored by David Hildenbrand's avatar David Hildenbrand Committed by Andrew Morton
Browse files

mm/pagewalk: add walk_page_range_vma()

Let's add walk_page_range_vma(), which is similar to walk_page_vma(),
however, is only interested in a subset of the VMA range.

To be used in KSM code to stop using follow_page() next.

Link: https://lkml.kernel.org/r/20221021101141.84170-8-david@redhat.com


Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6cce3314
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -101,6 +101,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 			  unsigned long end, const struct mm_walk_ops *ops,
 			  pgd_t *pgd,
 			  void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, const struct mm_walk_ops *ops,
+			void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+20 −0
Original line number Original line Diff line number Diff line
@@ -517,6 +517,26 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 	return walk_pgd_range(start, end, &walk);
 }
 
/**
 * walk_page_range_vma - walk a sub-range of page tables within a single VMA
 * @vma:	the VMA whose page tables are walked
 * @start:	start address of the range; must lie within @vma
 * @end:	end address (exclusive) of the range; must lie within @vma
 * @ops:	operations to call back during the walk
 * @private:	private data passed through to the @ops callbacks
 *
 * Like walk_page_vma(), but restricted to the [@start, @end) subset of the
 * VMA instead of the whole VMA (per the commit message, added so KSM can
 * stop using follow_page()).
 *
 * The caller must hold the mmap lock of vma->vm_mm (asserted below via
 * mmap_assert_locked()).
 *
 * Return: -EINVAL if the range is empty, vma->vm_mm is NULL, or the range
 * is not fully contained in @vma; otherwise the result of
 * __walk_page_range().
 */
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};

	/* Reject empty/inverted ranges and a missing mm. */
	if (start >= end || !walk.mm)
		return -EINVAL;
	/* The requested range must be fully covered by this one VMA. */
	if (start < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	mmap_assert_locked(walk.mm);
	return __walk_page_range(start, end, &walk);
}

 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {