Commit 182ea1d7 authored by Matthew Wilcox (Oracle), committed by Andrew Morton
Browse files

coredump: remove vma linked list walk

Use the Maple Tree iterator instead.  This is too complicated for the VMA
iterator to handle, so let's open-code it for now.  If this turns out to
be a common pattern, we can migrate it to common code.

Link: https://lkml.kernel.org/r/20220906194824.2110408-41-Liam.Howlett@oracle.com


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cbd43755
Loading
Loading
Loading
Loading
+12 −22
Original line number Diff line number Diff line
@@ -1072,30 +1072,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
	return vma->vm_end - vma->vm_start;
}

/*
 * Return the first VMA to visit in a coredump walk: the head of the
 * task's mmap list, or the gate VMA when the list is empty.
 */
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *first = tsk->mm->mmap;

	return first ? first : gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
				       struct vm_area_struct *vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
	if (gate_vma && (vma == gate_vma))
		return NULL;

	vma = mas_next(mas, ULONG_MAX);
	if (vma)
		return vma;
	return gate_vma;
}

@@ -1119,9 +1109,10 @@ static void free_vma_snapshot(struct coredump_params *cprm)
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
	struct vm_area_struct *vma, *gate_vma;
	struct vm_area_struct *gate_vma, *vma = NULL;
	struct mm_struct *mm = current->mm;
	int i;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	int i = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
@@ -1141,8 +1132,7 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
		return false;
	}

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma), i++) {
	while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
		struct core_vma_metadata *m = cprm->vma_meta + i;

		m->start = vma->vm_start;
@@ -1150,10 +1140,10 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);
		m->pgoff = vma->vm_pgoff;

		m->file = vma->vm_file;
		if (m->file)
			get_file(m->file);
		i++;
	}

	mmap_write_unlock(mm);