Commit b50e195f authored by Liam R. Howlett's avatar Liam R. Howlett Committed by Andrew Morton
Browse files

mm: update validate_mm() to use vma iterator

Use the vma iterator in the validation code and combine the code to check
the maple tree into the main validate_mm() function.

Introduce a new function vma_iter_dump_tree() to dump the maple tree in
hex layout.

Replace all calls to validate_mm_mt() with validate_mm().

[Liam.Howlett@oracle.com: update validate_mm() to use vma iterator CONFIG flag]
  Link: https://lkml.kernel.org/r/20230606183538.588190-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20230518145544.1722059-18-Liam.Howlett@oracle.com


Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: David Binderman <dcb314@hotmail.com>
Cc: Peng Zhang <zhangpeng.00@bytedance.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vernon Yang <vernon2gm@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a5199577
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -8,10 +8,12 @@
struct page;
struct vm_area_struct;
struct mm_struct;
struct vma_iterator;

void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
void vma_iter_dump_tree(const struct vma_iterator *vmi);

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
	}								\
	unlikely(__ret_warn_once);					\
})
#define VM_WARN_ON_ONCE_MM(cond, mm)		({			\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(__ret_warn_once && !__warned)) {			\
		dump_mm(mm);						\
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})

#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON_ONCE_PAGE(cond, page)  BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_MM(cond, mm)  BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
+9 −0
Original line number Diff line number Diff line
@@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size)
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

/*
 * vma_iter_dump_tree() - Dump the maple tree state backing a VMA iterator.
 * @vmi: The VMA iterator whose internal maple state and tree are dumped.
 *
 * Compiles to a no-op unless CONFIG_DEBUG_VM_MAPLE_TREE is enabled;
 * otherwise dumps the iterator's maple state and then the whole tree in
 * hex layout, for post-mortem analysis of tree/VMA mismatches.
 */
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif		/* CONFIG_DEBUG_VM */
+2 −1
Original line number Diff line number Diff line
@@ -1064,13 +1064,14 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
		printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
		mt_dump(vmi->mas.tree, mt_dump_hex);
		vma_iter_dump_tree(vmi);
	}
	if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
		printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
		printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
		printk("into slot    %lu-%lu", vmi->mas.index, vmi->mas.last);
		mt_dump(vmi->mas.tree, mt_dump_hex);
		vma_iter_dump_tree(vmi);
	}
#endif

+36 −58
Original line number Diff line number Diff line
@@ -300,61 +300,40 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
extern void mt_validate(struct maple_tree *mt);
extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt);

/* Validate the maple tree */
/*
 * validate_mm_mt() - Cross-check the maple tree against every VMA it stores.
 * @mm: The mm_struct whose mm_mt maple tree is validated.
 *
 * Runs the tree's structural self-check (mt_validate()), then walks each
 * entry and verifies the range recorded in the tree ([mas.index, mas.last],
 * inclusive) matches the VMA's [vm_start, vm_end - 1].  On any mismatch the
 * offending VMA and tree are dumped and VM_BUG_ON_MM() fires.
 */
static void validate_mm_mt(struct mm_struct *mm)
{
	struct maple_tree *mt = &mm->mm_mt;
	struct vm_area_struct *vma_mt;

	MA_STATE(mas, mt, 0, 0);

	mt_validate(&mm->mm_mt);
	mas_for_each(&mas, vma_mt, ULONG_MAX) {
		/* mas.last is inclusive; vm_end is exclusive, hence the -1. */
		if ((vma_mt->vm_start != mas.index) ||
		    (vma_mt->vm_end - 1 != mas.last)) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma_mt);
			pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
				 mas.index, mas.last);
			pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
				 vma_mt->vm_start, vma_mt->vm_end);

			mt_dump(mas.tree, mt_dump_hex);
			/* Report which bound disagrees before the BUG fires. */
			if (vma_mt->vm_end != mas.last + 1) {
				pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
						mm, vma_mt->vm_start, vma_mt->vm_end,
						mas.index, mas.last);
				mt_dump(mas.tree, mt_dump_hex);
			}
			VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
			if (vma_mt->vm_start != mas.index) {
				pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
						mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
				mt_dump(mas.tree, mt_dump_hex);
			}
			VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
		}
	}
}

static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	validate_mm_mt(mm);
	VMA_ITERATOR(vmi, mm, 0);

	mas_for_each(&mas, vma, ULONG_MAX) {
	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -365,14 +344,13 @@ static void validate_mm(struct mm_struct *mm)
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}

#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
#define validate_mm_mt(root) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

@@ -2234,7 +2212,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	struct vm_area_struct *new;
	int err;

	validate_mm_mt(vma->vm_mm);
	validate_mm(vma->vm_mm);

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);
@@ -2292,7 +2270,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	/* Success. */
	if (new_below)
		vma_next(vmi);
	validate_mm_mt(vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;

out_free_mpol:
@@ -2301,7 +2279,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	validate_mm_mt(vma->vm_mm);
	validate_mm(vma->vm_mm);
	return err;
}

@@ -2936,7 +2914,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,

	arch_unmap(mm, start, end);
	ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
	validate_mm_mt(mm);
	validate_mm(mm);
	return ret;
}

@@ -2958,7 +2936,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
	struct mm_struct *mm = current->mm;
	struct vma_prepare vp;

	validate_mm_mt(mm);
	validate_mm(mm);
	/*
	 * Check against address space limits by the changed size
	 * Note: This happens *after* clearing old mappings in some code paths.
@@ -3199,7 +3177,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);

	validate_mm_mt(mm);
	validate_mm(mm);
	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
@@ -3258,7 +3236,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	validate_mm_mt(mm);
	validate_mm(mm);
	return new_vma;

out_vma_link:
@@ -3274,7 +3252,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
out_free_vma:
	vm_area_free(new_vma);
out:
	validate_mm_mt(mm);
	validate_mm(mm);
	return NULL;
}

@@ -3411,7 +3389,7 @@ static struct vm_area_struct *__install_special_mapping(
	int ret;
	struct vm_area_struct *vma;

	validate_mm_mt(mm);
	validate_mm(mm);
	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);
@@ -3434,12 +3412,12 @@ static struct vm_area_struct *__install_special_mapping(

	perf_event_mmap(vma);

	validate_mm_mt(mm);
	validate_mm(mm);
	return vma;

out:
	vm_area_free(vma);
	validate_mm_mt(mm);
	validate_mm(mm);
	return ERR_PTR(ret);
}