Commit 515778e2 authored by Peter Xu, committed by Andrew Morton
Browse files

mm/uffd: fix warning without PTE_MARKER_UFFD_WP compiled in

When PTE_MARKER_UFFD_WP is not configured, it is still possible to reach the pte
marker code and trigger a warning. Add a few CONFIG_PTE_MARKER_UFFD_WP
ifdefs to make sure the code won't be reached when not compiled in.

Link: https://lkml.kernel.org/r/YzeR+R6b4bwBlBHh@x1n


Fixes: b1f9e876 ("mm/uffd: enable write protection for shmem & hugetlbfs")
Signed-off-by: Peter Xu <peterx@redhat.com>
Reported-by: <syzbot+2b9b4f0895be09a6dec3@syzkaller.appspotmail.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Edward Liaw <edliaw@google.com>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 28c5609f
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -5096,6 +5096,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
#ifdef CONFIG_PTE_MARKER_UFFD_WP
			/*
			 * If the pte was wr-protected by uffd-wp in any of the
			 * swap forms, meanwhile the caller does not want to
@@ -5107,6 +5108,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP));
			else
#endif
				huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
@@ -5135,11 +5137,13 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);
#ifdef CONFIG_PTE_MARKER_UFFD_WP
		/* Leave a uffd-wp pte marker if needed */
		if (huge_pte_uffd_wp(pte) &&
		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
			set_huge_pte_at(mm, address, ptep,
					make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, vma, true);

+2 −0
Original line number Diff line number Diff line
@@ -1393,10 +1393,12 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte,
			      struct zap_details *details, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (zap_drop_file_uffd_wp(details))
		return;

	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
#endif
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
+2 −0
Original line number Diff line number Diff line
@@ -267,6 +267,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
		} else {
			/* It must be an none page, or what else?.. */
			WARN_ON_ONCE(!pte_none(oldpte));
#ifdef CONFIG_PTE_MARKER_UFFD_WP
			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
				/*
				 * For file-backed mem, we need to be able to
@@ -278,6 +279,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
#endif
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();