Commit 8018db85 authored by Peter Xu, committed by Linus Torvalds

mm: rework swap handling of zap_pte_range

Clean the code up by merging the device private/exclusive swap entry
handling with the rest of the swap entry cases, then merge the pte clear
operation too.

The "struct page *page" variable is declared in multiple places in the
function, so move the declaration up to the top of the loop.

free_swap_and_cache() is only useful for the !non_swap_entry() case, so
move the call into that branch.

No functional change intended.

Link: https://lkml.kernel.org/r/20220216094810.60572-5-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2e148f1e
mm/memory.c: +6 −15
@@ -1361,6 +1361,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
+		struct page *page;
+
 		if (pte_none(ptent))
 			continue;
 
@@ -1368,8 +1370,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			break;
 
 		if (pte_present(ptent)) {
-			struct page *page;
-
 			page = vm_normal_page(vma, addr, ptent);
 			if (unlikely(!should_zap_page(details, page)))
 				continue;
@@ -1403,28 +1403,21 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		entry = pte_to_swp_entry(ptent);
 		if (is_device_private_entry(entry) ||
 		    is_device_exclusive_entry(entry)) {
-			struct page *page = pfn_swap_entry_to_page(entry);
-
+			page = pfn_swap_entry_to_page(entry);
 			if (unlikely(!should_zap_page(details, page)))
 				continue;
-			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 			rss[mm_counter(page)]--;
-
 			if (is_device_private_entry(entry))
 				page_remove_rmap(page, false);
-
 			put_page(page);
-			continue;
-		}
-
-		if (!non_swap_entry(entry)) {
+		} else if (!non_swap_entry(entry)) {
 			/* Genuine swap entry, hence a private anon page */
 			if (!should_zap_cows(details))
 				continue;
 			rss[MM_SWAPENTS]--;
+			if (unlikely(!free_swap_and_cache(entry)))
+				print_bad_pte(vma, addr, ptent, NULL);
 		} else if (is_migration_entry(entry)) {
-			struct page *page;
-
 			page = pfn_swap_entry_to_page(entry);
 			if (!should_zap_page(details, page))
 				continue;
@@ -1436,8 +1429,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			/* We should have covered all the swap entry types */
 			WARN_ON_ONCE(1);
 		}
-		if (unlikely(!free_swap_and_cache(entry)))
-			print_bad_pte(vma, addr, ptent, NULL);
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
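
For orientation, the swap-entry handling after this rework reads roughly as the sketch below. This is a condensed excerpt reconstructed from the hunks above, not standalone compilable code: all identifiers (entry, ptent, page, rss, details, vma, mm, tlb, pte, addr) come from the surrounding zap_pte_range() context, and the else-if branches for entry types not touched by this patch are elided.

	entry = pte_to_swp_entry(ptent);
	if (is_device_private_entry(entry) ||
	    is_device_exclusive_entry(entry)) {
		/* device private/exclusive entries now share the common tail */
		page = pfn_swap_entry_to_page(entry);
		if (unlikely(!should_zap_page(details, page)))
			continue;
		rss[mm_counter(page)]--;
		if (is_device_private_entry(entry))
			page_remove_rmap(page, false);
		put_page(page);
	} else if (!non_swap_entry(entry)) {
		/* Genuine swap entry, hence a private anon page */
		if (!should_zap_cows(details))
			continue;
		rss[MM_SWAPENTS]--;
		/* only meaningful for genuine swap entries, hence moved here */
		if (unlikely(!free_swap_and_cache(entry)))
			print_bad_pte(vma, addr, ptent, NULL);
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		if (!should_zap_page(details, page))
			continue;
		rss[mm_counter(page)]--;
	} else {	/* branches for other entry types elided */
		/* We should have covered all the swap entry types */
		WARN_ON_ONCE(1);
	}
	/* every branch that did not "continue" falls through to one clear */
	pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);

The net effect: the device private/exclusive branch no longer clears the pte and continues on its own, it falls through to the shared pte_clear_not_present_full() at the bottom of the chain, and free_swap_and_cache() now runs only for genuine swap entries instead of unconditionally after the chain.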