Commit 31d270fd authored by Matthew Wilcox (Oracle)'s avatar Matthew Wilcox (Oracle) Committed by Linus Torvalds
Browse files

mm: add an 'end' parameter to pagevec_lookup_entries

Simplifies the callers and uses the existing functionality in
find_get_entries().  We can also drop the final argument of
truncate_exceptional_pvec_entries() and simplify the logic in that
function.

Link: https://lkml.kernel.org/r/20201112212641.27837-12-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ca122fe4
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -26,9 +26,8 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices);
		struct address_space *mapping, pgoff_t start, pgoff_t end,
		unsigned nr_entries, pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
			      struct address_space *mapping,
+4 −4
Original line number Diff line number Diff line
@@ -1022,6 +1022,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @end:	The highest index to return (inclusive).
 * @nr_entries:	The maximum number of pages
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
@@ -1042,11 +1043,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
		struct address_space *mapping, pgoff_t start, pgoff_t end,
		unsigned nr_entries, pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
	pvec->nr = find_get_entries(mapping, start, end, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}
+10 −31
Original line number Diff line number Diff line
@@ -57,11 +57,10 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
				struct pagevec *pvec, pgoff_t *indices)
{
	int i, j;
	bool dax, lock;
	bool dax;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
@@ -75,8 +74,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
	if (!dax)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
@@ -88,9 +86,6 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
@@ -99,7 +94,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
	if (!dax)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}
@@ -329,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		index = indices[pagevec_count(&pvec) - 1] + 1;
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		for (i = 0; i < pagevec_count(&pvec); i++)
			truncate_cleanup_page(mapping, pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &pvec);
@@ -381,8 +376,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
		if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1,
				PAGEVEC_SIZE, indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
@@ -390,23 +385,12 @@ void truncate_inode_pages_range(struct address_space *mapping,
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;
@@ -417,7 +401,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
		pagevec_release(&pvec);
		index++;
	}
@@ -513,8 +497,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
@@ -656,16 +638,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
	while (pagevec_lookup_entries(&pvec, mapping, index, end,
			PAGEVEC_SIZE, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,