Commit f8ee8909 authored by Jens Axboe's avatar Jens Axboe Committed by Linus Torvalds
Browse files

mm: move more expensive part of XA setup out of mapping check

The fast path here is not needing any writeback, yet we spend time
setting up the xarray lookup data upfront.  Move the part that actually
needs to iterate the address space mapping into a separate helper,
saving ~30% of the time here.

Link: https://lkml.kernel.org/r/49f67983-b802-8929-edab-d807f745c9ca@kernel.dk


Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent d417b49f
Loading
Loading
Loading
Loading
+25 −18
Original line number Original line Diff line number Diff line
@@ -639,6 +639,30 @@ static bool mapping_needs_writeback(struct address_space *mapping)
	return mapping->nrpages;
	return mapping->nrpages;
}
}


/*
 * Walk the page cache over the byte range [start_byte, end_byte] and
 * report whether it contains any page that is dirty, locked, or under
 * writeback.  This is the expensive part of the check, split out so
 * filemap_range_needs_writeback() can bail early on its cheap
 * mapping-level tests before paying for the xarray walk (per the
 * commit: ~30% faster on the no-writeback fast path).
 *
 * No page references are taken; we only inspect page flags under RCU.
 */
static bool filemap_range_has_writeback(struct address_space *mapping,
					loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct page *page;

	/* An inverted range is empty and thus has nothing to write back. */
	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		/* Transient xarray state: restart the lookup at this index. */
		if (xas_retry(&xas, page))
			continue;
		/* Value entries (not struct pages) cannot need writeback. */
		if (xa_is_value(page))
			continue;
		/*
		 * Stop on the first page that might need waiting for.
		 * NOTE(review): PageLocked is presumably checked because a
		 * locked page may be about to be dirtied — confirm intent.
		 */
		if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
			break;
	}
	rcu_read_unlock();
	/*
	 * xas_for_each() leaves page == NULL when the walk completes
	 * without breaking out, so non-NULL means "found a match".
	 */
	return page != NULL;

}

/**
/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping:           address space within which to check
 * @mapping:           address space within which to check
@@ -656,29 +680,12 @@ static bool mapping_needs_writeback(struct address_space *mapping)
bool filemap_range_needs_writeback(struct address_space *mapping,
bool filemap_range_needs_writeback(struct address_space *mapping,
				   loff_t start_byte, loff_t end_byte)
				   loff_t start_byte, loff_t end_byte)
{
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct page *page;

	if (!mapping_needs_writeback(mapping))
	if (!mapping_needs_writeback(mapping))
		return false;
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
		return false;
	if (end_byte < start_byte)
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
		return false;

	rcu_read_lock();
	xas_for_each(&xas, page, max) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			continue;
		if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
			break;
	}
	rcu_read_unlock();
	return page != NULL;
}
}
EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);
EXPORT_SYMBOL_GPL(filemap_range_needs_writeback);