Commit 7b3df3b9 authored by David Howells, committed by Linus Torvalds

mm/readahead: pass readahead_control to force_page_cache_ra



Reimplement force_page_cache_readahead() as a wrapper around
force_page_cache_ra().  Pass the existing readahead_control from
page_cache_sync_readahead().

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-7-willy@infradead.org


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6e4af69a
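
For reference (not part of this diff): the readahead_control passed around here is the on-stack descriptor this series threads through the readahead paths. At this point in the tree it looks roughly like the following, per include/linux/pagemap.h; treat this as a sketch rather than the verbatim definition:

	struct readahead_control {
		struct file *file;
		struct address_space *mapping;
	/* private: use the readahead_* accessors instead */
		pgoff_t _index;			/* index of the first page to read */
		unsigned int _nr_pages;		/* number of pages in the batch */
		unsigned int _batch_count;
	};

The underscore-prefixed fields are private to the readahead code; callers reach them through accessors such as readahead_index(), which the new force_page_cache_ra() below uses.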
mm/internal.h  +9 −4

@@ -49,10 +49,15 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
 
-void force_page_cache_readahead(struct address_space *, struct file *,
-		pgoff_t index, unsigned long nr_to_read);
-void do_page_cache_ra(struct readahead_control *,
-		unsigned long nr_to_read, unsigned long lookahead_size);
+void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
+		unsigned long lookahead_size);
+void force_page_cache_ra(struct readahead_control *, unsigned long nr);
+static inline void force_page_cache_readahead(struct address_space *mapping,
+		struct file *file, pgoff_t index, unsigned long nr_to_read)
+{
+	DEFINE_READAHEAD(ractl, file, mapping, index);
+	force_page_cache_ra(&ractl, nr_to_read);
+}
 
 /*
  * Submit IO for the read-ahead request in file_ra_state.
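
The wrapper above depends on the DEFINE_READAHEAD() helper to build the on-stack readahead_control. A sketch of what that macro expands to, per include/linux/pagemap.h at this point in the series:

	#define DEFINE_READAHEAD(rac, f, m, i)				\
		struct readahead_control rac = {			\
			.file = f,					\
			.mapping = m,					\
			._index = i,					\
		}

This keeps force_page_cache_readahead() source-compatible for existing callers while the actual work moves into force_page_cache_ra().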
mm/readahead.c  +10 −8

@@ -271,13 +271,13 @@ void do_page_cache_ra(struct readahead_control *ractl,
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
  * memory at once.
  */
-void force_page_cache_readahead(struct address_space *mapping,
-		struct file *file, pgoff_t index, unsigned long nr_to_read)
+void force_page_cache_ra(struct readahead_control *ractl,
+		unsigned long nr_to_read)
 {
-	DEFINE_READAHEAD(ractl, file, mapping, index);
+	struct address_space *mapping = ractl->mapping;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
-	struct file_ra_state *ra = &file->f_ra;
-	unsigned long max_pages;
+	struct file_ra_state *ra = &ractl->file->f_ra;
+	unsigned long max_pages, index;
 
 	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
 			!mapping->a_ops->readahead))
@@ -287,14 +287,16 @@ void force_page_cache_readahead(struct address_space *mapping,
 	 * If the request exceeds the readahead window, allow the read to
 	 * be up to the optimal hardware IO size
 	 */
+	index = readahead_index(ractl);
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
-	nr_to_read = min(nr_to_read, max_pages);
+	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
 	while (nr_to_read) {
 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
-		do_page_cache_ra(&ractl, this_chunk, 0);
+		ractl->_index = index;
+		do_page_cache_ra(ractl, this_chunk, 0);
 
 		index += this_chunk;
 		nr_to_read -= this_chunk;
@@ -576,7 +578,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
 
 	/* be dumb */
 	if (filp && (filp->f_mode & FMODE_RANDOM)) {
-		force_page_cache_readahead(mapping, filp, index, req_count);
+		force_page_cache_ra(&ractl, req_count);
 		return;
 	}
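
For context on the last hunk: page_cache_sync_readahead() already declares the on-stack ractl near the top of the function (added by an earlier patch in this series), which is what makes &ractl available at this call site. A sketch of the surrounding code, with unrelated lines elided:

	void page_cache_sync_readahead(struct address_space *mapping,
			struct file_ra_state *ra, struct file *filp,
			pgoff_t index, unsigned long req_count)
	{
		DEFINE_READAHEAD(ractl, filp, mapping, index);

		/* ... */
		/* be dumb */
		if (filp && (filp->f_mode & FMODE_RANDOM)) {
			force_page_cache_ra(&ractl, req_count);
			return;
		}
		/* ... */
	}

Note also why force_page_cache_ra() re-seeds ractl->_index on each loop iteration: the readahead machinery advances the control's index as pages are submitted, and each chunk, (2 * 1024 * 1024) / PAGE_SIZE = 512 pages with 4KB pages, must start where the previous one left off.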