Commit fefa7c47 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/readahead: add page_cache_sync_ra and page_cache_async_ra

Reimplement page_cache_sync_readahead() and page_cache_async_readahead()
as wrappers around new versions of these functions that take a
readahead_control, in preparation for making do_sync_mmap_readahead()
pass down an RAC struct.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Eric Biggers <ebiggers@google.com>
Link: https://lkml.kernel.org/r/20200903140844.14194-8-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7b3df3b9
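
[Editor's note] The change is mechanical: the old five-argument entry points become static inline wrappers in pagemap.h that build a readahead_control on the stack via DEFINE_READAHEAD() and forward to the new *_ra() functions, which carry the file, mapping, and index inside the control structure. A minimal sketch of the call pattern before and after (the caller context is illustrative, not from the patch):

	/* Before: the caller hands mapping/file/index as loose arguments. */
	page_cache_sync_readahead(mapping, ra, file, index, req_count);

	/* After: the same call still compiles unchanged, but it is now an
	 * inline wrapper that packs those arguments into a stack-allocated
	 * readahead_control and forwards to the new core entry point:
	 */
	DEFINE_READAHEAD(ractl, file, mapping, index);
	page_cache_sync_ra(&ractl, ra, req_count);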
include/linux/pagemap.h  +54 −10
@@ -761,16 +761,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
 
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
-
-void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
-		struct file *, pgoff_t index, unsigned long req_count);
-void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
-		struct file *, struct page *, pgoff_t index,
-		unsigned long req_count);
-void page_cache_ra_unbounded(struct readahead_control *,
-		unsigned long nr_to_read, unsigned long lookahead_count);
-
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
@@ -818,6 +808,60 @@ struct readahead_control {
 		._index = i,						\
 	}
 
+#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+
+void page_cache_ra_unbounded(struct readahead_control *,
+		unsigned long nr_to_read, unsigned long lookahead_count);
+void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *,
+		unsigned long req_count);
+void page_cache_async_ra(struct readahead_control *, struct file_ra_state *,
+		struct page *, unsigned long req_count);
+
+/**
+ * page_cache_sync_readahead - generic file readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_sync_readahead() should be called when a cache miss happened:
+ * it will submit the read.  The readahead logic may decide to piggyback more
+ * pages onto the read request if access patterns suggest it will improve
+ * performance.
+ */
+static inline
+void page_cache_sync_readahead(struct address_space *mapping,
+		struct file_ra_state *ra, struct file *file, pgoff_t index,
+		unsigned long req_count)
+{
+	DEFINE_READAHEAD(ractl, file, mapping, index);
+	page_cache_sync_ra(&ractl, ra, req_count);
+}
+
+/**
+ * page_cache_async_readahead - file readahead for marked pages
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @file: Used by the filesystem for authentication.
+ * @page: The page at @index which triggered the readahead call.
+ * @index: Index of first page to be read.
+ * @req_count: Total number of pages being read by the caller.
+ *
+ * page_cache_async_readahead() should be called when a page is used which
+ * is marked as PageReadahead; this is a marker to suggest that the application
+ * has used up enough of the readahead window that we should start pulling in
+ * more pages.
+ */
+static inline
+void page_cache_async_readahead(struct address_space *mapping,
+		struct file_ra_state *ra, struct file *file,
+		struct page *page, pgoff_t index, unsigned long req_count)
+{
+	DEFINE_READAHEAD(ractl, file, mapping, index);
+	page_cache_async_ra(&ractl, ra, page, req_count);
+}
+
 /**
  * readahead_page - Get the next page to read.
  * @rac: The current readahead request.
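
[Editor's note] The wrappers above lean on DEFINE_READAHEAD(), whose tail (`._index = i,`) is visible in the hunk context. In this tree the helper expands roughly as below; this is reproduced for reference and should be treated as a sketch rather than the authoritative definition:

	#define DEFINE_READAHEAD(rac, f, m, i)				\
		struct readahead_control rac = {			\
			.file = f,					\
			.mapping = m,					\
			._index = i,					\
		}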
mm/readahead.c  +12 −46
@@ -550,25 +550,9 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
-/**
- * page_cache_sync_readahead - generic file readahead
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_sync_readahead() should be called when a cache miss happened:
- * it will submit the read.  The readahead logic may decide to piggyback more
- * pages onto the read request if access patterns suggest it will improve
- * performance.
- */
-void page_cache_sync_readahead(struct address_space *mapping,
-			       struct file_ra_state *ra, struct file *filp,
-			       pgoff_t index, unsigned long req_count)
+void page_cache_sync_ra(struct readahead_control *ractl,
+		struct file_ra_state *ra, unsigned long req_count)
 {
-	DEFINE_READAHEAD(ractl, filp, mapping, index);
-
 	/* no read-ahead */
 	if (!ra->ra_pages)
 		return;
@@ -577,38 +561,20 @@ void page_cache_sync_readahead(struct address_space *mapping,
 		return;
 
 	/* be dumb */
-	if (filp && (filp->f_mode & FMODE_RANDOM)) {
-		force_page_cache_ra(&ractl, req_count);
+	if (ractl->file && (ractl->file->f_mode & FMODE_RANDOM)) {
+		force_page_cache_ra(ractl, req_count);
 		return;
 	}
 
 	/* do read-ahead */
-	ondemand_readahead(&ractl, ra, false, req_count);
+	ondemand_readahead(ractl, ra, false, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
-/**
- * page_cache_async_readahead - file readahead for marked pages
- * @mapping: address_space which holds the pagecache and I/O vectors
- * @ra: file_ra_state which holds the readahead state
- * @filp: passed on to ->readpage() and ->readpages()
- * @page: The page at @index which triggered the readahead call.
- * @index: Index of first page to be read.
- * @req_count: Total number of pages being read by the caller.
- *
- * page_cache_async_readahead() should be called when a page is used which
- * is marked as PageReadahead; this is a marker to suggest that the application
- * has used up enough of the readahead window that we should start pulling in
- * more pages.
- */
-void
-page_cache_async_readahead(struct address_space *mapping,
-			   struct file_ra_state *ra, struct file *filp,
-			   struct page *page, pgoff_t index,
-			   unsigned long req_count)
+void page_cache_async_ra(struct readahead_control *ractl,
+		struct file_ra_state *ra, struct page *page,
+		unsigned long req_count)
 {
-	DEFINE_READAHEAD(ractl, filp, mapping, index);
-
 	/* no read-ahead */
 	if (!ra->ra_pages)
 		return;
@@ -624,16 +590,16 @@ page_cache_async_readahead(struct address_space *mapping,
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
 	 */
-	if (inode_read_congested(mapping->host))
+	if (inode_read_congested(ractl->mapping->host))
 		return;
 
 	if (blk_cgroup_congested())
 		return;
 
 	/* do read-ahead */
-	ondemand_readahead(&ractl, ra, true, req_count);
+	ondemand_readahead(ractl, ra, true, req_count);
 }
-EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+EXPORT_SYMBOL_GPL(page_cache_async_ra);
 
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
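
[Editor's note] The payoff named in the commit message comes in a later patch: once do_sync_mmap_readahead() is converted, code that already holds a readahead_control can call the core directly, skipping the inline wrapper and its stack-local rac. A hypothetical caller, with name and shape illustrative only:

	/* Hypothetical: a routine that already owns a readahead_control
	 * can invoke the core readahead logic without unpacking it into
	 * the mapping/file/index arguments the old interface required.
	 */
	static void my_mmap_readahead(struct readahead_control *ractl,
				      struct file_ra_state *ra,
				      unsigned long req_count)
	{
		page_cache_sync_ra(ractl, ra, req_count);
	}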