Unverified commit cc10030e authored by openeuler-ci-bot, committed by Gitee
Browse files

!15470 some readahead improvement

Merge Pull Request from: @ci-robot 
 
PR sync from: Tong Tiangen <tongtiangen@huawei.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/L7BXXR5XAIPVIWDMOPY53NJF7MEOHVPA/ 
Jan Kara (3):
  readahead: make sure sync readahead reads needed page
  readahead: don't shorten readahead window in read_pages()
  readahead: properly shorten readahead when falling back to
    do_page_cache_ra()

 
https://gitee.com/openeuler/kernel/issues/IB9L9C
https://gitee.com/openeuler/kernel/issues/IBRWBB 
 
Link: https://gitee.com/openeuler/kernel/pulls/15470

 

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents fd77bc4c 169b8873
Loading
Loading
Loading
Loading
+13 −16
Original line number Diff line number Diff line
@@ -164,20 +164,10 @@ static void read_pages(struct readahead_control *rac)

	if (aops->readahead) {
		aops->readahead(rac);
		/*
		 * Clean up the remaining folios.  The sizes in ->ra
		 * may be used to size the next readahead, so make sure
		 * they accurately reflect what happened.
		 */
		/* Clean up the remaining folios. */
		while ((folio = readahead_folio(rac)) != NULL) {
			unsigned long nr = folio_nr_pages(folio);

			folio_get(folio);
			rac->ra->size -= nr;
			if (rac->ra->async_size >= nr) {
				rac->ra->async_size -= nr;
			filemap_remove_folio(folio);
			}
			folio_unlock(folio);
			folio_put(folio);
		}
@@ -497,7 +487,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned int new_order)
{
	struct address_space *mapping = ractl->mapping;
	pgoff_t index = readahead_index(ractl);
	pgoff_t start = readahead_index(ractl);
	pgoff_t index = start;
	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
	pgoff_t mark = index + ra->size - ra->async_size;
	unsigned int nofs;
@@ -556,12 +547,18 @@ void page_cache_ra_order(struct readahead_control *ractl,
	/*
	 * If there were already pages in the page cache, then we may have
	 * left some gaps.  Let the regular readahead code take care of this
	 * situation.
	 * situation below.
	 */
	if (!err)
		return;
fallback:
	do_page_cache_ra(ractl, ra->size, ra->async_size);
	/*
	 * ->readahead() may have updated readahead window size so we have to
	 * check there's still something to read.
	 */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);
}

/*
@@ -597,7 +594,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
	 */
	expected = round_down(ra->start + ra->size - ra->async_size,
			1UL << order);
	if (index == expected || index == (ra->start + ra->size)) {
	if (folio && index == expected) {
		ra->start += ra->size;
		/*
		 * In the case of MADV_HUGEPAGE, the actual size might exceed