Commit 923e2f0e authored by Matthew Wilcox (Oracle), committed by Andrew Morton
Browse files

shmem: remove shmem_getpage()

With all callers removed, remove this wrapper function.  The flags are now
mysteriously called SGP, but I think we can live with that.

Link: https://lkml.kernel.org/r/20220902194653.1739778-34-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 12acf4fb
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);

/* Flag allocation requirements to shmem_getpage */
/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
@@ -111,8 +111,6 @@ enum sgp_type {
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

extern int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp);
int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
		enum sgp_type sgp);

+1 −14
Original line number Diff line number Diff line
@@ -179,7 +179,7 @@ static inline int shmem_reacct_size(unsigned long flags,
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
@@ -2031,19 +2031,6 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
			mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

/*
 * shmem_getpage - legacy struct page wrapper around shmem_get_folio().
 * @inode: inode of the shmem file
 * @index: page cache index to look up
 * @pagep: out: the page for @index, or NULL when no folio was found
 * @sgp: allocation policy (see enum sgp_type)
 *
 * Returns the error code from shmem_get_folio(); on success *@pagep is
 * the precise page within the (possibly large) folio covering @index.
 */
int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	struct folio *folio = NULL;
	int err = shmem_get_folio(inode, index, &folio, sgp);

	*pagep = folio ? folio_file_page(folio, index) : NULL;
	return err;
}

/*
 * This is like autoremove_wake_function, but it removes the wait queue
 * entry unconditionally - even if something else had already woken the