Commit 0127f25b authored by Linus Torvalds
Browse files
Pull NFS client updates from Anna Schumaker:
 "New Features:

   - Convert the readdir path to use folios

   - Convert the NFS fscache code to use netfs

  Bugfixes and Cleanups:

   - Always send a RECLAIM_COMPLETE after establishing a lease

   - Simplify sysctl registrations and other cleanups

   - Handle out-of-order write replies on NFS v3

   - Have sunrpc call_bind_status use standard hard/soft task semantics

   - Other minor cleanups"

* tag 'nfs-for-6.4-1' of git://git.linux-nfs.org/projects/anna/linux-nfs:
  NFSv4.2: Rework scratch handling for READ_PLUS
  NFS: Cleanup unused rpc_clnt variable
  NFS: set variable nfs_netfs_debug_id storage-class-specifier to static
  SUNRPC: remove the maximum number of retries in call_bind_status
  NFS: Convert readdir page array functions to use a folio
  NFS: Convert the readdir array-of-pages into an array-of-folios
  NFSv3: handle out-of-order write replies.
  NFS: Remove fscache specific trace points and NFS_INO_FSCACHE bit
  NFS: Remove all NFSIOS_FSCACHE counters due to conversion to netfs API
  NFS: Convert buffered read paths to use netfs when fscache is enabled
  NFS: Configure support for netfs when NFS fscache is configured
  NFS: Rename readpage_async_filler to nfs_read_add_folio
  sunrpc: simplify one-level sysctl registration for debug_table
  sunrpc: move sunrpc_table and proc routines above
  sunrpc: simplify one-level sysctl registration for xs_tunables_table
  sunrpc: simplify one-level sysctl registration for xr_tunables_table
  nfs: simplify two-level sysctl registration for nfs_cb_sysctls
  nfs: simplify two-level sysctl registration for nfs4_cb_sysctls
  lockd: simplify two-level sysctl registration for nlm_sysctls
  NFSv4.1: Always send a RECLAIM_COMPLETE after establishing lease
parents 1e098dec fbd2a05f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -170,6 +170,7 @@ config ROOT_NFS
config NFS_FSCACHE
	bool "Provide NFS client caching support"
	depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
	select NETFS_SUPPORT
	help
	  Say Y here if you want NFS data to be cached locally on disc through
	  the general filesystem cache manager
+149 −151
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@ static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, struct dir_context *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
static void nfs_readdir_free_folio(struct folio *);
static void nfs_readdir_clear_array(struct folio *);

const struct file_operations nfs_dir_operations = {
	.llseek		= nfs_llseek_dir,
@@ -67,7 +67,7 @@ const struct file_operations nfs_dir_operations = {
};

const struct address_space_operations nfs_dir_aops = {
	.free_folio = nfs_readdir_free_folio,
	.free_folio = nfs_readdir_clear_array,
};

#define NFS_INIT_DTSIZE PAGE_SIZE
@@ -146,18 +146,18 @@ struct nfs_cache_array {
	u64 change_attr;
	u64 last_cookie;
	unsigned int size;
	unsigned char page_full : 1,
		      page_is_eof : 1,
	unsigned char folio_full : 1,
		      folio_is_eof : 1,
		      cookies_are_ordered : 1;
	struct nfs_cache_array_entry array[];
};

struct nfs_readdir_descriptor {
	struct file	*file;
	struct page	*page;
	struct folio	*folio;
	struct dir_context *ctx;
	pgoff_t		page_index;
	pgoff_t		page_index_max;
	pgoff_t		folio_index;
	pgoff_t		folio_index_max;
	u64		dir_cookie;
	u64		last_cookie;
	loff_t		current_index;
@@ -198,17 +198,17 @@ static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc)
	nfs_set_dtsize(desc, desc->dtsize << 1);
}

static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie,
static void nfs_readdir_folio_init_array(struct folio *folio, u64 last_cookie,
					 u64 change_attr)
{
	struct nfs_cache_array *array;

	array = kmap_local_page(page);
	array = kmap_local_folio(folio, 0);
	array->change_attr = change_attr;
	array->last_cookie = last_cookie;
	array->size = 0;
	array->page_full = 0;
	array->page_is_eof = 0;
	array->folio_full = 0;
	array->folio_is_eof = 0;
	array->cookies_are_ordered = 1;
	kunmap_local(array);
}
@@ -216,44 +216,39 @@ static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie,
/*
 * we are freeing strings created by nfs_add_to_readdir_array()
 */
static void nfs_readdir_clear_array(struct page *page)
static void nfs_readdir_clear_array(struct folio *folio)
{
	struct nfs_cache_array *array;
	unsigned int i;

	array = kmap_local_page(page);
	array = kmap_local_folio(folio, 0);
	for (i = 0; i < array->size; i++)
		kfree(array->array[i].name);
	array->size = 0;
	kunmap_local(array);
}

static void nfs_readdir_free_folio(struct folio *folio)
{
	nfs_readdir_clear_array(&folio->page);
}

static void nfs_readdir_page_reinit_array(struct page *page, u64 last_cookie,
static void nfs_readdir_folio_reinit_array(struct folio *folio, u64 last_cookie,
					   u64 change_attr)
{
	nfs_readdir_clear_array(page);
	nfs_readdir_page_init_array(page, last_cookie, change_attr);
	nfs_readdir_clear_array(folio);
	nfs_readdir_folio_init_array(folio, last_cookie, change_attr);
}

static struct page *
nfs_readdir_page_array_alloc(u64 last_cookie, gfp_t gfp_flags)
static struct folio *
nfs_readdir_folio_array_alloc(u64 last_cookie, gfp_t gfp_flags)
{
	struct page *page = alloc_page(gfp_flags);
	if (page)
		nfs_readdir_page_init_array(page, last_cookie, 0);
	return page;
	struct folio *folio = folio_alloc(gfp_flags, 0);
	if (folio)
		nfs_readdir_folio_init_array(folio, last_cookie, 0);
	return folio;
}

static void nfs_readdir_page_array_free(struct page *page)
static void nfs_readdir_folio_array_free(struct folio *folio)
{
	if (page) {
		nfs_readdir_clear_array(page);
		put_page(page);
	if (folio) {
		nfs_readdir_clear_array(folio);
		folio_put(folio);
	}
}

@@ -264,13 +259,13 @@ static u64 nfs_readdir_array_index_cookie(struct nfs_cache_array *array)

static void nfs_readdir_array_set_eof(struct nfs_cache_array *array)
{
	array->page_is_eof = 1;
	array->page_full = 1;
	array->folio_is_eof = 1;
	array->folio_full = 1;
}

static bool nfs_readdir_array_is_full(struct nfs_cache_array *array)
{
	return array->page_full;
	return array->folio_full;
}

/*
@@ -302,16 +297,16 @@ static size_t nfs_readdir_array_maxentries(void)
 */
static int nfs_readdir_array_can_expand(struct nfs_cache_array *array)
{
	if (array->page_full)
	if (array->folio_full)
		return -ENOSPC;
	if (array->size == nfs_readdir_array_maxentries()) {
		array->page_full = 1;
		array->folio_full = 1;
		return -ENOSPC;
	}
	return 0;
}

static int nfs_readdir_page_array_append(struct page *page,
static int nfs_readdir_folio_array_append(struct folio *folio,
					  const struct nfs_entry *entry,
					  u64 *cookie)
{
@@ -322,7 +317,7 @@ static int nfs_readdir_page_array_append(struct page *page,

	name = nfs_readdir_copy_name(entry->name, entry->len);

	array = kmap_atomic(page);
	array = kmap_atomic(folio_page(folio, 0));
	if (!name)
		goto out;
	ret = nfs_readdir_array_can_expand(array);
@@ -361,17 +356,17 @@ static int nfs_readdir_page_array_append(struct page *page,
 * 127 readdir entries for a typical 64-bit system, that works out to a
 * cache of ~ 33 million entries per directory.
 */
static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
static pgoff_t nfs_readdir_folio_cookie_hash(u64 cookie)
{
	if (cookie == 0)
		return 0;
	return hash_64(cookie, 18);
}

static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
static bool nfs_readdir_folio_validate(struct folio *folio, u64 last_cookie,
				       u64 change_attr)
{
	struct nfs_cache_array *array = kmap_local_page(page);
	struct nfs_cache_array *array = kmap_local_folio(folio, 0);
	int ret = true;

	if (array->change_attr != change_attr)
@@ -382,81 +377,83 @@ static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
	return ret;
}

static void nfs_readdir_page_unlock_and_put(struct page *page)
static void nfs_readdir_folio_unlock_and_put(struct folio *folio)
{
	unlock_page(page);
	put_page(page);
	folio_unlock(folio);
	folio_put(folio);
}

static void nfs_readdir_page_init_and_validate(struct page *page, u64 cookie,
static void nfs_readdir_folio_init_and_validate(struct folio *folio, u64 cookie,
						u64 change_attr)
{
	if (PageUptodate(page)) {
		if (nfs_readdir_page_validate(page, cookie, change_attr))
	if (folio_test_uptodate(folio)) {
		if (nfs_readdir_folio_validate(folio, cookie, change_attr))
			return;
		nfs_readdir_clear_array(page);
		nfs_readdir_clear_array(folio);
	}
	nfs_readdir_page_init_array(page, cookie, change_attr);
	SetPageUptodate(page);
	nfs_readdir_folio_init_array(folio, cookie, change_attr);
	folio_mark_uptodate(folio);
}

static struct page *nfs_readdir_page_get_locked(struct address_space *mapping,
static struct folio *nfs_readdir_folio_get_locked(struct address_space *mapping,
						  u64 cookie, u64 change_attr)
{
	pgoff_t index = nfs_readdir_page_cookie_hash(cookie);
	struct page *page;
	pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);
	struct folio *folio;

	page = grab_cache_page(mapping, index);
	if (!page)
	folio = filemap_grab_folio(mapping, index);
	if (!folio)
		return NULL;
	nfs_readdir_page_init_and_validate(page, cookie, change_attr);
	return page;
	nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
	return folio;
}

static u64 nfs_readdir_page_last_cookie(struct page *page)
static u64 nfs_readdir_folio_last_cookie(struct folio *folio)
{
	struct nfs_cache_array *array;
	u64 ret;

	array = kmap_local_page(page);
	array = kmap_local_folio(folio, 0);
	ret = array->last_cookie;
	kunmap_local(array);
	return ret;
}

static bool nfs_readdir_page_needs_filling(struct page *page)
static bool nfs_readdir_folio_needs_filling(struct folio *folio)
{
	struct nfs_cache_array *array;
	bool ret;

	array = kmap_local_page(page);
	array = kmap_local_folio(folio, 0);
	ret = !nfs_readdir_array_is_full(array);
	kunmap_local(array);
	return ret;
}

static void nfs_readdir_page_set_eof(struct page *page)
static void nfs_readdir_folio_set_eof(struct folio *folio)
{
	struct nfs_cache_array *array;

	array = kmap_local_page(page);
	array = kmap_local_folio(folio, 0);
	nfs_readdir_array_set_eof(array);
	kunmap_local(array);
}

static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
static struct folio *nfs_readdir_folio_get_next(struct address_space *mapping,
						u64 cookie, u64 change_attr)
{
	pgoff_t index = nfs_readdir_page_cookie_hash(cookie);
	struct page *page;
	pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);
	struct folio *folio;

	page = grab_cache_page_nowait(mapping, index);
	if (!page)
	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
	if (!folio)
		return NULL;
	nfs_readdir_page_init_and_validate(page, cookie, change_attr);
	if (nfs_readdir_page_last_cookie(page) != cookie)
		nfs_readdir_page_reinit_array(page, cookie, change_attr);
	return page;
	nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
	if (nfs_readdir_folio_last_cookie(folio) != cookie)
		nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
	return folio;
}

static inline
@@ -481,11 +478,11 @@ bool nfs_readdir_use_cookie(const struct file *filp)
static void nfs_readdir_seek_next_array(struct nfs_cache_array *array,
					struct nfs_readdir_descriptor *desc)
{
	if (array->page_full) {
	if (array->folio_full) {
		desc->last_cookie = array->last_cookie;
		desc->current_index += array->size;
		desc->cache_entry_index = 0;
		desc->page_index++;
		desc->folio_index++;
	} else
		desc->last_cookie = nfs_readdir_array_index_cookie(array);
}
@@ -494,7 +491,7 @@ static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc)
{
	desc->current_index = 0;
	desc->last_cookie = 0;
	desc->page_index = 0;
	desc->folio_index = 0;
}

static int nfs_readdir_search_for_pos(struct nfs_cache_array *array,
@@ -506,7 +503,7 @@ static int nfs_readdir_search_for_pos(struct nfs_cache_array *array,
	if (diff < 0)
		goto out_eof;
	if (diff >= array->size) {
		if (array->page_is_eof)
		if (array->folio_is_eof)
			goto out_eof;
		nfs_readdir_seek_next_array(array, desc);
		return -EAGAIN;
@@ -554,7 +551,7 @@ static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array,
		}
	}
check_eof:
	if (array->page_is_eof) {
	if (array->folio_is_eof) {
		status = -EBADCOOKIE;
		if (desc->dir_cookie == array->last_cookie)
			desc->eof = true;
@@ -568,7 +565,7 @@ static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc)
	struct nfs_cache_array *array;
	int status;

	array = kmap_local_page(desc->page);
	array = kmap_local_folio(desc->folio, 0);

	if (desc->dir_cookie == 0)
		status = nfs_readdir_search_for_pos(array, desc);
@@ -819,16 +816,17 @@ static int nfs_readdir_entry_decode(struct nfs_readdir_descriptor *desc,
}

/* Perform conversion from xdr to cache array */
static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc,
				    struct nfs_entry *entry,
				    struct page **xdr_pages, unsigned int buflen,
				   struct page **arrays, size_t narrays,
				    struct folio **arrays, size_t narrays,
				    u64 change_attr)
{
	struct address_space *mapping = desc->file->f_mapping;
	struct folio *new, *folio = *arrays;
	struct xdr_stream stream;
	struct page *scratch;
	struct xdr_buf buf;
	struct page *scratch, *new, *page = *arrays;
	u64 cookie;
	int status;

@@ -844,36 +842,36 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
		if (status != 0)
			break;

		status = nfs_readdir_page_array_append(page, entry, &cookie);
		status = nfs_readdir_folio_array_append(folio, entry, &cookie);
		if (status != -ENOSPC)
			continue;

		if (page->mapping != mapping) {
		if (folio->mapping != mapping) {
			if (!--narrays)
				break;
			new = nfs_readdir_page_array_alloc(cookie, GFP_KERNEL);
			new = nfs_readdir_folio_array_alloc(cookie, GFP_KERNEL);
			if (!new)
				break;
			arrays++;
			*arrays = page = new;
			*arrays = folio = new;
		} else {
			new = nfs_readdir_page_get_next(mapping, cookie,
			new = nfs_readdir_folio_get_next(mapping, cookie,
							 change_attr);
			if (!new)
				break;
			if (page != *arrays)
				nfs_readdir_page_unlock_and_put(page);
			page = new;
			if (folio != *arrays)
				nfs_readdir_folio_unlock_and_put(folio);
			folio = new;
		}
		desc->page_index_max++;
		status = nfs_readdir_page_array_append(page, entry, &cookie);
		desc->folio_index_max++;
		status = nfs_readdir_folio_array_append(folio, entry, &cookie);
	} while (!status && !entry->eof);

	switch (status) {
	case -EBADCOOKIE:
		if (!entry->eof)
			break;
		nfs_readdir_page_set_eof(page);
		nfs_readdir_folio_set_eof(folio);
		fallthrough;
	case -EAGAIN:
		status = 0;
@@ -886,8 +884,8 @@ static int nfs_readdir_page_filler(struct nfs_readdir_descriptor *desc,
			;
	}

	if (page != *arrays)
		nfs_readdir_page_unlock_and_put(page);
	if (folio != *arrays)
		nfs_readdir_folio_unlock_and_put(folio);

	put_page(scratch);
	return status;
@@ -927,11 +925,11 @@ static struct page **nfs_readdir_alloc_pages(size_t npages)

static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
				    __be32 *verf_arg, __be32 *verf_res,
				    struct page **arrays, size_t narrays)
				    struct folio **arrays, size_t narrays)
{
	u64 change_attr;
	struct page **pages;
	struct page *page = *arrays;
	struct folio *folio = *arrays;
	struct nfs_entry *entry;
	size_t array_size;
	struct inode *inode = file_inode(desc->file);
@@ -942,7 +940,7 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->cookie = nfs_readdir_page_last_cookie(page);
	entry->cookie = nfs_readdir_folio_last_cookie(folio);
	entry->fh = nfs_alloc_fhandle();
	entry->fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
	entry->server = NFS_SERVER(inode);
@@ -962,10 +960,10 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,

	pglen = status;
	if (pglen != 0)
		status = nfs_readdir_page_filler(desc, entry, pages, pglen,
		status = nfs_readdir_folio_filler(desc, entry, pages, pglen,
						  arrays, narrays, change_attr);
	else
		nfs_readdir_page_set_eof(page);
		nfs_readdir_folio_set_eof(folio);
	desc->buffer_fills++;

free_pages:
@@ -977,33 +975,33 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
	return status;
}

static void nfs_readdir_page_put(struct nfs_readdir_descriptor *desc)
static void nfs_readdir_folio_put(struct nfs_readdir_descriptor *desc)
{
	put_page(desc->page);
	desc->page = NULL;
	folio_put(desc->folio);
	desc->folio = NULL;
}

static void
nfs_readdir_page_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
nfs_readdir_folio_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
{
	unlock_page(desc->page);
	nfs_readdir_page_put(desc);
	folio_unlock(desc->folio);
	nfs_readdir_folio_put(desc);
}

static struct page *
nfs_readdir_page_get_cached(struct nfs_readdir_descriptor *desc)
static struct folio *
nfs_readdir_folio_get_cached(struct nfs_readdir_descriptor *desc)
{
	struct address_space *mapping = desc->file->f_mapping;
	u64 change_attr = inode_peek_iversion_raw(mapping->host);
	u64 cookie = desc->last_cookie;
	struct page *page;
	struct folio *folio;

	page = nfs_readdir_page_get_locked(mapping, cookie, change_attr);
	if (!page)
	folio = nfs_readdir_folio_get_locked(mapping, cookie, change_attr);
	if (!folio)
		return NULL;
	if (desc->clear_cache && !nfs_readdir_page_needs_filling(page))
		nfs_readdir_page_reinit_array(page, cookie, change_attr);
	return page;
	if (desc->clear_cache && !nfs_readdir_folio_needs_filling(folio))
		nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
	return folio;
}

/*
@@ -1017,21 +1015,21 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
	__be32 verf[NFS_DIR_VERIFIER_SIZE];
	int res;

	desc->page = nfs_readdir_page_get_cached(desc);
	if (!desc->page)
	desc->folio = nfs_readdir_folio_get_cached(desc);
	if (!desc->folio)
		return -ENOMEM;
	if (nfs_readdir_page_needs_filling(desc->page)) {
	if (nfs_readdir_folio_needs_filling(desc->folio)) {
		/* Grow the dtsize if we had to go back for more pages */
		if (desc->page_index == desc->page_index_max)
		if (desc->folio_index == desc->folio_index_max)
			nfs_grow_dtsize(desc);
		desc->page_index_max = desc->page_index;
		desc->folio_index_max = desc->folio_index;
		trace_nfs_readdir_cache_fill(desc->file, nfsi->cookieverf,
					     desc->last_cookie,
					     desc->page->index, desc->dtsize);
					     desc->folio->index, desc->dtsize);
		res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf,
					       &desc->page, 1);
					       &desc->folio, 1);
		if (res < 0) {
			nfs_readdir_page_unlock_and_put_cached(desc);
			nfs_readdir_folio_unlock_and_put_cached(desc);
			trace_nfs_readdir_cache_fill_done(inode, res);
			if (res == -EBADCOOKIE || res == -ENOTSYNC) {
				invalidate_inode_pages2(desc->file->f_mapping);
@@ -1059,7 +1057,7 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
	res = nfs_readdir_search_array(desc);
	if (res == 0)
		return 0;
	nfs_readdir_page_unlock_and_put_cached(desc);
	nfs_readdir_folio_unlock_and_put_cached(desc);
	return res;
}

@@ -1087,7 +1085,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
	unsigned int i;
	bool first_emit = !desc->dir_cookie;

	array = kmap_local_page(desc->page);
	array = kmap_local_folio(desc->folio, 0);
	for (i = desc->cache_entry_index; i < array->size; i++) {
		struct nfs_cache_array_entry *ent;

@@ -1114,7 +1112,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
			break;
		}
	}
	if (array->page_is_eof)
	if (array->folio_is_eof)
		desc->eof = !desc->eob;

	kunmap_local(array);
@@ -1136,7 +1134,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
 */
static int uncached_readdir(struct nfs_readdir_descriptor *desc)
{
	struct page	**arrays;
	struct folio	**arrays;
	size_t		i, sz = 512;
	__be32		verf[NFS_DIR_VERIFIER_SIZE];
	int		status = -ENOMEM;
@@ -1147,14 +1145,14 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
	arrays = kcalloc(sz, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		goto out;
	arrays[0] = nfs_readdir_page_array_alloc(desc->dir_cookie, GFP_KERNEL);
	arrays[0] = nfs_readdir_folio_array_alloc(desc->dir_cookie, GFP_KERNEL);
	if (!arrays[0])
		goto out;

	desc->page_index = 0;
	desc->folio_index = 0;
	desc->cache_entry_index = 0;
	desc->last_cookie = desc->dir_cookie;
	desc->page_index_max = 0;
	desc->folio_index_max = 0;

	trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie,
				   -1, desc->dtsize);
@@ -1166,10 +1164,10 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
	}

	for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
		desc->page = arrays[i];
		desc->folio = arrays[i];
		nfs_do_filldir(desc, verf);
	}
	desc->page = NULL;
	desc->folio = NULL;

	/*
	 * Grow the dtsize if we have to go back for more pages,
@@ -1179,16 +1177,16 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
		if (!desc->eob)
			nfs_grow_dtsize(desc);
		else if (desc->buffer_fills == 1 &&
			 i < (desc->page_index_max >> 1))
			 i < (desc->folio_index_max >> 1))
			nfs_shrink_dtsize(desc);
	}
out_free:
	for (i = 0; i < sz && arrays[i]; i++)
		nfs_readdir_page_array_free(arrays[i]);
		nfs_readdir_folio_array_free(arrays[i]);
out:
	if (!nfs_readdir_use_cookie(desc->file))
		nfs_readdir_rewind_search(desc);
	desc->page_index_max = -1;
	desc->folio_index_max = -1;
	kfree(arrays);
	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status);
	return status;
@@ -1240,11 +1238,11 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
		goto out;
	desc->file = file;
	desc->ctx = ctx;
	desc->page_index_max = -1;
	desc->folio_index_max = -1;

	spin_lock(&file->f_lock);
	desc->dir_cookie = dir_ctx->dir_cookie;
	desc->page_index = dir_ctx->page_index;
	desc->folio_index = dir_ctx->page_index;
	desc->last_cookie = dir_ctx->last_cookie;
	desc->attr_gencount = dir_ctx->attr_gencount;
	desc->eof = dir_ctx->eof;
@@ -1291,8 +1289,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
			break;

		nfs_do_filldir(desc, nfsi->cookieverf);
		nfs_readdir_page_unlock_and_put_cached(desc);
		if (desc->page_index == desc->page_index_max)
		nfs_readdir_folio_unlock_and_put_cached(desc);
		if (desc->folio_index == desc->folio_index_max)
			desc->clear_cache = force_clear;
	} while (!desc->eob && !desc->eof);

@@ -1300,7 +1298,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
	dir_ctx->dir_cookie = desc->dir_cookie;
	dir_ctx->last_cookie = desc->last_cookie;
	dir_ctx->attr_gencount = desc->attr_gencount;
	dir_ctx->page_index = desc->page_index;
	dir_ctx->page_index = desc->folio_index;
	dir_ctx->force_clear = force_clear;
	dir_ctx->eof = desc->eof;
	dir_ctx->dtsize = desc->dtsize;
+142 −96

File changed.

Preview size limit exceeded, changes collapsed.

+92 −39
Original line number Diff line number Diff line
@@ -34,6 +34,58 @@ struct nfs_fscache_inode_auxdata {
	u64	change_attr;
};

struct nfs_netfs_io_data {
	/*
	 * NFS may split a netfs_io_subrequest into multiple RPCs, each
	 * with their own read completion.  In netfs, we can only call
	 * netfs_subreq_terminated() once for each subrequest.  Use the
	 * refcount here to double as a marker of the last RPC completion,
	 * and only call netfs via netfs_subreq_terminated() once.
	 */
	refcount_t			refcount;
	struct netfs_io_subrequest	*sreq;

	/*
	 * Final disposition of the netfs_io_subrequest, sent in
	 * netfs_subreq_terminated()
	 */
	atomic64_t	transferred;
	int		error;
};

static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
{
	refcount_inc(&netfs->refcount);
}

static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
{
	ssize_t final_len;

	/* Only the last RPC completion should call netfs_subreq_terminated() */
	if (!refcount_dec_and_test(&netfs->refcount))
		return;

	/*
	 * The NFS pageio interface may read a complete page, even when netfs
	 * only asked for a partial page.  Specifically, this may be seen when
	 * one thread is truncating a file while another one is reading the last
	 * page of the file.
	 * Correct the final length here to be no larger than the netfs subrequest
	 * length, and thus avoid netfs's "Subreq overread" warning message.
	 */
	final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
	netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
	kfree(netfs);
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
}
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
extern int nfs_netfs_folio_unlock(struct folio *folio);

/*
 * fscache.c
 */
@@ -44,9 +96,8 @@ extern void nfs_fscache_init_inode(struct inode *);
extern void nfs_fscache_clear_inode(struct inode *);
extern void nfs_fscache_open_file(struct inode *, struct file *);
extern void nfs_fscache_release_file(struct inode *, struct file *);

extern int __nfs_fscache_read_page(struct inode *, struct page *);
extern void __nfs_fscache_write_page(struct inode *, struct page *);
extern int nfs_netfs_readahead(struct readahead_control *ractl);
extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);

static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
@@ -54,34 +105,11 @@ static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
		fscache_note_page_release(nfs_i_fscache(folio->mapping->host));
		nfs_inc_fscache_stats(folio->mapping->host,
				      NFSIOS_FSCACHE_PAGES_UNCACHED);
	}
	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
	return true;
}

/*
 * Retrieve a page from an inode data storage object.
 */
static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
{
	if (nfs_i_fscache(inode))
		return __nfs_fscache_read_page(inode, page);
	return -ENOBUFS;
}

/*
 * Store a page newly fetched from the server in an inode data storage object
 * in the cache.
 */
static inline void nfs_fscache_write_page(struct inode *inode,
					   struct page *page)
{
	if (nfs_i_fscache(inode))
		__nfs_fscache_write_page(inode, page);
}

static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
					      struct inode *inode)
{
@@ -101,13 +129,10 @@ static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *
static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie =  netfs_i_cookie(&NFS_I(inode)->netfs);

	if (nfsi->fscache) {
	nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(nfsi->fscache, &auxdata,
				   i_size_read(inode), flags);
	}
	fscache_invalidate(cookie, &auxdata, i_size_read(inode), flags);
}

/*
@@ -120,7 +145,28 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
	return "no ";
}

static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc)
{
	hdr->netfs = desc->pg_netfs;
}
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr)
{
	desc->pg_netfs = hdr->netfs;
}
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
{
	desc->pg_netfs = NULL;
}
#else /* CONFIG_NFS_FSCACHE */
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
static inline int nfs_netfs_folio_unlock(struct folio *folio)
{
	return 1;
}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}

static inline void nfs_fscache_init_inode(struct inode *inode) {}
@@ -128,22 +174,29 @@ static inline void nfs_fscache_clear_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
					 struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}

static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
static inline int nfs_netfs_readahead(struct readahead_control *ractl)
{
	return true; /* may release folio */
	return -ENOBUFS;
}
static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	return -ENOBUFS;
}
static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) {}

static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
	return true; /* may release folio */
}
static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}

static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
	return "no ";
}

static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc) {}
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
#endif /* CONFIG_NFS_FSCACHE */
#endif /* _NFS_FSCACHE_H */
+99 −15

File changed.

Preview size limit exceeded, changes collapsed.

Loading