Commit 4090b314 authored by David Howells's avatar David Howells
Browse files

netfs: Add a function to consolidate beginning a read



Add a function to do the steps needed to begin a read request, allowing
this code to be removed from several other functions and consolidated.

Changes
=======
ver #2)
 - Move before the unstaticking patch so that some functions can be left
   static.
 - Set uninitialised return code in netfs_begin_read()[1][2].
 - Fixed a refleak caused by non-removal of a get from netfs_write_begin()
   when the request submission code got moved to netfs_begin_read().
 - Use INIT_WORK() to (re-)init the request work_struct[3].

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com

Link: https://lore.kernel.org/r/20220303163826.1120936-1-nathan@kernel.org/ [1]
Link: https://lore.kernel.org/r/20220303235647.1297171-1-colin.i.king@gmail.com/ [2]
Link: https://lore.kernel.org/r/9d69be49081bccff44260e4c6e0049c63d6d04a1.camel@redhat.com/ [3]
Link: https://lore.kernel.org/r/164623004355.3564931.7275693529042495641.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678214287.1200972.16734134007649832160.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692911113.2099075.1060868473229451371.stgit@warthog.procyon.org.uk/ # v3
parent bc899ee1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
 */
extern unsigned int netfs_debug;

void netfs_rreq_work(struct work_struct *work);
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);

/*
 * stats.c
+0 −1
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	INIT_LIST_HEAD(&rreq->subrequests);
	INIT_WORK(&rreq->work, netfs_rreq_work);
	refcount_set(&rreq->ref, 1);
	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
+72 −72
Original line number Diff line number Diff line
@@ -443,7 +443,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
	netfs_rreq_completed(rreq, was_async);
}

void netfs_rreq_work(struct work_struct *work)
static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
@@ -688,6 +688,69 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
	return false;
}

/*
 * Begin the process of reading in a chunk of data, where that data may be
 * stitched together from multiple sources, including multiple servers and the
 * local cache.
 *
 * @rreq: The read request to begin; the caller's ref is consumed on the
 *	  zero-length error path and, for sync, after completion.
 * @sync: If true, wait for the request to complete and return its error
 *	  status; if false, kick off the submissions and return 0 immediately,
 *	  leaving completion to the last subrequest's callback.
 *
 * Returns 0 on success (or always 0 for async), or a negative errno (-EIO
 * for a zero-length or short read, otherwise the request's error).
 */
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
	unsigned int debug_index = 0;
	int ret;

	_enter("R=%x %llx-%llx",
	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

	/* A zero-length read is a caller bug; drop the request and bail. */
	if (rreq->len == 0) {
		pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
		netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
		return -EIO;
	}

	/* (Re-)initialise the work item in case the request struct was
	 * allocated by a path that didn't set it up (per changelog, this
	 * consolidation moved setup here using INIT_WORK).
	 */
	INIT_WORK(&rreq->work, netfs_rreq_work);

	/* For a synchronous read, take an extra ref so the request can't be
	 * freed out from under us while we wait below; dropped via
	 * netfs_rreq_trace_put_hold at the end of the sync branch.
	 */
	if (sync)
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* Chop the read into slices according to what the cache and the netfs
	 * want and submit each one.
	 */
	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	if (sync) {
		/* Keep nr_outstanding incremented so that the ref always belongs to
		 * us, and the service code isn't punted off to a random thread pool to
		 * process.
		 */
		for (;;) {
			/* Wait for all subrequests to finish (our own bias
			 * count of 1 remains), then assess; assessment may
			 * resubmit slices, so loop until IN_PROGRESS clears.
			 */
			wait_var_event(&rreq->nr_outstanding,
				       atomic_read(&rreq->nr_outstanding) == 1);
			netfs_rreq_assess(rreq, false);
			if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
				break;
			cond_resched();
		}

		ret = rreq->error;
		/* No error reported, but less data arrived than was asked
		 * for: treat the short read as an I/O failure.
		 */
		if (ret == 0 && rreq->submitted < rreq->len) {
			trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
			ret = -EIO;
		}
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	} else {
		/* If we decrement nr_outstanding to 0, the ref belongs to us. */
		if (atomic_dec_and_test(&rreq->nr_outstanding))
			netfs_rreq_assess(rreq, false);
		ret = 0;
	}
	return ret;
}

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
@@ -750,7 +813,6 @@ void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -777,22 +839,13 @@ void netfs_readahead(struct readahead_control *ractl)

	netfs_rreq_expand(rreq, ractl);

	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
	 */
	while (readahead_folio(ractl))
		;

	/* If we decrement nr_outstanding to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_assess(rreq, false);
	netfs_begin_read(rreq, false);
	return;

cleanup_free:
@@ -821,7 +874,6 @@ int netfs_readpage(struct file *file, struct page *subpage)
	struct address_space *mapping = folio->mapping;
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx", folio_index(folio));
@@ -836,42 +888,16 @@ int netfs_readpage(struct file *file, struct page *subpage)

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
			folio_unlock(folio);
			goto out;
		}
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

	netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_outstanding incremented so that the ref always belongs to us, and
	 * the service code isn't punted off to a random thread pool to
	 * process.
	 */
	do {
		wait_var_event(&rreq->nr_outstanding,
			       atomic_read(&rreq->nr_outstanding) == 1);
		netfs_rreq_assess(rreq, false);
	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
		ret = -EIO;
	}
out:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	return ret;
discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
@@ -966,7 +992,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
	struct folio *folio;
	unsigned int debug_index = 0, fgp_flags;
	unsigned int fgp_flags;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

@@ -1029,39 +1055,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);
	netfs_get_request(rreq, netfs_rreq_trace_get_hold);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	atomic_set(&rreq->nr_outstanding, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_outstanding incremented so that the ref always belongs to
	 * us, and the service code isn't punted off to a random thread pool to
	 * process.
	 */
	for (;;) {
		wait_var_event(&rreq->nr_outstanding,
			       atomic_read(&rreq->nr_outstanding) == 1);
		netfs_rreq_assess(rreq, false);
		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
			break;
		cond_resched();
	}

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
		ret = -EIO;
	}
	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

+3 −2
Original line number Diff line number Diff line
@@ -56,17 +56,18 @@
	EM(netfs_fail_check_write_begin,	"check-write-begin")	\
	EM(netfs_fail_copy_to_cache,		"copy-to-cache")	\
	EM(netfs_fail_read,			"read")			\
	EM(netfs_fail_short_readpage,		"short-readpage")	\
	EM(netfs_fail_short_write_begin,	"short-write-begin")	\
	EM(netfs_fail_short_read,		"short-read")		\
	E_(netfs_fail_prepare_write,		"prep-write")

#define netfs_rreq_ref_traces					\
	EM(netfs_rreq_trace_get_hold,		"GET HOLD   ")	\
	EM(netfs_rreq_trace_get_subreq,		"GET SUBREQ ")	\
	EM(netfs_rreq_trace_put_complete,	"PUT COMPLT ")	\
	EM(netfs_rreq_trace_put_discard,	"PUT DISCARD")	\
	EM(netfs_rreq_trace_put_failed,		"PUT FAILED ")	\
	EM(netfs_rreq_trace_put_hold,		"PUT HOLD   ")	\
	EM(netfs_rreq_trace_put_subreq,		"PUT SUBREQ ")	\
	EM(netfs_rreq_trace_put_zero_len,	"PUT ZEROLEN")	\
	E_(netfs_rreq_trace_new,		"NEW        ")

#define netfs_sreq_ref_traces					\