Commit e844d307 authored by Chuck Lever
Browse files

svcrdma: Add a "deferred close" helper



Refactor a bit of commonly used logic so that every site that wants
a close deferred to an nfsd thread does all the right things
(set_bit(XPT_CLOSE) then enqueue).

Also, once XPT_CLOSE is set on a transport, it is never cleared. If
XPT_CLOSE is already set, then the close is already being handled
and the enqueue can be skipped.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent c558d475
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -143,6 +143,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
int	svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
void	svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt);
void	svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *);
void	svc_xprt_deferred_close(struct svc_xprt *xprt);

static inline void svc_xprt_get(struct svc_xprt *xprt)
{
+14 −0
Original line number Diff line number Diff line
@@ -139,6 +139,20 @@ int svc_print_xprts(char *buf, int maxlen)
	return len;
}

/**
 * svc_xprt_deferred_close - Close a transport
 * @xprt: transport instance
 *
 * Used in contexts that need to defer the work of shutting down
 * the transport to an nfsd thread.
 */
void svc_xprt_deferred_close(struct svc_xprt *xprt)
{
	if (!test_and_set_bit(XPT_CLOSE, &xprt->xpt_flags))
		svc_xprt_enqueue(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_deferred_close);

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
+6 −9
Original line number Diff line number Diff line
@@ -728,10 +728,8 @@ static void svc_tcp_state_change(struct sock *sk)
		rmb();
		svsk->sk_ostate(sk);
		trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
		if (sk->sk_state != TCP_ESTABLISHED) {
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
			svc_xprt_enqueue(&svsk->sk_xprt);
		}
		if (sk->sk_state != TCP_ESTABLISHED)
			svc_xprt_deferred_close(&svsk->sk_xprt);
	}
}

@@ -901,7 +899,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
	net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
			       __func__, svsk->sk_xprt.xpt_server->sv_name,
			       svc_sock_reclen(svsk));
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_short:
	return -EAGAIN;
}
@@ -1057,7 +1055,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
	svsk->sk_datalen = 0;
err_delete:
	trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_noclose:
	return 0;	/* record not complete */
}
@@ -1188,8 +1186,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
		  xprt->xpt_server->sv_name,
		  (err < 0) ? "got error" : "sent",
		  (err < 0) ? err : sent, xdr->len);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	svc_xprt_deferred_close(xprt);
	atomic_dec(&svsk->sk_sendqlen);
	mutex_unlock(&xprt->xpt_mutex);
	return -EAGAIN;
@@ -1268,7 +1265,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
		case TCP_ESTABLISHED:
			break;
		default:
			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
			svc_xprt_deferred_close(&svsk->sk_xprt);
		}
	}
}
+1 −2
Original line number Diff line number Diff line
@@ -367,8 +367,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)

flushed:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
+2 −3
Original line number Diff line number Diff line
@@ -250,7 +250,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_deferred_close(&rdma->sc_xprt);

	svc_rdma_write_info_free(info);
}
@@ -334,7 +334,6 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
@@ -373,7 +372,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_deferred_close(&rdma->sc_xprt);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
Loading