Unverified Commit 0d6aeaf5 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!825 [OLK-5.10] net/smc: optimization related to data transmission

Merge Pull Request from: @zengyingyu11 
 
Content:
    1. Send CDC messages inline, which can help reduce sending latency.
    2. Send small packets with the inline flag, which can help reduce sending latency.
    3. Do not send messages in the receiving process when tx is not blocked.

Related issue:
https://gitee.com/openeuler/kernel/issues/I77V5Z 
 
Link: https://gitee.com/openeuler/kernel/pulls/825

 

Reviewed-by: default avatarJiao Litao <jiaolitao@sangfor.com.cn>
Reviewed-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents fd46506a fa3512b7
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -360,7 +360,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
	}

	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if ((diff_cons && smc_tx_prepared_sends(conn)) ||
	if ((diff_cons && smc_tx_prepared_sends(conn) &&
	     conn->local_tx_ctrl.prod_flags.write_blocked) ||
	    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
		if (!sock_owned_by_user(&smc->sk))
+1 −0
Original line number Diff line number Diff line
@@ -371,6 +371,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk)
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
			.max_inline_data = 0,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
+12 −5
Original line number Diff line number Diff line
@@ -374,12 +374,20 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
		struct ib_rdma_wr *wr = &wr_rdma_buf->wr_tx_rdma[dstchunk];
		struct ib_sge *sge = wr->wr.sg_list;
		u64 base_addr = dma_addr;

		if (dst_len < link->qp_attr.cap.max_inline_data) {
			base_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
			wr->wr.send_flags |= IB_SEND_INLINE;
		} else {
			wr->wr.send_flags &= ~IB_SEND_INLINE;
		}

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].addr = base_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

@@ -393,8 +401,7 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr);
		if (rc)
			return rc;
		if (dst_len_sum == len)
+4 −1
Original line number Diff line number Diff line
@@ -488,9 +488,10 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk)
static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;
	bool send_inline = (lnk->qp_attr.cap.max_inline_data > SMC_WR_TX_SIZE);

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
		lnk->wr_tx_sges[i].addr = send_inline ? (uintptr_t)(&lnk->wr_tx_bufs[i]) :
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
@@ -508,6 +509,8 @@ static void smc_wr_init_sge(struct smc_link *lnk)
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		if (send_inline)
			lnk->wr_tx_ibs[i].send_flags |= IB_SEND_INLINE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =