Unverified Commit 0f9e7446 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!813 [OLK-5.10] net/smc: backport dma sync ops optimize

Merge Pull Request from: @giree2 
 
Backport of the DMA sync ops optimization series:

1. net/smc: remove redundant dma sync ops
2. net/smc: optimize for smc_sndbuf_sync_sg_for_device and smc_rmb_sync_sg_for_cpu 
 
Link: https://gitee.com/openeuler/kernel/pulls/813

 

Reviewed-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents b4796f43 57f34783
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -814,7 +814,6 @@ static int smc_connect_rdma(struct smc_sock *smc,
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   SMC_V1);
@@ -1598,7 +1597,6 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}
+8 −21
Original line number Diff line number Diff line
@@ -1453,6 +1453,9 @@ static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
		goto free_table;
	}

	buf_desc->is_dma_need_sync |=
		smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
@@ -1681,6 +1684,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			buf_desc->is_dma_need_sync = 0;
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}
@@ -1729,15 +1733,10 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
	return 0;
}

/* Sync the connection's send buffer for CPU access on its current link.
 * No-op for SMC-D link groups (no RDMA DMA mapping to sync), for
 * connections without a link group, and for inactive links.
 */
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	if (!smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->sndbuf_desc->is_dma_need_sync)
		return;
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
@@ -1747,26 +1746,14 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
/* Sync the connection's RMB (receive buffer) for CPU access on every active
 * link of the link group, with direction DMA_FROM_DEVICE.
 *
 * NOTE(review): this is a diff hunk whose +/- markers were stripped (the page
 * header reports "+8 −21"). The two adjacent guard `if` lines below appear to
 * be the removed (lgr/is_smcd) and added (is_dma_need_sync) versions of the
 * same check, juxtaposed by the stripping — the post-patch function presumably
 * keeps only the is_dma_need_sync test. Confirm against the upstream patch
 * before treating this text as compilable source.
 */
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
	if (!conn->rmb_desc->is_dma_need_sync)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		/* skip links that are not active; sync only mapped, live links */
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

/* Sync the connection's RMB for device access across the link group's links.
 *
 * NOTE(review): stripped diff hunk — this text is not valid C as it stands.
 * The unterminated smc_ib_sync_sg_for_device(... line and the
 * smc_ib_sync_sg_for_cpu(... line directly below it look like the removed and
 * added sides of the same statement; per the backport description ("remove
 * redundant dma sync ops") this function is presumably deleted in the
 * post-patch tree. Verify against the upstream commit before reuse.
 */
void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}
+1 −2
Original line number Diff line number Diff line
@@ -162,6 +162,7 @@ struct smc_buf_desc {
					/* mem region registered */
			u8		is_map_ib[SMC_LINKS_PER_LGR_MAX];
					/* mem region mapped to lnk */
			u8		is_dma_need_sync;
			u8		is_reg_err;
					/* buffer registration err */
		};
@@ -392,10 +393,8 @@ void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey);
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);

void smc_conn_free(struct smc_connection *conn);
+29 −0
Original line number Diff line number Diff line
@@ -428,6 +428,29 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
	return 0;
}

/* Return true if any mapped DMA address of @buf_slot on link @lnk needs
 * explicit cache synchronization (per dma_need_sync()), false otherwise.
 * Iteration stops at the first zero-length sg entry.
 */
bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
				  sg_dma_address(sg)))
			return true;
	}

	return false;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
@@ -436,6 +459,9 @@ void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
	struct scatterlist *sg;
	unsigned int i;

	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
@@ -456,6 +482,9 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
	struct scatterlist *sg;
	unsigned int i;

	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
+2 −0
Original line number Diff line number Diff line
@@ -79,6 +79,8 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx);
void smc_ib_put_memory_region(struct ib_mr *mr);
bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot);
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction);
Loading