Commit 0182d078 authored by Subbaraya Sundeep, committed by David S. Miller
Browse files

octeontx2-pf: Simplify the receive buffer size calculation



This patch separates the logic for configuring the hardware
maximum transmit frame size from the logic for configuring the
receive frame size. This simplifies the receive buffer size
calculation and allows using CQE descriptors of different sizes.
Also, additional space for a skb_shared_info structure is
currently allocated in each receive buffer pointer given to
hardware, which is unnecessary. Hence change the size
calculation to exclude the size of skb_shared_info. Add an
array out-of-bounds check when adding fragments to the
network stack.

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b9c56ccb
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQ's to that SMQ  */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
	aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
+4 −6
Original line number Diff line number Diff line
@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
		return -ENOMEM;
	}

	req->maxlen = pfvf->max_frs;
	req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
	u64 schq, parent;
	u64 dwrr_val;

	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
	/* Set topology e.t.c configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
				  | OTX2_MIN_MTU;

		req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQ's to that SMQ  */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
+1 −1
Original line number Diff line number Diff line
@@ -326,7 +326,7 @@ struct otx2_nic {
	struct net_device	*netdev;
	struct dev_hw_ops	*hw_ops;
	void			*iommu_domain;
	u16			max_frs;
	u16			tx_max_pktlen;
	u16			rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
+7 −8
Original line number Diff line number Diff line
@@ -1312,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
	 * NIX transfers entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. First segment
	 * has additional data prepended to packet. Also software omits a
	 * headroom of 128 bytes and sizeof(struct skb_shared_info) in
	 * each segment. Hence the total size of memory needed
	 * to receive a packet with 'mtu' is:
	 * headroom of 128 bytes in each segment. Hence the total size of
	 * memory needed to receive a packet with 'mtu' is:
	 * frame size =  mtu + additional data;
	 * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
	 * memory = frame_size + headroom * 6;
	 * each receive buffer size = memory / 6;
	 */
	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	total_size = frame_size + (OTX2_HEAD_ROOM +
		     OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
	total_size = frame_size + OTX2_HEAD_ROOM * 6;
	rbuf_size = total_size / 6;

	return ALIGN(rbuf_size, 2048);
@@ -1343,7 +1341,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
	hw->sqpool_cnt = hw->tot_tx_queues;
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	/* Maximum hardware supported transmit length */
	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

@@ -1807,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
+21 −9
Original line number Diff line number Diff line
@@ -181,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse)
static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
			      u64 iova, int len, struct nix_rx_parse_s *parse,
			      int qidx)
{
	struct page *page;
	int off = 0;
@@ -203,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
	}

	page = virt_to_page(va);
	if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			va - page_address(page) + off, len - off, pfvf->rbsize);
				va - page_address(page) + off,
				len - off, pfvf->rbsize);

		otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
				    pfvf->rbsize, DMA_FROM_DEVICE);
		return true;
	}

	/* If more than MAX_SKB_FRAGS fragments are received then
	 * give back those buffer pointers to hardware for reuse.
	 */
	pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

	return false;
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -349,8 +361,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
		seg_addr = &sg->seg_addr;
		seg_size = (void *)sg;
		for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
			otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
					  parse);
			if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
					      seg_size[seg], parse, cq->cq_idx))
				cq->pool_ptrs++;
		}
		start += sizeof(*sg);
Loading