Commit 4af1b64f authored by Geetha sowjanya, committed by David S. Miller

octeontx2-pf: Fix lmtst ID used in aura free



Current code uses a per_cpu pointer to get the lmtst_id mapped to the
core on which aura_free() is executed. Using a per_cpu pointer without
disabling preemption can cause a mismatch between the lmtst_id and the
core on which the pointer actually gets freed. This patch fixes the
issue by disabling preemption around aura_free().

Fixes: ef6c8da7 ("octeontx2-pf: cn10K: Reserve LMTST lines per core")
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9c807965
+21 −9
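For context, below is a minimal sketch of the race the commit message describes and of the get_cpu()/put_cpu() bracket the patch uses. This is not the driver's actual code: the lmt_info layout, its lmt_id field, and the issue_lmtst() helper are illustrative stand-ins for the per-CPU LMTST state and the hardware store.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Illustrative per-CPU LMTST state; field names are not the driver's. */
struct lmt_info {
	u64 lmt_id;			/* LMTST line reserved for this core */
};

static DEFINE_PER_CPU(struct lmt_info, lmt_info);

static void issue_lmtst(u64 lmt_id, u64 buf)
{
	/* stand-in for the actual LMTST store to hardware */
}

static void aura_free_racy(u64 buf)
{
	/* Racy: with preemption enabled the task can migrate to another
	 * core after this lookup, so lmt_id may no longer match the core
	 * that ends up issuing the LMTST.
	 */
	struct lmt_info *li = per_cpu_ptr(&lmt_info, smp_processor_id());

	issue_lmtst(li->lmt_id, buf);
}

static void aura_free_fixed(u64 buf)
{
	struct lmt_info *li;

	get_cpu();			/* disable preemption: no migration */
	li = this_cpu_ptr(&lmt_info);
	issue_lmtst(li->lmt_id, buf);
	put_cpu();			/* re-enable preemption */
}

In the hunks below the bracket is placed around the whole allocation/free loops in the callers rather than inside the freeing helper, so every aura_freeptr() call in a loop runs on the CPU whose LMTST line was looked up.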
@@ -1012,6 +1012,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 	rbpool = cq->rbpool;
 	free_ptrs = cq->pool_ptrs;
 
+	get_cpu();
 	while (cq->pool_ptrs) {
 		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
 			/* Schedule a WQ if we fails to free atleast half of the
@@ -1031,6 +1032,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
 		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
 		cq->pool_ptrs--;
 	}
+	put_cpu();
 	cq->refill_task_sched = false;
 }
 
@@ -1368,6 +1370,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
 
+	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
 		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@@ -1376,18 +1379,24 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
 		sq = &qset->sq[qidx];
 		sq->sqb_count = 0;
 		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
-		if (!sq->sqb_ptrs)
-			return -ENOMEM;
+		if (!sq->sqb_ptrs) {
+			err = -ENOMEM;
+			goto err_mem;
+		}
 
 		for (ptr = 0; ptr < num_sqbs; ptr++) {
-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
-				return -ENOMEM;
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+			if (err)
+				goto err_mem;
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
 			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
 		}
 	}
 
-	return 0;
+err_mem:
+	put_cpu();
+	return err ? -ENOMEM : 0;
+
 fail:
 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
 	otx2_aura_pool_free(pfvf);
@@ -1426,18 +1435,21 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
 	if (err)
 		goto fail;
 
+	get_cpu();
 	/* Allocate pointers and free them to aura/pool */
 	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
 		pool = &pfvf->qset.pool[pool_id];
 		for (ptr = 0; ptr < num_ptrs; ptr++) {
-			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
-				return -ENOMEM;
+			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+			if (err)
+				goto err_mem;
 			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
 						   bufptr + OTX2_HEAD_ROOM);
 		}
 	}
-
-	return 0;
+err_mem:
+	put_cpu();
+	return err ? -ENOMEM : 0;
 fail:
 	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
 	otx2_aura_pool_free(pfvf);
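
The error-path reshuffling in the last two hunks exists so that put_cpu() is reached on every exit taken after get_cpu(); the direct return -ENOMEM statements would otherwise leave preemption disabled. A simplified sketch of the resulting shape follows; alloc_and_free_to_aura() and pool_init_sketch() are illustrative stand-ins, not driver functions.

#include <linux/errno.h>
#include <linux/smp.h>

/* Stand-in for the otx2_alloc_rbuf() + aura_freeptr() pair above. */
static int alloc_and_free_to_aura(void *dev)
{
	return 0;
}

static int pool_init_sketch(void *dev, int nbufs)
{
	int i, err = 0;

	get_cpu();			/* keep the task on this CPU for the loop */
	for (i = 0; i < nbufs; i++) {
		err = alloc_and_free_to_aura(dev);
		if (err)
			goto err_mem;	/* never return with preemption disabled */
	}
err_mem:
	put_cpu();			/* reached on success (err == 0) and on failure */
	return err ? -ENOMEM : 0;
}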