Commit a8f75cb3 authored by Loic Poulain, committed by Manivannan Sadhasivam

mhi: core: Factorize mhi queuing



Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper.

Note that the unified mhi_queue() aligns the pm_lock locking with the
mhi_queue_buf() behavior, taking the lock with the irqsave variant
(vs. _bh in the former queue_skb and queue_dma versions).
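For readers outside this code, the shape of the refactor can be sketched as standalone
userspace C. This is only an illustration of the pattern (one locked helper, thin wrappers
that merely describe the buffer); the names, the pthread mutex standing in for pm_lock and
the toy ring accounting are not the kernel API.

/*
 * Minimal sketch of the factorization pattern: a single queue() helper owns
 * the locking and the ring-full check, while the wrappers only translate
 * their argument into a descriptor. Illustrative names only.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct buf_info {
	void *v_addr;		/* payload address */
	size_t len;		/* payload length */
	bool pre_mapped;	/* set by the DMA-style wrapper */
};

struct channel {
	pthread_mutex_t lock;	/* stand-in for pm_lock taken with irqsave */
	unsigned int used, depth;
};

/* Common helper: every wrapper funnels through this single code path. */
static int queue(struct channel *ch, struct buf_info *info)
{
	int ret = 0;

	pthread_mutex_lock(&ch->lock);
	if (ch->used == ch->depth) {	/* ring full */
		ret = -ENOMEM;
		goto out_unlock;
	}
	ch->used++;			/* "write" one ring element */
	printf("queued %zu bytes (pre_mapped=%d)\n", info->len, info->pre_mapped);
out_unlock:
	pthread_mutex_unlock(&ch->lock);
	return ret;
}

/* Wrappers only describe the buffer, they never touch the lock or the ring. */
static int queue_buf(struct channel *ch, void *buf, size_t len)
{
	struct buf_info info = { .v_addr = buf, .len = len };

	return queue(ch, &info);
}

static int queue_dma(struct channel *ch, void *mapped, size_t len)
{
	struct buf_info info = { .v_addr = mapped, .len = len, .pre_mapped = true };

	return queue(ch, &info);
}

int main(void)
{
	static struct channel ch = { .lock = PTHREAD_MUTEX_INITIALIZER, .depth = 2 };
	char payload[16] = { 0 };

	printf("%d %d %d\n", queue_buf(&ch, payload, sizeof(payload)),
	       queue_dma(&ch, payload, sizeof(payload)),
	       queue_buf(&ch, payload, sizeof(payload)));	/* third call: -ENOMEM */
	return 0;
}

The kernel version differs in the obvious ways (rwlock taken with irqsave, doorbell ring,
wake handling, PM state checks), but the division of labor is the same.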

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
parent cdce2663
+44 −118
@@ -967,118 +967,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+		return -EIO;
 
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return -EIO;
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
-	buf_info.v_addr = skb->data;
-	buf_info.cb_buf = skb;
-	buf_info.len = len;
-
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
 	}
 
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	return 0;
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
+	buf_info.v_addr = skb->data;
+	buf_info.cb_buf = skb;
+	buf_info.len = len;
 
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
 
-		return -EIO;
-	}
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+							     mhi_dev->dl_chan;
+	struct mhi_buf_info buf_info = { };
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
 
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);

@@ -1132,57 +1102,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
 
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);
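
For completeness, this is roughly what a client driver's transmit path looks like against the
refactored entry points. The helper my_client_send() and its buffer handling are hypothetical;
only mhi_queue_buf(), the DMA_TO_DEVICE direction, the MHI_EOT flag and the error codes are
taken from the code above.

#include <linux/mhi.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical uplink helper for an MHI client driver (illustration only). */
static int my_client_send(struct mhi_device *mhi_dev, const void *data,
			  size_t len)
{
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, data, len);

	/*
	 * -EINVAL: channel uses pre-allocated buffers, -ENOMEM: TRE ring full,
	 * -EIO: MHI in error state or doorbell access not available.
	 */
	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
	if (ret)
		kfree(buf);	/* on success, free from the ul_xfer_cb completion */

	return ret;
}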