Commit f8cca8c9 authored by Hans Verkuil, committed by Mauro Carvalho Chehab

media: v4l2-mem2mem: support held capture buffers

Check for held buffers that are ready to be returned to vb2 in
__v4l2_m2m_try_queue(). This saves drivers from having to handle this
case themselves.

Add v4l2_m2m_buf_done_and_job_finish() to correctly return source
and destination buffers and mark the job as finished while taking
a held destination buffer into account (i.e. that buffer won't be
returned). This has to be done while job_spinlock is held to avoid
race conditions.

Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
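
For illustration, a driver that supports held capture buffers would finish a
job from its interrupt handler with the new helper. A minimal sketch, assuming
hypothetical foo_* types that are not part of this commit:

#include <linux/interrupt.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

struct foo_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

struct foo_ctx {
	struct v4l2_fh fh;	/* fh.m2m_ctx is set up at open() time */
};

static irqreturn_t foo_irq(int irq, void *priv)
{
	struct foo_dev *dev = priv;
	struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

	/*
	 * Return the source buffer and, unless it is held, the destination
	 * buffer, then mark the job as finished. All of this happens under
	 * job_spinlock, so the driver needs no extra locking against
	 * __v4l2_m2m_try_queue().
	 */
	v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
					 VB2_BUF_STATE_DONE);
	return IRQ_HANDLED;
}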
parent 137272cd
drivers/media/v4l2-core/v4l2-mem2mem.c  +97 −33
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
 				 struct v4l2_m2m_ctx *m2m_ctx)
 {
-	unsigned long flags_job, flags_out, flags_cap;
+	unsigned long flags_job;
+	struct vb2_v4l2_buffer *dst, *src;
 
 	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
 
@@ -307,20 +308,30 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
 		goto job_unlock;
 	}
 
-	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
-	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
-	    && !m2m_ctx->out_q_ctx.buffered) {
+	src = v4l2_m2m_next_src_buf(m2m_ctx);
+	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+	if (!src && !m2m_ctx->out_q_ctx.buffered) {
 		dprintk("No input buffers available\n");
-		goto out_unlock;
+		goto job_unlock;
 	}
-	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
-	    && !m2m_ctx->cap_q_ctx.buffered) {
+	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
 		dprintk("No output buffers available\n");
-		goto cap_unlock;
+		goto job_unlock;
 	}
-	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+	if (src && dst &&
+	    dst->is_held && dst->vb2_buf.copied_timestamp &&
+	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+		dst->is_held = false;
+		v4l2_m2m_dst_buf_remove(m2m_ctx);
+		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+		dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+			dprintk("No output buffers available after returning held buffer\n");
+			goto job_unlock;
+		}
+	}
 
 	if (m2m_dev->m2m_ops->job_ready
 		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +342,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
 	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
 	m2m_ctx->job_flags |= TRANS_QUEUED;
 
-	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
-	return;
-
-cap_unlock:
-	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 job_unlock:
 	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 }
@@ -412,37 +416,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 	}
 }
 
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
-			 struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+				       struct v4l2_m2m_ctx *m2m_ctx)
 {
-	unsigned long flags;
+	/*
+	 * This instance might have more buffers ready, but since we do not
+	 * allow more than one job on the job_queue per instance, each has
+	 * to be scheduled separately after the previous one finishes.
+	 */
+	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
 
-	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	/*
+	 * We might be running in atomic context,
+	 * but the job must be run in non-atomic context.
+	 */
+	schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				 struct v4l2_m2m_ctx *m2m_ctx)
+{
 	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
-		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		dprintk("Called by an instance not currently running\n");
-		return;
+		return false;
 	}
 
 	list_del(&m2m_dev->curr_ctx->queue);
 	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
 	wake_up(&m2m_dev->curr_ctx->finished);
 	m2m_dev->curr_ctx = NULL;
+	return true;
+}
 
-	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
-	/* This instance might have more buffers ready, but since we do not
-	 * allow more than one job on the job_queue per instance, each has
-	 * to be scheduled separately after the previous one finishes. */
-	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+			 struct v4l2_m2m_ctx *m2m_ctx)
+{
+	unsigned long flags;
+	bool schedule_next;
 
-	/* We might be running in atomic context,
-	 * but the job must be run in non-atomic context.
+	/*
+	 * This function should not be used for drivers that support
+	 * holding capture buffers. Those should use
+	 * v4l2_m2m_buf_done_and_job_finish() instead.
 	 */
-	schedule_work(&m2m_dev->job_work);
+	WARN_ON(m2m_ctx->cap_q_ctx.q.subsystem_flags &
+		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	if (schedule_next)
+		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
 }
 EXPORT_SYMBOL(v4l2_m2m_job_finish);
 
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				      struct v4l2_m2m_ctx *m2m_ctx,
+				      enum vb2_buffer_state state)
+{
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	bool schedule_next = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+	if (WARN_ON(!src_buf || !dst_buf))
+		goto unlock;
+	v4l2_m2m_buf_done(src_buf, state);
+	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+	if (!dst_buf->is_held) {
+		v4l2_m2m_dst_buf_remove(m2m_ctx);
+		v4l2_m2m_buf_done(dst_buf, state);
+	}
+	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+	if (schedule_next)
+		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		     struct v4l2_requestbuffers *reqbufs)
 {
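
For contrast, a driver without held-buffer support typically open-codes the
buffer return around v4l2_m2m_job_finish(). A generic sketch, reusing the
hypothetical foo_* types from the sketch above (this is not code from this
commit):

static void foo_job_done(struct foo_dev *dev, struct foo_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
	/* Only now is the job finished and the next one scheduled. */
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
}

Once a capture buffer can remain on the ready list, splitting the buffer
return from the job finish this way becomes racy; as the commit message notes,
both steps have to happen while job_spinlock is held, which is exactly what
v4l2_m2m_buf_done_and_job_finish() guarantees.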
include/media/v4l2-mem2mem.h  +31 −2
@@ -21,7 +21,8 @@
  *		callback.
  *		The job does NOT have to end before this callback returns
  *		(and it will be the usual case). When the job finishes,
- *		v4l2_m2m_job_finish() has to be called.
+ *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
+ *		has to be called.
  * @job_ready:	optional. Should return 0 if the driver does not have a job
  *		fully prepared to run yet (i.e. it will not be able to finish a
  *		transaction without sleeping). If not provided, it will be
@@ -33,7 +34,8 @@
  *		stop the device safely; e.g. in the next interrupt handler),
  *		even if the transaction would not have been finished by then.
  *		After the driver performs the necessary steps, it has to call
- *		v4l2_m2m_job_finish() (as if the transaction ended normally).
+ *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
+ *		if the transaction ended normally.
  *		This function does not have to (and will usually not) wait
  *		until the device enters a state when it can be stopped.
  */
@@ -173,6 +175,33 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 			 struct v4l2_m2m_ctx *m2m_ctx);
 
+/**
+ * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
+ * state and inform the framework that a job has been finished and have it
+ * clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
+ * function instead of job_finish() to take held buffers into account. It is
+ * optional for other drivers.
+ *
+ * This function removes the source buffer from the ready list and returns
+ * it with the given state. The same is done for the destination buffer, unless
+ * it is marked 'held'. In that case the buffer is kept on the ready list.
+ *
+ * After that the job is finished (see job_finish()).
+ *
+ * This allows for multiple output buffers to be used to fill in a single
+ * capture buffer. This is typically used by stateless decoders where
+ * multiple e.g. H.264 slices contribute to a single decoded frame.
+ */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+				      struct v4l2_m2m_ctx *m2m_ctx,
+				      enum vb2_buffer_state state);
+
 static inline void
 v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
 {
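
From the userspace side, the slice use case described in the kernel-doc above
could look roughly as follows. A sketch assuming a stateless decoder whose
OUTPUT queue reports V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF; the helper
name and parameters are illustrative:

#include <stdbool.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/videodev2.h>

/* Hypothetical helper: queue one slice of a frame on the OUTPUT queue. */
static int queue_slice(int video_fd, unsigned int index, __u32 bytesused,
		       struct timeval frame_ts, bool last_slice)
{
	struct v4l2_plane plane;
	struct v4l2_buffer buf;

	memset(&plane, 0, sizeof(plane));
	plane.bytesused = bytesused;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	buf.length = 1;
	buf.m.planes = &plane;

	/*
	 * Slices of one frame share a timestamp; __v4l2_m2m_try_queue()
	 * only returns a held capture buffer once a source buffer with a
	 * different timestamp shows up (see the change above).
	 */
	buf.timestamp = frame_ts;

	/* Keep holding the capture buffer until the last slice arrives. */
	if (!last_slice)
		buf.flags |= V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;

	return ioctl(video_fd, VIDIOC_QBUF, &buf);
}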