Commit bc55cfd5 authored by Takashi Iwai's avatar Takashi Iwai
Browse files

ALSA: pcm: Fix potential AB/BA lock with buffer_mutex and mmap_lock



syzbot caught a potential deadlock between the PCM
runtime->buffer_mutex and the mm->mmap_lock.  It was introduced by the
recent fix to cover the racy read/write and other ioctls, and in that
commit, I overlooked a (hopefully the only) corner case that may take the
locks in the reverse order, namely, the OSS mmap.  The OSS mmap operation
exceptionally allows re-configuring the parameters inside the OSS
mmap syscall, where mm->mmap_lock is already held.  Meanwhile, the
copy_from/to_user calls in the read/write operations also take the
mm->mmap_lock internally, hence it may lead to an AB/BA deadlock.

A similar problem was already seen in the past and we fixed it with a
refcount (in commit b2483716).  The former fix covered only the
call paths with OSS read/write and OSS ioctls, while we need to cover
the concurrent access via both ALSA and OSS APIs now.

This patch addresses the problem above by replacing the buffer_mutex
lock in the read/write operations with a refcount similar to the one
we've used for OSS.  The new field, runtime->buffer_accessing, keeps the
number of concurrent read/write operations.  Unlike the former
buffer_mutex protection, this protects only around the
copy_from/to_user() calls; the other code paths are basically protected by
the PCM stream lock.  The refcount can be negative, meaning blocked
by the ioctls.  If a negative value is seen, the read/write aborts
with -EBUSY.  On the ioctl side, OTOH, they check this refcount, too,
and set it to a negative value for blocking, unless it's already being
accessed.

Reported-by: default avatar <syzbot+6e5c88838328e99c7e1c@syzkaller.appspotmail.com>
Fixes: dca947d4 ("ALSA: pcm: Fix races among concurrent read/write and buffer changes")
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/000000000000381a0d05db622a81@google.com
Link: https://lore.kernel.org/r/20220330120903.4738-1-tiwai@suse.de


Signed-off-by: default avatarTakashi Iwai <tiwai@suse.de>
parent 21b5954d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -402,6 +402,7 @@ struct snd_pcm_runtime {
	struct fasync_struct *fasync;
	bool stop_operating;		/* sync_stop will be called */
	struct mutex buffer_mutex;	/* protect for buffer changes */
	atomic_t buffer_accessing;	/* >0: in r/w operation, <0: blocked */

	/* -- private section -- */
	void *private_data;
+1 −0
Original line number Diff line number Diff line
@@ -970,6 +970,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream,

	runtime->status->state = SNDRV_PCM_STATE_OPEN;
	mutex_init(&runtime->buffer_mutex);
	atomic_set(&runtime->buffer_accessing, 0);

	substream->runtime = runtime;
	substream->private_data = pcm->private_data;
+5 −4
Original line number Diff line number Diff line
@@ -1906,11 +1906,9 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
		if (avail >= runtime->twake)
			break;
		snd_pcm_stream_unlock_irq(substream);
		mutex_unlock(&runtime->buffer_mutex);

		tout = schedule_timeout(wait_time);

		mutex_lock(&runtime->buffer_mutex);
		snd_pcm_stream_lock_irq(substream);
		set_current_state(TASK_INTERRUPTIBLE);
		switch (runtime->status->state) {
@@ -2221,7 +2219,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,

	nonblock = !!(substream->f_flags & O_NONBLOCK);

	mutex_lock(&runtime->buffer_mutex);
	snd_pcm_stream_lock_irq(substream);
	err = pcm_accessible_state(runtime);
	if (err < 0)
@@ -2276,6 +2273,10 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
			err = -EINVAL;
			goto _end_unlock;
		}
		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
			err = -EBUSY;
			goto _end_unlock;
		}
		snd_pcm_stream_unlock_irq(substream);
		if (!is_playback)
			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
@@ -2284,6 +2285,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
		if (is_playback)
			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
		snd_pcm_stream_lock_irq(substream);
		atomic_dec(&runtime->buffer_accessing);
		if (err < 0)
			goto _end_unlock;
		err = pcm_accessible_state(runtime);
@@ -2313,7 +2315,6 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
	if (xfer > 0 && err >= 0)
		snd_pcm_update_state(substream, runtime);
	snd_pcm_stream_unlock_irq(substream);
	mutex_unlock(&runtime->buffer_mutex);
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
+32 −7
Original line number Diff line number Diff line
@@ -685,6 +685,24 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
	return 0;
}

/*
 * Claim exclusive (blocking) access to the PCM buffer for an ioctl-side
 * configuration change.  If a read/write operation is currently running
 * (buffer_accessing > 0), bail out with -EBUSY; otherwise drop the
 * refcount below zero to block further r/w operations and take
 * buffer_mutex.  The mutex stays held on success and is released by
 * snd_pcm_buffer_access_unlock().
 */
static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
{
	if (atomic_dec_unless_positive(&runtime->buffer_accessing)) {
		mutex_lock(&runtime->buffer_mutex);
		return 0;	/* buffer_mutex kept held by the caller */
	}
	return -EBUSY;		/* a concurrent r/w operation is in flight */
}

/*
 * Release buffer_mutex and clear the r/w blocking flag: counterpart to
 * snd_pcm_buffer_access_lock().  The atomic_inc() undoes the
 * atomic_dec_unless_positive() done at lock time, bringing
 * buffer_accessing back toward zero so that read/write operations
 * (which require a non-negative count) may proceed again.
 */
static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
{
	mutex_unlock(&runtime->buffer_mutex);
	atomic_inc(&runtime->buffer_accessing);
}

#if IS_ENABLED(CONFIG_SND_PCM_OSS)
#define is_oss_stream(substream)	((substream)->oss.oss)
#else
@@ -695,14 +713,16 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err = 0, usecs;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	mutex_lock(&runtime->buffer_mutex);
	err = snd_pcm_buffer_access_lock(runtime);
	if (err < 0)
		return err;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
@@ -820,7 +840,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			snd_pcm_lib_free_pages(substream);
	}
 unlock:
	mutex_unlock(&runtime->buffer_mutex);
	snd_pcm_buffer_access_unlock(runtime);
	return err;
}

@@ -865,7 +885,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	mutex_lock(&runtime->buffer_mutex);
	result = snd_pcm_buffer_access_lock(runtime);
	if (result < 0)
		return result;
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_SETUP:
@@ -884,7 +906,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
 unlock:
	mutex_unlock(&runtime->buffer_mutex);
	snd_pcm_buffer_access_unlock(runtime);
	return result;
}

@@ -1369,12 +1391,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops,

	/* Guarantee the group members won't change during non-atomic action */
	down_read(&snd_pcm_link_rwsem);
	mutex_lock(&substream->runtime->buffer_mutex);
	res = snd_pcm_buffer_access_lock(substream->runtime);
	if (res < 0)
		goto unlock;
	if (snd_pcm_stream_linked(substream))
		res = snd_pcm_action_group(ops, substream, state, false);
	else
		res = snd_pcm_action_single(ops, substream, state);
	mutex_unlock(&substream->runtime->buffer_mutex);
	snd_pcm_buffer_access_unlock(substream->runtime);
 unlock:
	up_read(&snd_pcm_link_rwsem);
	return res;
}