Commit b33b6fdc authored by Mike Snitzer
Browse files

dm bufio: simplify DM_BUFIO_CLIENT_NO_SLEEP locking



Historically none of the bufio code runs in interrupt context but with
the use of DM_BUFIO_CLIENT_NO_SLEEP a bufio client can, see: commit
5721d4e5 ("dm verity: Add optional "try_verify_in_tasklet" feature")
That said, the new tasklet usecase still doesn't require interrupts be
disabled by bufio (let alone conditionally restore them).

Yet with PREEMPT_RT, and falling back from tasklet to workqueue, care
must be taken to properly synchronize between softirq and process
context, otherwise ABBA deadlock may occur. While it is unnecessary to
disable bottom-half preemption within a tasklet, we must consistently do
so in process context to ensure locking is in the proper order.

Fix these issues by switching from spin_lock_irq{save,restore} to using
spin_{lock,unlock}_bh instead. Also remove the 'spinlock_flags' member
in dm_bufio_client struct (that can be used unsafely if bufio must
recurse on behalf of some caller, e.g. block layer's submit_bio).

Fixes: 5721d4e5 ("dm verity: Add optional "try_verify_in_tasklet" feature")
Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 12907efd
Showing 1 changed file with 4 additions and 6 deletions (+4 −6)
@@ -83,7 +83,7 @@ struct dm_bufio_client {
 struct dm_bufio_client {
 	struct mutex lock;
 	spinlock_t spinlock;
-	unsigned long spinlock_flags;
+	bool no_sleep;
 
 	struct list_head lru[LIST_SIZE];
 	unsigned long n_buffers[LIST_SIZE];
@@ -93,8 +93,6 @@ struct dm_bufio_client {
 	s8 sectors_per_block_bits;
 	void (*alloc_callback)(struct dm_buffer *);
 	void (*write_callback)(struct dm_buffer *);
-	bool no_sleep;
-
 	struct kmem_cache *slab_buffer;
 	struct kmem_cache *slab_cache;
 	struct dm_io_client *dm_io;
@@ -174,7 +172,7 @@ static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
 static void dm_bufio_lock(struct dm_bufio_client *c)
 {
 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		spin_lock_irqsave_nested(&c->spinlock, c->spinlock_flags, dm_bufio_in_request());
+		spin_lock_bh(&c->spinlock);
 	else
 		mutex_lock_nested(&c->lock, dm_bufio_in_request());
 }
@@ -182,7 +180,7 @@ static void dm_bufio_lock(struct dm_bufio_client *c)
 static int dm_bufio_trylock(struct dm_bufio_client *c)
 {
 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		return spin_trylock_irqsave(&c->spinlock, c->spinlock_flags);
+		return spin_trylock_bh(&c->spinlock);
 	else
 		return mutex_trylock(&c->lock);
 }
@@ -190,7 +188,7 @@ static int dm_bufio_trylock(struct dm_bufio_client *c)
 static void dm_bufio_unlock(struct dm_bufio_client *c)
 {
 	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-		spin_unlock_irqrestore(&c->spinlock, c->spinlock_flags);
+		spin_unlock_bh(&c->spinlock);
 	else
 		mutex_unlock(&c->lock);
 }