Commit f87904c0 authored by Khazhismel Kumykov, committed by Andrew Morton

writeback: avoid use-after-free after removing device

When a disk is removed, bdi_unregister() is called to stop further
writeback and to wait for the associated delayed work to complete.
However, wb_inode_writeback_end() may schedule the bandwidth estimation
dwork after this has completed, which can result in the timer attempting
to access the just-freed bdi_writeback.
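
An illustrative interleaving (call chains abbreviated; this is a
simplification of the shutdown sequence, not an exact trace):

  CPU 1 (device removal)                 CPU 2 (writeback completion)
  ----------------------                 ----------------------------
  bdi_unregister()
    wb_shutdown()
      clear WB_registered
      flush pending delayed work
                                         wb_inode_writeback_end()
                                           queue_delayed_work(&wb->bw_dwork)
  bdi_writeback freed
                                         bw_dwork timer fires
                                           -> use-after-free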

Fix this by checking whether the bdi_writeback is still alive before
re-arming the work, just as when scheduling writeback work.

Since this check requires wb->work_lock, and wb_inode_writeback_end()
may be called from interrupt context, switch wb->work_lock to an
irq-safe lock: a lock that only disables softirqs (spin_lock_bh) must
not be taken from hard interrupt context, so every acquirer of
wb->work_lock has to move to the _irq/_irqsave variants.
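
For illustration only, here is a minimal userspace analogue of the
pattern (hypothetical names, pthreads standing in for kernel locking
and workqueues; this is not the kernel code): teardown clears a
"registered" flag under the same lock the completion path takes, so
nothing can be re-queued once shutdown has begun.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wb_like {
	pthread_mutex_t work_lock;	/* plays the role of wb->work_lock */
	bool registered;		/* plays the role of WB_registered */
	int pending;			/* stand-in for a queued dwork */
};

/* Completion path: only re-arm the deferred work while registered. */
static void end_writeback(struct wb_like *wb)
{
	pthread_mutex_lock(&wb->work_lock);
	if (wb->registered)
		wb->pending++;		/* "queue_delayed_work()" */
	pthread_mutex_unlock(&wb->work_lock);
}

/* Teardown path: clear the flag under the lock, then drain; nothing
 * can be queued after this point, so freeing wb afterwards is safe. */
static void shutdown_wb(struct wb_like *wb)
{
	pthread_mutex_lock(&wb->work_lock);
	wb->registered = false;
	pthread_mutex_unlock(&wb->work_lock);
	wb->pending = 0;		/* "flush_delayed_work()" */
}

int main(void)
{
	struct wb_like wb = {
		.work_lock = PTHREAD_MUTEX_INITIALIZER,
		.registered = true,
	};

	end_writeback(&wb);	/* queued: still registered */
	shutdown_wb(&wb);
	end_writeback(&wb);	/* no-op: shutdown already ran */
	printf("pending after shutdown: %d\n", wb.pending);	/* prints 0 */
	return 0;
}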

Link: https://lkml.kernel.org/r/20220801155034.3772543-1-khazhy@google.com
Fixes: 45a2966f ("writeback: fix bandwidth estimate for spiky workload")
Signed-off-by: Khazhismel Kumykov <khazhy@google.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Michael Stapelberg <stapelberg+linux@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9dfb3b8d
fs/fs-writeback.c: +6 −6
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,
 
 static void wb_wakeup(struct bdi_writeback *wb)
 {
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void finish_writeback_work(struct bdi_writeback *wb,
@@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	if (work->done)
 		atomic_inc(&work->done->cnt);
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 
 	if (test_bit(WB_registered, &wb->state)) {
 		list_add_tail(&work->list, &wb->work_list);
@@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	} else
 		finish_writeback_work(wb, work);
 
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 /**
@@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 {
 	struct wb_writeback_work *work = NULL;
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!list_empty(&wb->work_list)) {
 		work = list_entry(wb->work_list.next,
 				  struct wb_writeback_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 	return work;
 }

mm/backing-dev.c: +5 −5
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 static void wb_shutdown(struct bdi_writeback *wb)
 {
 	/* Make sure nobody queues further work */
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!test_and_clear_bit(WB_registered, &wb->state)) {
-		spin_unlock_bh(&wb->work_lock);
+		spin_unlock_irq(&wb->work_lock);
 		return;
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 
 	cgwb_remove_from_bdi_list(wb);
 	/*
mm/page-writeback.c: +5 −1
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2892,6 +2892,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
 
 static void wb_inode_writeback_end(struct bdi_writeback *wb)
 {
+	unsigned long flags;
 	atomic_dec(&wb->writeback_inodes);
 	/*
 	 * Make sure estimate of writeback throughput gets updated after
@@ -2900,7 +2901,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	 * that if multiple inodes end writeback at a similar time, they get
 	 * batched into one bandwidth update.
 	 */
-	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_lock_irqsave(&wb->work_lock, flags);
+	if (test_bit(WB_registered, &wb->state))
+		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
 bool __folio_end_writeback(struct folio *folio)