Commit 9f233ffe authored by Kai Krakow, committed by Jens Axboe
Browse files

Revert "bcache: Kill btree_io_wq"



This reverts commit 56b30770.

With the btree using the `system_wq`, I seem to see a lot more desktop
latency than I should.

After some more investigation, it looks like the original assumption
of 56b30770 is no longer true, and bcache has a very high potential of
congesting the `system_wq`. In turn, this introduces laggy desktop
performance, IO stalls (at least with btrfs), and input events may be
delayed.

So let's revert this. It's important to note that the semantics of
using `system_wq` previously mean that `btree_io_wq` should be created
before and destroyed after other bcache wqs to keep the same
assumptions.

Cc: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org # 5.4+
Signed-off-by: Kai Krakow <kai@kaishome.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d7fae7b4
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1046,5 +1046,7 @@ void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */
+19 −2
Original line number Diff line number Diff line
@@ -99,6 +99,8 @@
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

static struct workqueue_struct *btree_io_wq;

#define insert_lock(s, b)	((b)->level <= (s)->lock)


@@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}
@@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

@@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}

/* Tear down the btree I/O workqueue created by bch_btree_init(). */
void bch_btree_exit(void)
{
	struct workqueue_struct *wq = btree_io_wq;

	/* Guard against a failed/never-run init; older kernels'
	 * destroy_workqueue() does not tolerate NULL. */
	if (wq)
		destroy_workqueue(wq);
}

/*
 * Create the single-threaded workqueue used for delayed btree node
 * writeback.  Returns 0 on success, -ENOMEM if allocation fails.
 */
int __init bch_btree_init(void)
{
	btree_io_wq = create_singlethread_workqueue("bch_btree_io");

	return btree_io_wq ? 0 : -ENOMEM;
}
+4 −0
Original line number Diff line number Diff line
@@ -2821,6 +2821,7 @@ static void bcache_exit(void)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);
	bch_btree_exit();

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
@@ -2876,6 +2877,9 @@ static int __init bcache_init(void)
		return bcache_major;
	}

	if (bch_btree_init())
		goto err;

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;