Commit 90add6d4 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-5.19/dm-fixes-2' of...

Merge tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM core's bioset initialization so that the blk integrity pool is
   properly set up. Remove the now-unused bioset_init_from_src.

 - Fix DM zoned hang from locking imbalance due to needless check in
   clone_endio().

* tag 'for-5.19/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: fix zoned locking imbalance due to needless check in clone_endio
  block: remove bioset_init_from_src
  dm: fix bio_set allocation
parents 045fb9c2 dddf3056
Loading
Loading
Loading
Loading
+0 −20
Original line number Diff line number Diff line
@@ -1747,26 +1747,6 @@ int bioset_init(struct bio_set *bs,
}
EXPORT_SYMBOL(bioset_init);

/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 *
 * Copies @src's configuration flags (bvec pool, rescuer workqueue,
 * per-cpu bio cache) and its bio pool size and front padding into a
 * fresh bioset_init() call on @bs.
 *
 * Returns the result of bioset_init(): 0 on success, negative errno
 * on failure.
 *
 * NOTE(review): this helper does not carry over the integrity pool of
 * @src — the clone loses blk integrity support, which is why this
 * commit removes the function entirely.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	/* Derive init flags from which optional resources @src has set up. */
	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;
	if (src->cache)
		flags |= BIOSET_PERCPU_CACHE;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

static int __init init_bio(void)
{
	int i;
+9 −2
Original line number Diff line number Diff line
@@ -33,6 +33,14 @@ struct dm_kobject_holder {
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 *
 * Allocated when a table is loaded and handed over to the
 * mapped_device at bind time, so the biosets (including any integrity
 * pools) are initialized directly with the final settings instead of
 * being copied from a source bioset.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* clone bios for targets (see alloc_tio/setup_clone) */
	struct bio_set io_bs;	/* dm_io front-padded clones (see alloc_io) */
};

struct mapped_device {
	struct mutex suspend_lock;

@@ -110,8 +118,7 @@ struct mapped_device {
	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;
+1 −1
Original line number Diff line number Diff line
@@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq,
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;
+0 −11
Original line number Diff line number Diff line
@@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
	return 0;
}

/*
 * Release the mempools attached to table @t (if any) and clear the
 * pointer so a repeated call is a no-op.
 */
void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

/*
 * Accessor for the mempools pre-allocated at table-load time.
 * Ownership stays with @t; the caller must not free the result.
 */
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
+36 −74
Original line number Diff line number Diff line
@@ -136,14 +136,6 @@ static int get_swap_bios(void)
	return latch;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* bio clones handed to targets */
	struct bio_set io_bs;	/* per-io clones carrying dm_io front padding */
};

struct table_device {
	struct list_head list;
	refcount_t count;
@@ -581,7 +573,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs);
	clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;

@@ -628,7 +620,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
	} else {
		struct mapped_device *md = ci->io->md;

		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs);
		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;
		/* Set default bdev, but target must bio_set_dev() before issuing IO */
@@ -1023,22 +1016,18 @@ static void clone_endio(struct bio *bio)
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (likely(bio->bi_bdev != md->disk->part0)) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
				 !q->limits.max_write_zeroes_sectors)
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
		    unlikely(blk_queue_is_zoned(q)))
	    unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
		dm_zone_endio(io, bio);
	}

	if (endio) {
		int r = endio(ti, bio, &error);
@@ -1876,8 +1865,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);
	dm_free_md_mempools(md->mempools);

	if (md->dax_dev) {
		dax_remove_host(md->disk);
@@ -2049,48 +2037,6 @@ static void free_dev(struct mapped_device *md)
	kvfree(md);
}

/*
 * Transfer the table's pre-allocated mempool settings onto the
 * mapped_device by re-initializing md's biosets from the table's
 * source biosets (bioset_init_from_src).
 *
 * Bio-based tables always reload (front_pad may differ between
 * tables); request-based devices with already-initialized biosets
 * skip the reload entirely.
 *
 * Returns 0 on success or a negative errno from bioset_init_from_src.
 * In all cases the table's mempools are freed on exit — md owns the
 * (copied) biosets from here on.
 */
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	/* At this point md's biosets must be torn down and p must exist. */
	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	/* Copy settings from the table's source biosets into md's own. */
	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		/* Unwind the first bioset so md is left fully uninitialized. */
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
@@ -2144,12 +2090,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
		 * immutable singletons - used to optimize dm_mq_queue_rq.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
		/*
		 * There is no need to reload with request-based dm because the
		 * size of front_pad doesn't change.
		 *
		 * Note for future: If you are to reload bioset, prep-ed
		 * requests in the queue may refer to bio from the old bioset,
		 * so you must walk through the queue to unprep.
		 */
		if (!md->mempools) {
			md->mempools = t->mempools;
			t->mempools = NULL;
		}
	} else {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		dm_free_md_mempools(md->mempools);
		md->mempools = t->mempools;
		t->mempools = NULL;
	}

	ret = dm_table_set_restrictions(t, md->queue, limits);
Loading