Commit e810cb78 authored by Christoph Hellwig, committed by Mike Snitzer

dm: refactor dm_md_mempool allocation



The current split between dm_table_alloc_md_mempools and
dm_alloc_md_mempools is rather arbitrary, so merge the two
into one easy to follow function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 4ed045d8
drivers/md/dm-core.h +3 −0
@@ -230,6 +230,9 @@ struct dm_target_io {
 	sector_t old_sector;
 	struct bio clone;
 };
+#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
+#define DM_IO_BIO_OFFSET \
+	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
 
 /*
  * dm_target_io flags
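
Reader's note (not part of the commit): DM_TARGET_IO_BIO_OFFSET and DM_IO_BIO_OFFSET measure how far the embedded clone bio sits from the start of a struct dm_target_io and a struct dm_io respectively. The merged allocator below adds these offsets to the rounded-up per_io_data_size when sizing the bioset front padding, so each target's per-io data and DM's own bookkeeping end up directly in front of every bio drawn from the pool. A small stand-alone sketch of the offset arithmetic, using hypothetical stand-in structs rather than the real dm-core.h definitions:

/* Hypothetical stand-in structs; the real ones live in drivers/md/dm-core.h. */
#include <stddef.h>
#include <stdio.h>

struct bio { int placeholder; };
struct dm_target_io { unsigned long flags; struct bio clone; };
struct dm_io { unsigned short magic; struct dm_target_io tio; };

int main(void)
{
	/* Mirrors DM_IO_BIO_OFFSET: step from the start of a dm_io to the
	 * embedded tio, then to the clone bio inside that tio. */
	size_t io_bio_offset = offsetof(struct dm_io, tio) +
			       offsetof(struct dm_target_io, clone);

	printf("clone bio starts %zu bytes into struct dm_io\n", io_bio_offset);
	printf("same value via a nested designator: %zu\n",
	       offsetof(struct dm_io, tio.clone));
	return 0;
}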
drivers/md/dm-table.c +41 −16
@@ -6,6 +6,7 @@
  */
 
 #include "dm-core.h"
+#include "dm-rq.h"
 
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1010,32 +1011,56 @@ static bool dm_table_supports_poll(struct dm_table *t);
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
 	enum dm_queue_mode type = dm_table_get_type(t);
-	unsigned per_io_data_size = 0;
-	unsigned min_pool_size = 0;
-	struct dm_target *ti;
-	unsigned i;
-	bool poll_supported = false;
+	unsigned int per_io_data_size = 0, front_pad, io_front_pad;
+	unsigned int min_pool_size = 0, pool_size;
+	struct dm_md_mempools *pools;
 
 	if (unlikely(type == DM_TYPE_NONE)) {
 		DMWARN("no table type is set, can't allocate mempools");
 		return -EINVAL;
 	}
 
-	if (__table_type_bio_based(type)) {
-		for (i = 0; i < t->num_targets; i++) {
-			ti = t->targets + i;
-			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
-			min_pool_size = max(min_pool_size, ti->num_flush_bios);
-		}
-		poll_supported = dm_table_supports_poll(t);
+	pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
+	if (!pools)
+		return -ENOMEM;
+
+	if (type == DM_TYPE_REQUEST_BASED) {
+		pool_size = dm_get_reserved_rq_based_ios();
+		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+		goto init_bs;
+	}
+
+	for (unsigned int i = 0; i < t->num_targets; i++) {
+		struct dm_target *ti = t->targets + i;
+
+		per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+		min_pool_size = max(min_pool_size, ti->num_flush_bios);
 	}
+	pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
+	front_pad = roundup(per_io_data_size,
+		__alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
 
-	t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
-					   t->integrity_supported, poll_supported);
-	if (!t->mempools)
-		return -ENOMEM;
+	io_front_pad = roundup(per_io_data_size,
+		__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
+	if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
+			dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+		goto out_free_pools;
+	if (t->integrity_supported &&
+	    bioset_integrity_create(&pools->io_bs, pool_size))
+		goto out_free_pools;
+init_bs:
+	if (bioset_init(&pools->bs, pool_size, front_pad, 0))
+		goto out_free_pools;
+	if (t->integrity_supported &&
+	    bioset_integrity_create(&pools->bs, pool_size))
+		goto out_free_pools;
 
+	t->mempools = pools;
 	return 0;
+
+out_free_pools:
+	dm_free_md_mempools(pools);
+	return -ENOMEM;
 }
 
 static int setup_indexes(struct dm_table *t)
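
Reader's note (again, not from the commit): both bioset_init() calls above pass a front_pad, which reserves that many bytes immediately in front of every bio later allocated from the set. That is where DM keeps the target's per_io_data plus the dm_io/dm_target_io bookkeeping around each clone bio, and it is why the request-based case can skip io_bs entirely via the init_bs label. A minimal sketch of the same front-pad pattern with made-up names (struct my_per_bio, my_bs), not the actual DM code:

#include <linux/bio.h>

/* Hypothetical per-bio context; DM's real equivalents are struct dm_io and
 * struct dm_target_io in drivers/md/dm-core.h. */
struct my_per_bio {
	unsigned long start_jiffies;
	struct bio clone;	/* embedded bio must come last */
};

static struct bio_set my_bs;

static int my_pool_init(void)
{
	/* Reserve offsetof(struct my_per_bio, clone) bytes of front padding,
	 * so every bio allocated from my_bs can be mapped back to its
	 * containing context with container_of(), just as clone_to_tio()
	 * does in drivers/md/dm.c below. */
	return bioset_init(&my_bs, 128, offsetof(struct my_per_bio, clone), 0);
}

static inline struct my_per_bio *bio_to_ctx(struct bio *bio)
{
	return container_of(bio, struct my_per_bio, clone);
}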
drivers/md/dm.c +0 −52
@@ -88,10 +88,6 @@ struct clone_info {
 	bool submit_as_polled:1;
 };
 
-#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
-#define DM_IO_BIO_OFFSET \
-	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
-
 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
 {
 	return container_of(clone, struct dm_target_io, clone);
@@ -2978,54 +2974,6 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-					    unsigned per_io_data_size, unsigned min_pool_size,
-					    bool integrity, bool poll)
-{
-	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
-	unsigned int pool_size = 0;
-	unsigned int front_pad, io_front_pad;
-	int ret;
-
-	if (!pools)
-		return NULL;
-
-	switch (type) {
-	case DM_TYPE_BIO_BASED:
-	case DM_TYPE_DAX_BIO_BASED:
-		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
-		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
-		io_front_pad = roundup(per_io_data_size,  __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
-		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
-		if (ret)
-			goto out;
-		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
-			goto out;
-		break;
-	case DM_TYPE_REQUEST_BASED:
-		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
-		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-		/* per_io_data_size is used for blk-mq pdu at queue allocation */
-		break;
-	default:
-		BUG();
-	}
-
-	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
-	if (ret)
-		goto out;
-
-	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
-		goto out;
-
-	return pools;
-
-out:
-	dm_free_md_mempools(pools);
-
-	return NULL;
-}
-
 void dm_free_md_mempools(struct dm_md_mempools *pools)
 {
 	if (!pools)
drivers/md/dm.h +0 −3
@@ -218,9 +218,6 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-					    unsigned per_io_data_size, unsigned min_pool_size,
-					    bool integrity, bool poll);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*