Commit 7af81cd0 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-5.13/dm-changes' of...

Merge tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Improve scalability of DM's device hash by switching to rbtree

 - Extend DM ioctl's DM_LIST_DEVICES_CMD handling to include UUID and
   allow filtering based on name or UUID prefix.

 - Various small fixes for typos, warnings, unused function, or
   needlessly exported interfaces.

 - Remove needless request_queue NULL pointer checks in DM thin and
   cache targets.

 - Remove unnecessary loop in DM core's __split_and_process_bio().

 - Remove DM core's dm_vcalloc() and just use kvcalloc or kvmalloc_array
   instead (depending whether zeroing is useful).

 - Fix request-based DM's double free of blk_mq_tag_set in device remove
   after table load fails.

 - Improve DM persistent data performance on non-x86 by fixing packed
   structs to have a stated alignment. Also remove needless extra work
   from redundant calls to sm_disk_get_nr_free() and a paranoid BUG_ON()
   that caused duplicate checksum calculation.

 - Fix missing goto in DM integrity's bitmap_flush_interval error
   handling.

 - Add "reset_recalculate" feature flag to DM integrity.

 - Improve DM integrity by leveraging discard support to avoid needless
   re-writing of metadata and also use discard support to improve hash
   recalculation.

 - Fix race with DM raid target's reshape and MD raid4/5/6 resync that
   resulted in inconsistent reshape state during table reloads.

 - Update DM raid target to remove unnecessary discard limits for raid0
   and raid10 now that MD has optimized discard handling for both raid
   levels.

* tag 'for-5.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (26 commits)
  dm raid: remove unnecessary discard limits for raid0 and raid10
  dm rq: fix double free of blk_mq_tag_set in dev remove after table load fails
  dm integrity: use discard support when recalculating
  dm integrity: increase RECALC_SECTORS to improve recalculate speed
  dm integrity: don't re-write metadata if discarding same blocks
  dm raid: fix inconclusive reshape layout on fast raid4/5/6 table reload sequences
  dm raid: fix fall-through warning in rs_check_takeover() for Clang
  dm clone metadata: remove unused function
  dm integrity: fix missing goto in bitmap_flush_interval error handling
  dm: replace dm_vcalloc()
  dm space map common: fix division bug in sm_ll_find_free_block()
  dm persistent data: packed struct should have an aligned() attribute too
  dm btree spine: remove paranoid node_check call in node_prep_for_write()
  dm space map disk: remove redundant calls to sm_disk_get_nr_free()
  dm integrity: add the "reset_recalculate" feature flag
  dm persistent data: remove unused return from exit_shadow_spine()
  dm cache: remove needless request_queue NULL pointer checks
  dm thin: remove needless request_queue NULL pointer check
  dm: unexport dm_{get,put}_table_device
  dm ebs: fix a few typos
  ...
parents 152d32aa ca4a4e9a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3387,7 +3387,7 @@ static bool origin_dev_supports_discard(struct block_device *origin_bdev)
{
	struct request_queue *q = bdev_get_queue(origin_bdev);

	return q && blk_queue_discard(q);
	return blk_queue_discard(q);
}

/*
+0 −6
Original line number Diff line number Diff line
@@ -276,12 +276,6 @@ static inline int superblock_read_lock(struct dm_clone_metadata *cmd,
	return dm_bm_read_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}

static inline int superblock_write_lock(struct dm_clone_metadata *cmd,
					struct dm_block **sblock)
{
	return dm_bm_write_lock(cmd->bm, SUPERBLOCK_LOCATION, &sb_validator, sblock);
}

static inline int superblock_write_lock_zero(struct dm_clone_metadata *cmd,
					     struct dm_block **sblock)
{
+3 −3
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@ struct ebs_c {
	spinlock_t lock;		/* Guard bios input list above. */
	sector_t start;			/* <start> table line argument, see ebs_ctr below. */
	unsigned int e_bs;		/* Emulated block size in sectors exposed to upper layer. */
	unsigned int u_bs;		/* Underlying block size in sectors retrievd from/set on lower layer device. */
	unsigned int u_bs;		/* Underlying block size in sectors retrieved from/set on lower layer device. */
	unsigned char block_shift;	/* bitshift sectors -> blocks used in dm-bufio API. */
	bool u_bs_set:1;		/* Flag to indicate underlying block size is set on table line. */
};
@@ -43,7 +43,7 @@ static inline sector_t __block_mod(sector_t sector, unsigned int bs)
	return sector & (bs - 1);
}

/* Return number of blocks for a bio, accounting for misalignement of start and end sectors. */
/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
	sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
@@ -171,7 +171,7 @@ static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
	dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
}

/* Worker funtion to process incoming bios. */
/* Worker function to process incoming bios. */
static void __ebs_process_bios(struct work_struct *ws)
{
	int r;
+55 −30
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			8192
#define RECALC_SECTORS			32768
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
@@ -262,6 +262,7 @@ struct dm_integrity_c {
	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;
	bool fix_padding;
	bool fix_hmac;
@@ -1428,8 +1429,10 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
			}
		} else {
			/* e.g.: op == TAG_CMP */

@@ -2686,6 +2689,7 @@ static void integrity_recalc(struct work_struct *w)
	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	if (!ic->discard) {
		io_req.bi_op = REQ_OP_READ;
		io_req.bi_op_flags = 0;
		io_req.mem.type = DM_IO_VMA;
@@ -2707,6 +2711,9 @@ static void integrity_recalc(struct work_struct *w)
			integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
			t += ic->tag_size;
		}
	} else {
		t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

@@ -3134,7 +3141,8 @@ static void dm_integrity_resume(struct dm_target *ti)
		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		if (ic->mode == 'B') {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			    !ic->reset_recalculate_flag) {
				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
@@ -3156,7 +3164,8 @@ static void dm_integrity_resume(struct dm_target *ti)
			}
		} else {
			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
			    ic->reset_recalculate_flag) {
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
@@ -3169,6 +3178,10 @@ static void dm_integrity_resume(struct dm_target *ti)
			dm_integrity_io_error(ic, "writing superblock", r);
	} else {
		replay_journal(ic);
		if (ic->reset_recalculate_flag) {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
			ic->sb->recalc_sector = cpu_to_le64(0);
		}
		if (ic->mode == 'B') {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
@@ -3242,6 +3255,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->reset_recalculate_flag;
		arg_count += ic->discard;
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
@@ -3261,6 +3275,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		if (ic->reset_recalculate_flag)
			DMEMIT(" reset_recalculate");
		if (ic->discard)
			DMEMIT(" allow_discards");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
@@ -3914,7 +3930,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 17, "Invalid number of feature args"},
		{0, 18, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
@@ -4039,6 +4055,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
@@ -4058,6 +4075,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "reset_recalculate")) {
			ic->recalculate_flag = true;
			ic->reset_recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
@@ -4348,12 +4368,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		if (!ic->discard) {
			ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
			if (!ic->recalc_buffer) {
				ti->error = "Cannot allocate buffer for recalculating";
				r = -ENOMEM;
				goto bad;
			}
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
@@ -4361,6 +4383,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
			r = -ENOMEM;
			goto bad;
		}
		if (ic->discard)
			memset(ic->recalc_tags, DISCARD_FILLER,
			       (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
	} else {
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
			ti->error = "Recalculate can only be specified with internal_hash";
@@ -4554,7 +4579,7 @@ static void dm_integrity_dtr(struct dm_target *ti)

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 7, 0},
	.version		= {1, 9, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
+183 −111
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
@@ -36,8 +37,10 @@ struct dm_file {
 * name or uuid.
 *---------------------------------------------------------------*/
struct hash_cell {
	struct list_head name_list;
	struct list_head uuid_list;
	struct rb_node name_node;
	struct rb_node uuid_node;
	bool name_set;
	bool uuid_set;

	char *name;
	char *uuid;
@@ -53,10 +56,8 @@ struct vers_iter {
};


#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];
static struct rb_root name_rb_tree = RB_ROOT;
static struct rb_root uuid_rb_tree = RB_ROOT;

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);

@@ -70,73 +71,110 @@ static DECLARE_RWSEM(_hash_lock);
 */
static DEFINE_MUTEX(dm_hash_cells_mutex);

static void init_buckets(struct list_head *buckets)
{
	unsigned int i;

	for (i = 0; i < NUM_BUCKETS; i++)
		INIT_LIST_HEAD(buckets + i);
}

static int dm_hash_init(void)
{
	init_buckets(_name_buckets);
	init_buckets(_uuid_buckets);
	return 0;
}

static void dm_hash_exit(void)
{
	dm_hash_remove_all(false, false, false);
}

/*-----------------------------------------------------------------
 * Hash function:
 * We're not really concerned with the str hash function being
 * fast since it's only used by the ioctl interface.
 *---------------------------------------------------------------*/
static unsigned int hash_str(const char *str)
{
	const unsigned int hash_mult = 2654435387U;
	unsigned int h = 0;

	while (*str)
		h = (h + (unsigned int) *str++) * hash_mult;

	return h & MASK_BUCKETS;
}

/*-----------------------------------------------------------------
 * Code for looking up a device by name
 *---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);
	struct rb_node *n = name_rb_tree.rb_node;

	list_for_each_entry (hc, _name_buckets + h, name_list)
		if (!strcmp(hc->name, str)) {
	while (n) {
		struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
		int c = strcmp(hc->name, str);
		if (!c) {
			dm_get(hc->md);
			return hc;
		}
		n = c >= 0 ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static struct hash_cell *__get_uuid_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);
	struct rb_node *n = uuid_rb_tree.rb_node;

	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
		if (!strcmp(hc->uuid, str)) {
	while (n) {
		struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
		int c = strcmp(hc->uuid, str);
		if (!c) {
			dm_get(hc->md);
			return hc;
		}
		n = c >= 0 ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __unlink_name(struct hash_cell *hc)
{
	if (hc->name_set) {
		hc->name_set = false;
		rb_erase(&hc->name_node, &name_rb_tree);
	}
}

static void __unlink_uuid(struct hash_cell *hc)
{
	if (hc->uuid_set) {
		hc->uuid_set = false;
		rb_erase(&hc->uuid_node, &uuid_rb_tree);
	}
}

static void __link_name(struct hash_cell *new_hc)
{
	struct rb_node **n, *parent;

	__unlink_name(new_hc);

	new_hc->name_set = true;

	n = &name_rb_tree.rb_node;
	parent = NULL;

	while (*n) {
		struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
		int c = strcmp(hc->name, new_hc->name);
		BUG_ON(!c);
		parent = *n;
		n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
	}

	rb_link_node(&new_hc->name_node, parent, n);
	rb_insert_color(&new_hc->name_node, &name_rb_tree);
}

static void __link_uuid(struct hash_cell *new_hc)
{
	struct rb_node **n, *parent;

	__unlink_uuid(new_hc);

	new_hc->uuid_set = true;

	n = &uuid_rb_tree.rb_node;
	parent = NULL;

	while (*n) {
		struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
		int c = strcmp(hc->uuid, new_hc->uuid);
		BUG_ON(!c);
		parent = *n;
		n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
	}

	rb_link_node(&new_hc->uuid_node, parent, n);
	rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree);
}

static struct hash_cell *__get_dev_cell(uint64_t dev)
{
	struct mapped_device *md;
@@ -185,8 +223,7 @@ static struct hash_cell *alloc_cell(const char *name, const char *uuid,
		}
	}

	INIT_LIST_HEAD(&hc->name_list);
	INIT_LIST_HEAD(&hc->uuid_list);
	hc->name_set = hc->uuid_set = false;
	hc->md = md;
	hc->new_map = NULL;
	return hc;
@@ -226,16 +263,16 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));
	__link_name(cell);

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			list_del(&cell->name_list);
			__unlink_name(cell);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
		__link_uuid(cell);
	}
	dm_get(md);
	mutex_lock(&dm_hash_cells_mutex);
@@ -256,9 +293,9 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
	struct dm_table *table;
	int srcu_idx;

	/* remove from the dev hash */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	/* remove from the dev trees */
	__unlink_name(hc);
	__unlink_uuid(hc);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);
@@ -279,7 +316,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)

static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
{
	int i, dev_skipped;
	int dev_skipped;
	struct rb_node *n;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *t;
@@ -289,8 +327,8 @@ static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		hc = container_of(n, struct hash_cell, name_node);
		md = hc->md;
		dm_get(md);

@@ -323,7 +361,6 @@ static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool
		 */
		goto retry;
	}
	}

	up_write(&_hash_lock);

@@ -340,7 +377,7 @@ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
	hc->uuid = new_uuid;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
	__link_uuid(hc);
}

/*
@@ -354,14 +391,14 @@ static char *__change_cell_name(struct hash_cell *hc, char *new_name)
	/*
	 * Rename and move the name cell.
	 */
	list_del(&hc->name_list);
	__unlink_name(hc);
	old_name = hc->name;

	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);

	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
	__link_name(hc);

	return old_name;
}
@@ -503,9 +540,33 @@ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
	return ((void *) param) + param->data_start;
}

static bool filter_device(struct hash_cell *hc, const char *pfx_name, const char *pfx_uuid)
{
	const char *val;
	size_t val_len, pfx_len;

	val = hc->name;
	val_len = strlen(val);
	pfx_len = strnlen(pfx_name, DM_NAME_LEN);
	if (pfx_len > val_len)
		return false;
	if (memcmp(val, pfx_name, pfx_len))
		return false;

	val = hc->uuid ? hc->uuid : "";
	val_len = strlen(val);
	pfx_len = strnlen(pfx_uuid, DM_UUID_LEN);
	if (pfx_len > val_len)
		return false;
	if (memcmp(val, pfx_uuid, pfx_len))
		return false;

	return true;
}

static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct rb_node *n;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
@@ -518,11 +579,14 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		hc = container_of(n, struct hash_cell, name_node);
		if (!filter_device(hc, param->name, param->uuid))
			continue;
		needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
			needed += align_val(sizeof(uint32_t));
		}
		needed += align_val(sizeof(uint32_t) * 2);
		if (param->flags & DM_UUID_FLAG && hc->uuid)
			needed += align_val(strlen(hc->uuid) + 1);
	}

	/*
@@ -540,8 +604,11 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
	for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
		void *uuid_ptr;
		hc = container_of(n, struct hash_cell, name_node);
		if (!filter_device(hc, param->name, param->uuid))
			continue;
		if (old_nl)
			old_nl->next = (uint32_t) ((void *) nl -
						   (void *) old_nl);
@@ -552,9 +619,19 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_

		old_nl = nl;
		event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
			*event_nr = dm_get_event_nr(hc->md);
			nl = align_ptr(event_nr + 1);
		event_nr[0] = dm_get_event_nr(hc->md);
		event_nr[1] = 0;
		uuid_ptr = align_ptr(event_nr + 2);
		if (param->flags & DM_UUID_FLAG) {
			if (hc->uuid) {
				event_nr[1] |= DM_NAME_LIST_FLAG_HAS_UUID;
				strcpy(uuid_ptr, hc->uuid);
				uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1);
			} else {
				event_nr[1] |= DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID;
			}
		}
		nl = uuid_ptr;
	}
	/*
	 * If mismatch happens, security may be compromised due to buffer
@@ -1991,14 +2068,9 @@ int __init dm_interface_init(void)
{
	int r;

	r = dm_hash_init();
	if (r)
		return r;

	r = misc_register(&_dm_misc);
	if (r) {
		DMERR("misc_register failed for control device");
		dm_hash_exit();
		return r;
	}

Loading