Commit c75981a1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-6.5/dm-fixes' of...

Merge tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix double free on memory allocation failure in DM integrity target's
   integrity_recalc()

 - Fix locking in DM raid target's raid_ctr() and around call to
   md_stop()

 - Fix DM cache target's cleaner policy to always allow work to be
   queued for writeback, even if the cache isn't idle.

* tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
  dm raid: protect md_stop() with 'reconfig_mutex'
  dm raid: clean up four equivalent goto tags in raid_ctr()
  dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
  dm integrity: fix double free on memory allocation failure
parents 6fb9f7f8 1e4ab7b4
Loading
Loading
Loading
Loading
+18 −10
Original line number Diff line number Diff line
@@ -857,7 +857,13 @@ struct smq_policy {

	struct background_tracker *bg_work;

	bool migrations_allowed;
	bool migrations_allowed:1;

	/*
	 * If this is set the policy will try and clean the whole cache
	 * even if the device is not idle.
	 */
	bool cleaner:1;
};

/*----------------------------------------------------------------*/
@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
	 * Cache entries may not be populated.  So we cannot rely on the
	 * size of the clean queue.
	 */
	if (idle) {
	if (idle || mq->cleaner) {
		/*
		 * We'd like to clean everything.
		 */
@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
		*hotspot_block_size /= 2u;
}

static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
					    sector_t origin_size,
					    sector_t cache_block_size,
					    bool mimic_mq,
					    bool migrations_allowed)
static struct dm_cache_policy *
__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
	     bool mimic_mq, bool migrations_allowed, bool cleaner)
{
	unsigned int i;
	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
		goto bad_btracker;

	mq->migrations_allowed = migrations_allowed;
	mq->cleaner = cleaner;

	return &mq->policy;

@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
	return __smq_create(cache_size, origin_size, cache_block_size,
			    false, true, false);
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
	return __smq_create(cache_size, origin_size, cache_block_size,
			    true, true, false);
}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
					      sector_t origin_size,
					      sector_t cache_block_size)
{
	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
	return __smq_create(cache_size, origin_size, cache_block_size,
			    false, false, true);
}

/*----------------------------------------------------------------*/
+1 −0
Original line number Diff line number Diff line
@@ -2676,6 +2676,7 @@ static void integrity_recalc(struct work_struct *w)
	recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
	if (!recalc_tags) {
		vfree(recalc_buffer);
		recalc_buffer = NULL;
		goto oom;
	}

+9 −11
Original line number Diff line number Diff line
@@ -3251,8 +3251,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	r = md_start(&rs->md);
	if (r) {
		ti->error = "Failed to start raid array";
		mddev_unlock(&rs->md);
		goto bad_md_start;
		goto bad_unlock;
	}

	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
@@ -3260,8 +3259,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
		if (r) {
			ti->error = "Failed to set raid4/5/6 journal mode";
			mddev_unlock(&rs->md);
			goto bad_journal_mode_set;
			goto bad_unlock;
		}
	}

@@ -3272,14 +3270,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	if (rs_is_raid456(rs)) {
		r = rs_set_raid456_stripe_cache(rs);
		if (r)
			goto bad_stripe_cache;
			goto bad_unlock;
	}

	/* Now do an early reshape check */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		r = rs_check_reshape(rs);
		if (r)
			goto bad_check_reshape;
			goto bad_unlock;

		/* Restore new, ctr requested layout to perform check */
		rs_config_restore(rs, &rs_layout);
@@ -3288,7 +3286,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
			r = rs->md.pers->check_reshape(&rs->md);
			if (r) {
				ti->error = "Reshape check failed";
				goto bad_check_reshape;
				goto bad_unlock;
			}
		}
	}
@@ -3299,11 +3297,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	mddev_unlock(&rs->md);
	return 0;

bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
bad_unlock:
	md_stop(&rs->md);
	mddev_unlock(&rs->md);
bad:
	raid_set_free(rs);

@@ -3314,7 +3310,9 @@ static void raid_dtr(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	mddev_lock_nointr(&rs->md);
	md_stop(&rs->md);
	mddev_unlock(&rs->md);
	raid_set_free(rs);
}

+2 −0
Original line number Diff line number Diff line
@@ -6247,6 +6247,8 @@ static void __md_stop(struct mddev *mddev)

void md_stop(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	/* stop the array and free an attached data structures.
	 * This is called from dm-raid
	 */