Commit 5bc357b2 authored by Linus Torvalds

Merge tag 'block-6.6-2023-09-15' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull via Keith:
     - nvmet-tcp iov len fix (Varun)
      - nvme-hwmon const qualifier for safety (Krzysztof)
      - nvme-fc null pointer checks (Nigel)
      - nvme-pci no numa node fix (Pratyush)
      - nvme timeout fix for non-compliant controllers (Keith)

 - MD pull via Song fixing regressions with both 6.5 and 6.6

 - Fix a use-after-free regression in resizing blk-mq tags (Chengming)

* tag 'block-6.6-2023-09-15' of git://git.kernel.dk/linux:
  nvme: avoid bogus CRTO values
  md: Put the right device in md_seq_next
  nvme-pci: do not set the NUMA node of device if it has none
  blk-mq: fix tags UAF when shrinking q->nr_hw_queues
  md/raid1: fix error: ISO C90 forbids mixed declarations
  md: fix warning for holder mismatch from export_rdev()
  md: don't dereference mddev after export_rdev()
  nvme-fc: Prevent null pointer dereference in nvme_fc_io_getuuid()
  nvme: host: hwmon: constify pointers to hwmon_channel_info
  nvmet-tcp: pass iov_len instead of sg->length to bvec_set_page()
parents 31d8fddb c266ae77
block/blk-mq.c +7 −6
@@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
 	struct blk_mq_tags **new_tags;
 	int i;
 
-	if (set->nr_hw_queues >= new_nr_hw_queues) {
-		for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
-			__blk_mq_free_map_and_rqs(set, i);
+	if (set->nr_hw_queues >= new_nr_hw_queues)
 		goto done;
-	}
 
 	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
 				GFP_KERNEL, set->numa_node);
@@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
 	struct request_queue *q;
 	LIST_HEAD(head);
-	int prev_nr_hw_queues;
+	int prev_nr_hw_queues = set->nr_hw_queues;
+	int i;
 
 	lockdep_assert_held(&set->tag_list_lock);

@@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 		blk_mq_sysfs_unregister_hctxs(q);
 	}
 
-	prev_nr_hw_queues = set->nr_hw_queues;
 	if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
 		goto reregister;

@@ -4781,6 +4778,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,

 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+
+	/* Free the excess tags when nr_hw_queues shrink. */
+	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
+		__blk_mq_free_map_and_rqs(set, i);
 }
 
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
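These hunks together move the freeing of excess tag sets out of blk_mq_realloc_tag_set_tags() and defer it until after every queue has been unfrozen, closing the window where a still-active queue could touch freed tags. A minimal userspace sketch of that ordering (tags_alloc/tags_free are illustrative names, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct tags { int *rqs; };              /* stands in for blk_mq_tags */

    static struct tags *tags_alloc(void)
    {
        struct tags *t = malloc(sizeof(*t));
        t->rqs = calloc(16, sizeof(int));
        return t;
    }

    static void tags_free(struct tags *t)
    {
        free(t->rqs);
        free(t);
    }

    int main(void)
    {
        int prev_nr = 4, new_nr = 2;        /* shrinking nr_hw_queues */
        struct tags *set[4];
        int i;

        for (i = 0; i < prev_nr; i++)
            set[i] = tags_alloc();

        /*
         * Buggy order: freeing set[new_nr..prev_nr) here, before the
         * queues are unfrozen and remapped, leaves live users pointing
         * at freed tags -- the use-after-free the patch fixes.
         *
         * Fixed order: remap and unfreeze first ...
         */
        printf("queues unfrozen\n");

        /* ... then free the excess entries, as the new loop does. */
        for (i = new_nr; i < prev_nr; i++)
            tags_free(set[i]);

        for (i = 0; i < new_nr; i++)
            tags_free(set[i]);
        return 0;
    }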
drivers/md/md.c +16 −7
@@ -798,14 +798,14 @@ void mddev_unlock(struct mddev *mddev)
 	} else
 		mutex_unlock(&mddev->reconfig_mutex);
 
+	md_wakeup_thread(mddev->thread);
+	wake_up(&mddev->sb_wait);
+
 	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
 		list_del_init(&rdev->same_set);
 		kobject_del(&rdev->kobj);
 		export_rdev(rdev, mddev);
 	}
-
-	md_wakeup_thread(mddev->thread);
-	wake_up(&mddev->sb_wait);
 }
 EXPORT_SYMBOL_GPL(mddev_unlock);
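The reordering matters because export_rdev() can drop the final reference keeping mddev alive, so the md_wakeup_thread()/wake_up() calls, which dereference mddev, must run first. A minimal userspace sketch of the rule (obj_new/obj_put are illustrative stand-ins for the mddev refcounting):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; };

    static struct obj *obj_new(void)
    {
        struct obj *o = malloc(sizeof(*o));
        o->refs = 1;
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0) {
            puts("object freed");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *mddev = obj_new();

        /* Safe order: finish every dereference of the object ... */
        printf("waking waiters, refs=%d\n", mddev->refs);

        /* ... before the put that may free it. Doing these two steps
         * in the opposite order is the use-after-free the hunk removes. */
        obj_put(mddev);
        return 0;
    }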

@@ -2452,7 +2452,8 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
 	if (test_bit(AutoDetected, &rdev->flags))
 		md_autodetect_dev(rdev->bdev->bd_dev);
 #endif
-	blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
+	blkdev_put(rdev->bdev,
+		   test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
 	rdev->bdev = NULL;
 	kobject_put(&rdev->kobj);
 }
@@ -3632,6 +3633,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
 	struct md_rdev *rdev;
+	struct md_rdev *holder;
 	sector_t size;
 	int err;

@@ -3646,8 +3648,15 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 	if (err)
 		goto out_clear_rdev;
 
+	if (super_format == -2) {
+		holder = &claim_rdev;
+	} else {
+		holder = rdev;
+		set_bit(Holder, &rdev->flags);
+	}
+
 	rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
-			super_format == -2 ? &claim_rdev : rdev, NULL);
+				       holder, NULL);
 	if (IS_ERR(rdev->bdev)) {
 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
 			MAJOR(newdev), MINOR(newdev));
@@ -3684,7 +3693,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 	return rdev;
 
 out_blkdev_put:
-	blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
+	blkdev_put(rdev->bdev, holder);
 out_clear_rdev:
 	md_rdev_clear(rdev);
 out_free_rdev:
@@ -8256,7 +8265,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	spin_unlock(&all_mddevs_lock);
 
 	if (to_put)
-		mddev_put(mddev);
+		mddev_put(to_put);
 	return next_mddev;
 
 }
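The one-liner matters because the cursor variable can advance past the element whose reference the iterator actually holds, so the put must name the saved pointer. A hedged userspace sketch of the pattern (node_get_unless_zero/node_put are illustrative stand-ins; assume the caller took a reference on the first node):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { int refs; struct node *next; int id; };

    static bool node_get_unless_zero(struct node *n)
    {
        if (n->refs == 0)
            return false;
        n->refs++;
        return true;
    }

    static void node_put(struct node *n)
    {
        if (--n->refs == 0)
            printf("node %d released\n", n->id);
    }

    /* Seq-file style iterator: remember the element whose reference we
     * hold, advance the cursor (possibly past dying entries), then drop
     * the remembered reference -- not whatever the cursor now points at. */
    static struct node *seq_next(struct node *cur)
    {
        struct node *to_put = cur;

        do {
            cur = cur->next;
        } while (cur && !node_get_unless_zero(cur));

        if (to_put)
            node_put(to_put);  /* the fix: put to_put, not the cursor */
        return cur;
    }

    int main(void)
    {
        struct node c = { .refs = 1, .next = NULL, .id = 3 };
        struct node b = { .refs = 0, .next = &c,   .id = 2 };  /* dying */
        struct node a = { .refs = 1, .next = &b,   .id = 1 };

        for (struct node *cur = &a; cur; cur = seq_next(cur))
            printf("visiting node %d\n", cur->id);
        return 0;
    }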
drivers/md/md.h +3 −0
@@ -211,6 +211,9 @@ enum flag_bits {
 				 * check if there is collision between raid1
 				 * serial bios.
 				 */
+	Holder,			/* rdev is used as holder while opening
+				 * underlying disk exclusively.
+				 */
 };
 
 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
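The new flag exists so that teardown can recompute exactly which holder cookie was passed to blkdev_get_by_dev(): the rdev itself, or the shared claim_rdev. A small sketch of that invariant (set_bit/test_bit are reimplemented here purely for illustration):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { HOLDER = 0 };              /* mirrors the new md.h flag bit */

    struct rdev {
        unsigned long flags;
        const void *holder;           /* cookie the "open" recorded */
    };

    static void set_bit(int nr, unsigned long *addr)
    {
        *addr |= 1UL << nr;
    }

    static bool test_bit(int nr, const unsigned long *addr)
    {
        return *addr & (1UL << nr);
    }

    static const int claim_rdev;      /* shared holder, like md's claim_rdev */

    /* Open: pick the holder cookie once and remember the choice. */
    static void rdev_open(struct rdev *rdev, bool shared_claim)
    {
        if (shared_claim) {
            rdev->holder = &claim_rdev;
        } else {
            rdev->holder = rdev;
            set_bit(HOLDER, &rdev->flags);
        }
    }

    /* Close: derive the same cookie from the flag; a mismatch here is
     * the holder warning the patch fixes. */
    static void rdev_close(struct rdev *rdev)
    {
        const void *holder = test_bit(HOLDER, &rdev->flags)
                                 ? (const void *)rdev
                                 : (const void *)&claim_rdev;
        assert(holder == rdev->holder);   /* put must match get */
        printf("released with matching holder\n");
    }

    int main(void)
    {
        struct rdev r = { 0 };
        rdev_open(&r, false);
        rdev_close(&r);
        return 0;
    }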
drivers/md/raid1.c +1 −2
@@ -1837,12 +1837,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 	struct r1conf *conf = mddev->private;
 	int err = 0;
 	int number = rdev->raid_disk;
+	struct raid1_info *p = conf->mirrors + number;
 
 	if (unlikely(number >= conf->raid_disks))
 		goto abort;
 
-	struct raid1_info *p = conf->mirrors + number;
-
 	if (rdev != p->rdev)
 		p = conf->mirrors + conf->raid_disks + number;
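The raid1 change only moves a declaration: the kernel builds with -Wdeclaration-after-statement, which enforces the C90 rule that all declarations precede the first statement of a block. A standalone reproduction of the error and the fix (cc -std=gnu89 -Wdeclaration-after-statement reproduces the warning):

    struct info { int used; };

    int remove_disk(struct info *mirrors, int number, int raid_disks)
    {
        /* C90-clean: the declaration is hoisted above the first
         * statement, mirroring how the patch moves 'p' above the
         * range check. */
        struct info *p = mirrors + number;

        if (number >= raid_disks)
            return -1;

        /* Declaring 'p' down here instead, after the if-statement, is
         * the "ISO C90 forbids mixed declarations and code" error
         * being fixed. */
        return p->used;
    }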

drivers/nvme/host/core.c +35 −19
@@ -2245,25 +2245,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 	else
 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
 
-	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
-		u32 crto;
-
-		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
-		if (ret) {
-			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
-				ret);
-			return ret;
-		}
-
-		if (ctrl->cap & NVME_CAP_CRMS_CRIMS) {
-			ctrl->ctrl_config |= NVME_CC_CRIME;
-			timeout = NVME_CRTO_CRIMT(crto);
-		} else {
-			timeout = NVME_CRTO_CRWMT(crto);
-		}
-	} else {
-		timeout = NVME_CAP_TIMEOUT(ctrl->cap);
-	}
+	if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
+		ctrl->ctrl_config |= NVME_CC_CRIME;
 
 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2277,6 +2260,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 	if (ret)
 		return ret;
 
+	/* CAP value may change after initial CC write */
+	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+	if (ret)
+		return ret;
+
+	timeout = NVME_CAP_TIMEOUT(ctrl->cap);
+	if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
+		u32 crto, ready_timeout;
+
+		ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
+		if (ret) {
+			dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
+				ret);
+			return ret;
+		}
+
+		/*
+		 * CRTO should always be greater or equal to CAP.TO, but some
+		 * devices are known to get this wrong. Use the larger of the
+		 * two values.
+		 */
+		if (ctrl->ctrl_config & NVME_CC_CRIME)
+			ready_timeout = NVME_CRTO_CRIMT(crto);
+		else
+			ready_timeout = NVME_CRTO_CRWMT(crto);
+
+		if (ready_timeout < timeout)
+			dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
+				      crto, ctrl->cap);
+		else
+			timeout = ready_timeout;
+	}
+
 	ctrl->ctrl_config |= NVME_CC_ENABLE;
 	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
 	if (ret)
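Functionally, the nvme change boils down to a defensive choice of readiness timeout: start from CAP.TO and only prefer the CRTO-derived value when it is at least as large, since non-compliant controllers report a bogus, smaller CRTO. A standalone sketch of that decision (pick_ready_timeout is an illustrative name, not the driver's):

    #include <stdio.h>

    /* Spec says CRTO >= CAP.TO; some controllers get this wrong, so
     * take the larger of the two values, warning about the bogus
     * register instead of timing out too early. */
    static unsigned int pick_ready_timeout(unsigned int cap_to,
                                           unsigned int crto_to)
    {
        if (crto_to < cap_to) {
            fprintf(stderr, "bad crto:%x cap.to:%x\n", crto_to, cap_to);
            return cap_to;
        }
        return crto_to;
    }

    int main(void)
    {
        printf("%u\n", pick_ready_timeout(30, 10));   /* bogus CRTO -> 30 */
        printf("%u\n", pick_ready_timeout(30, 500));  /* sane CRTO -> 500 */
        return 0;
    }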