Commit 9d0281b5 authored by Linus Torvalds

Merge tag 'block-6.3-2023-03-03' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
      - Don't access released socket during error recovery (Akinobu
        Mita)
      - Bring back auto-removal of deleted namespaces during sequential
        scan (Christoph Hellwig)
      - Fix an error code in nvme_auth_process_dhchap_challenge (Dan
        Carpenter)
      - Show well known discovery name (Daniel Wagner)
      - Add a missing endianness conversion in effects masking (Keith
        Busch)

 - Fix for a blk-rq-qos initialization regression introduced in this
   merge window (Breno)

 - Reorder a few fields in struct blk_mq_tag_set, eliminating a few
   holes and shrinking it (Christophe)

 - Remove redundant bdev_get_queue() NULL checks (Juhyung)

 - Add sed-opal single user mode support flag (Luca)

 - Remove SQE128 check in ublk as it isn't needed, saving some memory
   (Ming)

 - Op-specific segment checking for cloned requests (Uday)

 - Exclusive open partition scan fixes (Yu)

 - Check loop offset/size before assigning them to the device (Zhong)

 - Bio polling fixes (me)

* tag 'block-6.3-2023-03-03' of git://git.kernel.dk/linux:
  blk-mq: enforce op-specific segment limits in blk_insert_cloned_request
  nvme-fabrics: show well known discovery name
  nvme-tcp: don't access released socket during error recovery
  nvme-auth: fix an error code in nvme_auth_process_dhchap_challenge()
  nvme: bring back auto-removal of deleted namespaces during sequential scan
  blk-iocost: Pass gendisk to ioc_refresh_params
  nvme: fix sparse warning on effects masking
  block: be a bit more careful in checking for NULL bdev while polling
  block: clear bio->bi_bdev when putting a bio back in the cache
  loop: loop_set_status_from_info() check before assignment
  ublk: remove check IO_URING_F_SQE128 in ublk_ch_uring_cmd
  block: remove more NULL checks after bdev_get_queue()
  blk-mq: Reorder fields in 'struct blk_mq_tag_set'
  block: fix scan partition for exclusively open device again
  block: Revert "block: Do not reread partition table on exclusively open device"
  sed-opal: add support flag for SUM in status ioctl
parents 1bd1aee6 49d24398
+1 −0
@@ -772,6 +772,7 @@ static inline void bio_put_percpu_cache(struct bio *bio)
 
 	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
 		bio->bi_next = cache->free_list;
+		bio->bi_bdev = NULL;
 		cache->free_list = bio;
 		cache->nr++;
 	} else {
+8 −2
@@ -858,10 +858,16 @@ EXPORT_SYMBOL(submit_bio);
  */
 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
+	struct block_device *bdev;
+	struct request_queue *q;
 	int ret = 0;
 
+	bdev = READ_ONCE(bio->bi_bdev);
+	if (!bdev)
+		return 0;
+
+	q = bdev_get_queue(bdev);
 	if (cookie == BLK_QC_T_NONE ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -930,7 +936,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 	 */
 	rcu_read_lock();
 	bio = READ_ONCE(kiocb->private);
-	if (bio && bio->bi_bdev)
+	if (bio)
 		ret = bio_poll(bio, iob, flags);
 	rcu_read_unlock();
 
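
Note: the two bio hunks above work together. Clearing ->bi_bdev when a polled
bio goes back into the per-cpu cache gives bio_poll() a way to recognize a
recycled bio, and the NULL check moves from iocb_bio_iopoll() into bio_poll()
so the pointer is sampled only once with READ_ONCE(). A rough timeline of the
window being closed, reconstructed from the diff rather than taken from the
original commit messages:

    poller (iocb_bio_iopoll)               completion / cache return
    ------------------------               -------------------------
    bio = READ_ONCE(kiocb->private);
    if (bio && bio->bi_bdev)    /* old check passes */
                                           bio completes and is recycled via
                                           bio_put_percpu_cache(), which now
                                           sets bio->bi_bdev = NULL
    bio_poll(bio, ...)
        bdev_get_queue(bio->bi_bdev)   /* old code: stale/NULL dereference */

With the patch, bio_poll() itself does bdev = READ_ONCE(bio->bi_bdev) and
bails out on NULL, so the check and the dereference act on the same snapshot.
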
+20 −6
@@ -800,7 +800,11 @@ static void ioc_refresh_period_us(struct ioc *ioc)
 	ioc_refresh_margins(ioc);
 }
 
-static int ioc_autop_idx(struct ioc *ioc)
+/*
+ *  ioc->rqos.disk isn't initialized when this function is called from
+ *  the init path.
+ */
+static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
 {
 	int idx = ioc->autop_idx;
 	const struct ioc_params *p = &autop[idx];
@@ -808,11 +812,11 @@ static int ioc_autop_idx(struct ioc *ioc)
 	u64 now_ns;
 
 	/* rotational? */
-	if (!blk_queue_nonrot(ioc->rqos.disk->queue))
+	if (!blk_queue_nonrot(disk->queue))
 		return AUTOP_HDD;
 
 	/* handle SATA SSDs w/ broken NCQ */
-	if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
+	if (blk_queue_depth(disk->queue) == 1)
 		return AUTOP_SSD_QD1;
 
 	/* use one of the normal ssd sets */
@@ -901,14 +905,19 @@ static void ioc_refresh_lcoefs(struct ioc *ioc)
 		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
 }
 
-static bool ioc_refresh_params(struct ioc *ioc, bool force)
+/*
+ * struct gendisk is required as an argument because ioc->rqos.disk
+ * is not properly initialized when called from the init path.
+ */
+static bool ioc_refresh_params_disk(struct ioc *ioc, bool force,
+				    struct gendisk *disk)
 {
 	const struct ioc_params *p;
 	int idx;
 
 	lockdep_assert_held(&ioc->lock);
 
-	idx = ioc_autop_idx(ioc);
+	idx = ioc_autop_idx(ioc, disk);
 	p = &autop[idx];
 
 	if (idx == ioc->autop_idx && !force)
@@ -939,6 +948,11 @@ static bool ioc_refresh_params(struct ioc *ioc, bool force)
 	return true;
 }
 
+static bool ioc_refresh_params(struct ioc *ioc, bool force)
+{
+	return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk);
+}
+
 /*
  * When an iocg accumulates too much vtime or gets deactivated, we throw away
  * some vtime, which lowers the overall device utilization. As the exact amount
@@ -2880,7 +2894,7 @@ static int blk_iocost_init(struct gendisk *disk)
 
 	spin_lock_irq(&ioc->lock);
 	ioc->autop_idx = AUTOP_INVALID;
-	ioc_refresh_params(ioc, true);
+	ioc_refresh_params_disk(ioc, true, disk);
 	spin_unlock_irq(&ioc->lock);
 
 	/*
+0 −7
@@ -587,13 +587,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(__blk_rq_map_sg);
 
-static inline unsigned int blk_rq_get_max_segments(struct request *rq)
-{
-	if (req_op(rq) == REQ_OP_DISCARD)
-		return queue_max_discard_segments(rq->q);
-	return queue_max_segments(rq->q);
-}
-
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 						  sector_t offset)
 {
+4 −3
@@ -3000,6 +3000,7 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+	unsigned int max_segments = blk_rq_get_max_segments(rq);
 	blk_status_t ret;
 
 	if (blk_rq_sectors(rq) > max_sectors) {
@@ -3026,9 +3027,9 @@ blk_status_t blk_insert_cloned_request(struct request *rq)
 	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_segments(q)) {
-		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
-			__func__, rq->nr_phys_segments, queue_max_segments(q));
+	if (rq->nr_phys_segments > max_segments) {
+		printk(KERN_ERR "%s: over max segments limit. (%u > %u)\n",
+			__func__, rq->nr_phys_segments, max_segments);
 		return BLK_STS_IOERR;
 	}
 
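
Note: blk_rq_get_max_segments() is deleted from the mapping code above but is
still called by the new max_segments line in blk_insert_cloned_request(), so
the helper presumably moves to a shared block-layer header (block/blk-mq.h
would be the natural spot; that hunk is not part of the excerpt shown here).
Its body is the same as the removed copy:

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	/* Discard requests have a separate segment limit. */
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

This keeps the check for cloned requests (the path used by request-based
stacking drivers) consistent with the per-op segment limits already applied
when bios are split and merged.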