Commit fa9db655 authored by Linus Torvalds
Browse files

Merge tag 'for-5.20/block-2022-08-04' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:

 - NVMe pull requests via Christoph:
      - add support for In-Band authentication (Hannes Reinecke)
      - handle the persistent internal error AER (Michael Kelley)
      - use in-capsule data for TCP I/O queue connect (Caleb Sander)
      - remove timeout for getting RDMA-CM established event (Israel
        Rukshin)
      - misc cleanups (Joel Granados, Sagi Grimberg, Chaitanya Kulkarni,
        Guixin Liu, Xiang wangx)
      - use command_id instead of req->tag in trace_nvme_complete_rq()
        (Bean Huo)
      - various fixes for the new authentication code (Lukas Bulwahn,
        Dan Carpenter, Colin Ian King, Chaitanya Kulkarni, Hannes
        Reinecke)
      - small cleanups (Liu Song, Christoph Hellwig)
      - restore compat_ioctl support (Nick Bowler)
      - make a nvmet-tcp workqueue lockdep-safe (Sagi Grimberg)
      - enable generic interface (/dev/ngXnY) for unknown command sets
        (Joel Granados, Christoph Hellwig)
      - don't always build constants.o (Christoph Hellwig)
      - print the command name of aborted commands (Christoph Hellwig)

 - MD pull requests via Song:
      - Improve raid5 lock contention, by Logan Gunthorpe.
      - Misc fixes to raid5, by Logan Gunthorpe.
      - Fix race condition with md_reap_sync_thread(), by Guoqing Jiang.
      - Fix potential deadlock with raid5_quiesce and
        raid5_get_active_stripe, by Logan Gunthorpe.
      - Refactoring md_alloc(), by Christoph.
      - Fix md disk_name lifetime problems, by Christoph Hellwig
      - Convert prepare_to_wait() to wait_woken() api, by Logan
        Gunthorpe.
      - Fix sectors_to_do bitmap issue, by Logan Gunthorpe.

 - Work on unifying the null_blk module parameters and configfs API
   (Vincent)

 - drbd bitmap IO error fix (Lars)

 - Set of rnbd fixes (Guoqing, Md Haris)

 - Remove experimental marker on bcache async device registration (Coly)

 - Series from cleaning up the bio splitting (Christoph)

 - Removal of the sx8 block driver. This hardware never really was
   widespread, and it didn't receive a lot of attention after the
   initial merge of it back in 2005 (Christoph)

 - A few fixes for s390 dasd (Eric, Jiang)

 - Followup set of fixes for ublk (Ming)

 - Support for UBLK_IO_NEED_GET_DATA for ublk (ZiyangZhang)

 - Fixes for the dio dma alignment (Keith)

 - Misc fixes and cleanups (Ming, Yu, Dan, Christophe)

* tag 'for-5.20/block-2022-08-04' of git://git.kernel.dk/linux-block: (136 commits)
  s390/dasd: Establish DMA alignment
  s390/dasd: drop unexpected word 'for' in comments
  ublk_drv: add support for UBLK_IO_NEED_GET_DATA
  ublk_cmd.h: add one new ublk command: UBLK_IO_NEED_GET_DATA
  ublk_drv: cleanup ublksrv_ctrl_dev_info
  ublk_drv: add SET_PARAMS/GET_PARAMS control command
  ublk_drv: fix ublk device leak in case that add_disk fails
  ublk_drv: cancel device even though disk isn't up
  block: fix leaking page ref on truncated direct io
  block: ensure bio_iov_add_page can't fail
  block: ensure iov_iter advances for added pages
  drivers:md:fix a potential use-after-free bug
  md/raid5: Ensure batch_last is released before sleeping for quiesce
  md/raid5: Move stripe_request_ctx up
  md/raid5: Drop unnecessary call to r5c_check_stripe_cache_usage()
  md/raid5: Make is_inactive_blocked() helper
  md/raid5: Refactor raid5_get_active_stripe()
  block: pass struct queue_limits to the bio splitting helpers
  block: move bio_allowed_max_sectors to blk-merge.c
  block: move the call to get_max_io_size out of blk_bio_segment_split
  ...
parents e4952747 bc792884
Loading
Loading
Loading
Loading
+22 −0
Original line number Diff line number Diff line
@@ -72,6 +72,28 @@ submit_queues=[1..nr_cpus]: Default: 1
hw_queue_depth=[0..qdepth]: Default: 64
  The hardware queue depth of the device.

memory_backed=[0/1]: Default: 0
  Whether or not to use a memory buffer to respond to IO requests

  =  =============================================
  0  Transfer no data in response to IO requests
  1  Use a memory buffer to respond to IO requests
  =  =============================================

discard=[0/1]: Default: 0
  Support discard operations (requires memory-backed null_blk device).

  =  =====================================
  0  Do not support discard operations
  1  Enable support for discard operations
  =  =====================================

cache_size=[Size in MB]: Default: 0
  Cache size in MB for memory-backed device.

mbps=[Maximum bandwidth in MB/s]: Default: 0 (no limit)
  Bandwidth limit for device performance.

Multi-queue specific parameters
-------------------------------

+3 −1
Original line number Diff line number Diff line
@@ -14507,7 +14507,8 @@ S: Supported
W:	http://git.infradead.org/nvme.git
T:	git://git.infradead.org/nvme.git
F:	drivers/nvme/host/
F:	include/linux/nvme.h
F:	drivers/nvme/common/
F:	include/linux/nvme*
F:	include/uapi/linux/nvme_ioctl.h
NVM EXPRESS FC TRANSPORT DRIVERS
@@ -18838,6 +18839,7 @@ SOFTWARE RAID (Multiple Disks) SUPPORT
M:	Song Liu <song@kernel.org>
L:	linux-raid@vger.kernel.org
S:	Supported
Q:	https://patchwork.kernel.org/project/linux-raid/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
F:	drivers/md/Kconfig
F:	drivers/md/Makefile
+1 −1
Original line number Diff line number Diff line
@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
	iv = bip->bip_vec + bip->bip_vcnt;

	if (bip->bip_vcnt &&
	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
		return 0;

+25 −26
Original line number Diff line number Diff line
@@ -965,7 +965,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
		if (bvec_gap_to_prev(&q->limits, bvec, offset))
			return 0;
	}

@@ -1151,22 +1151,12 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
	bio_set_flag(bio, BIO_CLONED);
}

/*
 * Release the page references covering @size bytes of data that start at
 * offset @off within the first page of @pages.
 */
static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	/* Number of pages spanned by @size bytes beginning at @off. */
	size_t remaining = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
	struct page **p = pages;

	while (remaining--)
		put_page(*p++);
}

static int bio_iov_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (WARN_ON_ONCE(bio_full(bio, len)))
			return -EINVAL;
		__bio_add_page(bio, page, len, offset);
		return 0;
	}
@@ -1209,8 +1199,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	unsigned len, i = 0;
	size_t offset;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
@@ -1227,32 +1218,40 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
	 * result to ensure the bio's total size is correct. The remainder of
	 * the iov data will be picked up in the next bio iteration.
	 */
	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (size > 0)
	size = iov_iter_get_pages(iter, pages, UINT_MAX - bio->bi_iter.bi_size,
				  nr_pages, &offset);
	if (size > 0) {
		nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
		size = ALIGN_DOWN(size, bdev_logical_block_size(bio->bi_bdev));
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	} else
		nr_pages = 0;

	if (unlikely(size <= 0)) {
		ret = size ? size : -EFAULT;
		goto out;
	}

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		int ret;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			ret = bio_iov_add_zone_append_page(bio, page, len,
					offset);
		else
			ret = bio_iov_add_page(bio, page, len, offset);
			if (ret)
				break;
		} else
			bio_iov_add_page(bio, page, len, offset);

		if (ret) {
			bio_put_pages(pages + i, left, offset);
			return ret;
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
	iov_iter_advance(iter, size - left);
out:
	while (i < nr_pages)
		put_page(pages[i++]);

	return ret;
}

/**
+1 −8
Original line number Diff line number Diff line
@@ -377,7 +377,6 @@ static void blk_timeout_work(struct work_struct *work)
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
@@ -396,13 +395,9 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;
		goto fail_id;

	q->node = node_id;

@@ -439,8 +434,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)

fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_srcu:
Loading