Commit c6e0e874 authored by Linus Torvalds

Merge tag 'block-6.1-2022-10-28' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
      - make the multipath dma alignment match the non-multipath one
        (Keith Busch)
      - fix a bogus use of sg_init_marker() (Nam Cao)
      - fix circular locking in nvme-tcp (Sagi Grimberg)

 - Initialization fix for requests allocated via the special hw queue
   allocator (John)

 - Fix for a regression added in this release with the batched
   completions of end_io backed requests (Ming)

 - Error handling leak fix for rbd (Yang)

 - Error handling leak fix for add_disk() failure (Yu)

* tag 'block-6.1-2022-10-28' of git://git.kernel.dk/linux:
  blk-mq: Properly init requests from blk_mq_alloc_request_hctx()
  blk-mq: don't add non-pt request with ->end_io to batch
  rbd: fix possible memory leak in rbd_sysfs_init()
  nvme-multipath: set queue dma alignment to 3
  nvme-tcp: fix possible circular locking when deleting a controller under memory pressure
  nvme-tcp: replace sg_init_marker() with sg_init_table()
  block: fix memory leak for elevator on add_disk failure
parents 4d244327 e3c5a78c
block/blk-mq.c  +6 −1
@@ -611,6 +611,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		.nr_tags	= 1,
 	};
 	u64 alloc_time_ns = 0;
+	struct request *rq;
 	unsigned int cpu;
 	unsigned int tag;
 	int ret;
@@ -660,8 +661,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
+	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
 					alloc_time_ns);
+	rq->__data_len = 0;
+	rq->__sector = (sector_t) -1;
+	rq->bio = rq->biotail = NULL;
+	return rq;
 
 out_queue_exit:
 	blk_queue_exit(q);
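
Note: the added lines duplicate the field initialization that blk_mq_alloc_request() already performs after blk_mq_rq_ctx_init(); without them, requests handed out by blk_mq_alloc_request_hctx() could carry stale bio/sector/length values. For comparison, a rough sketch of the tail of blk_mq_alloc_request() (from memory, not verbatim):

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	/* the same data-carrying fields the hctx variant was missing */
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
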
block/genhd.c  +8 −4
@@ -410,9 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 	 * Otherwise just allocate the device numbers for both the whole device
 	 * and all partitions from the extended dev_t space.
 	 */
+	ret = -EINVAL;
 	if (disk->major) {
 		if (WARN_ON(!disk->minors))
-			return -EINVAL;
+			goto out_exit_elevator;
 
 		if (disk->minors > DISK_MAX_PARTS) {
 			pr_err("block: can't allocate more than %d partitions\n",
@@ -420,14 +421,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 			disk->minors = DISK_MAX_PARTS;
 		}
 		if (disk->first_minor + disk->minors > MINORMASK + 1)
-			return -EINVAL;
+			goto out_exit_elevator;
 	} else {
 		if (WARN_ON(disk->minors))
-			return -EINVAL;
+			goto out_exit_elevator;
 
 		ret = blk_alloc_ext_minor();
 		if (ret < 0)
-			return ret;
+			goto out_exit_elevator;
 		disk->major = BLOCK_EXT_MAJOR;
 		disk->first_minor = ret;
 	}
@@ -540,6 +541,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 out_free_ext_minor:
 	if (disk->major == BLOCK_EXT_MAJOR)
 		blk_free_ext_minor(disk->first_minor);
+out_exit_elevator:
+	if (disk->queue->elevator)
+		elevator_exit(disk->queue);
 	return ret;
 }
 EXPORT_SYMBOL(device_add_disk);
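
Note: the fix converts the early "return -EINVAL"/"return ret" exits in device_add_disk() into jumps to the new out_exit_elevator label, so an elevator that was already set up for the queue by the time these checks run is torn down again instead of being leaked. A minimal, hypothetical sketch of the goto-unwind idiom being applied here (illustrative names, not kernel code):

	#include <stdlib.h>

	struct elevator { int dummy; };

	static struct elevator *setup_elevator(void) { return malloc(sizeof(struct elevator)); }
	static void exit_elevator(struct elevator *e) { free(e); }

	static int add_disk_like(int minors)
	{
		int ret;
		struct elevator *e = setup_elevator();

		if (!e)
			return -1;		/* nothing acquired yet, a plain return is fine */

		ret = -1;
		if (minors <= 0)
			goto out_exit_elevator;	/* a bare "return ret" here would leak e */

		/* ... further setup ... */
		return 0;

	out_exit_elevator:
		exit_elevator(e);
		return ret;
	}
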
drivers/block/rbd.c  +3 −1
@@ -7222,8 +7222,10 @@ static int __init rbd_sysfs_init(void)
 	int ret;
 
 	ret = device_register(&rbd_root_dev);
-	if (ret < 0)
+	if (ret < 0) {
+		put_device(&rbd_root_dev);
 		return ret;
+	}
 
 	ret = bus_register(&rbd_bus_type);
 	if (ret < 0)
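
Note: device_register() initializes the embedded kobject before anything can fail, so its kernel-doc requires callers to drop that reference with put_device() on error rather than freeing (or, as here, simply forgetting) the device. The fix applies exactly that rule to rbd_root_dev. A hedged sketch of the same pattern in a hypothetical driver init:

	static struct device example_root_dev = {
		.init_name = "example_root",
	};

	static int __init example_init(void)
	{
		int ret = device_register(&example_root_dev);

		if (ret < 0) {
			/* registration failed, but a reference was already taken */
			put_device(&example_root_dev);
			return ret;
		}
		return 0;
	}
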
drivers/nvme/host/multipath.c  +1 −0
@@ -516,6 +516,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	/* set to a default value of 512 until the disk is validated */
 	blk_queue_logical_block_size(head->disk->queue, 512);
 	blk_set_stacking_limits(&head->disk->queue->limits);
+	blk_queue_dma_alignment(head->disk->queue, 3);
 
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
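
Note: blk_queue_dma_alignment() takes a mask, so the value 3 (0b11) relaxes the multipath queue's requirement to 4-byte (dword) alignment for user buffers, matching what the regular, non-multipath NVMe queues already set; the stricter default mask would otherwise force unnecessary bouncing of user/passthrough I/O. A rough illustration of how the block layer consumes the mask (cf. blk_rq_aligned(); simplified, not verbatim):

	/* a buffer is acceptable if neither its address nor its length
	 * has any of the mask bits set */
	static inline bool dword_aligned_ok(unsigned long addr, unsigned long len)
	{
		unsigned long dma_align_mask = 3;	/* the value set above */

		return ((addr | len) & dma_align_mask) == 0;
	}
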
drivers/nvme/host/tcp.c  +11 −2
@@ -387,7 +387,7 @@ static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
 {
 	struct scatterlist sg;
 
-	sg_init_marker(&sg, 1);
+	sg_init_table(&sg, 1);
 	sg_set_page(&sg, page, len, off);
 	ahash_request_set_crypt(hash, &sg, NULL, len);
 	crypto_ahash_update(hash);
@@ -1141,6 +1141,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 {
 	struct nvme_tcp_request *req;
+	unsigned int noreclaim_flag;
 	int ret = 1;
 
 	if (!queue->request) {
@@ -1150,12 +1151,13 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 	}
 	req = queue->request;
 
+	noreclaim_flag = memalloc_noreclaim_save();
 	if (req->state == NVME_TCP_SEND_CMD_PDU) {
 		ret = nvme_tcp_try_send_cmd_pdu(req);
 		if (ret <= 0)
 			goto done;
 		if (!nvme_tcp_has_inline_data(req))
-			return ret;
+			goto out;
 	}
 
 	if (req->state == NVME_TCP_SEND_H2C_PDU) {
@@ -1181,6 +1183,8 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 		nvme_tcp_fail_request(queue->request);
 		nvme_tcp_done_send_req(queue);
 	}
+out:
+	memalloc_noreclaim_restore(noreclaim_flag);
 	return ret;
 }
 
@@ -1296,6 +1300,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+	unsigned int noreclaim_flag;
 
 	if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
 		return;
@@ -1308,7 +1313,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 		queue->pf_cache.va = NULL;
 	}
+
+	noreclaim_flag = memalloc_noreclaim_save();
 	sock_release(queue->sock);
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	kfree(queue->pdu);
 	mutex_destroy(&queue->send_mutex);
 	mutex_destroy(&queue->queue_lock);
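
Note: two independent fixes land in this file. The sg_init_marker() to sg_init_table() swap matters because the scatterlist here lives on the stack: sg_init_table() zeroes the entry before placing the end marker, while sg_init_marker() only sets the marker and leaves whatever was on the stack in the other fields. Roughly (paraphrased from lib/scatterlist.c, not verbatim):

	void sg_init_table(struct scatterlist *sgl, unsigned int nents)
	{
		memset(sgl, 0, sizeof(*sgl) * nents);	/* clear stale flags/fields */
		sg_init_marker(sgl, nents);		/* only sets the end marker */
	}

The circular-locking fix wraps the send path and sock_release() in a memalloc_noreclaim_save()/restore() pair. That sets PF_MEMALLOC for the region, so allocations made while sending on, or tearing down, the socket do not enter direct reclaim and therefore cannot recurse back into the block/network stack and re-take locks the caller already holds. The scope API is used like this (hypothetical caller; do_socket_work() is illustrative):

	unsigned int noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();	/* sets PF_MEMALLOC */
	do_socket_work();				/* e.g. sending PDUs or sock_release() */
	memalloc_noreclaim_restore(noreclaim_flag);
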