Commit 81409e5e authored by Linus Torvalds

Merge tag 'block-6.3-2023-03-30' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
     - Mark Lexar NM760 as IGNORE_DEV_SUBNQN (Juraj Pecigos)
     - Fix a possible UAF when failing to allocate a TCP io queue (Sagi
       Grimberg)

 - MD pull request via Song:
     - Fix a null pointer dereference in 6.3-rc (Yu Kuai)

 - uevent partition fix (Alyssa)

* tag 'block-6.3-2023-03-30' of git://git.kernel.dk/linux:
  nvme-tcp: fix a possible UAF when failing to allocate an io queue
  md: fix regression for null-ptr-deference in __md_stop()
  nvme-pci: mark Lexar NM760 as IGNORE_DEV_SUBNQN
  loop: LOOP_CONFIGURE: send uevents for partitions
parents f3fa7f02 24ab70d8
drivers/block/loop.c  +9 −9
@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	/* This is safe, since we have a reference from open(). */
 	__module_get(THIS_MODULE);
 
-	/* suppress uevents while reconfiguring the device */
-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
-
 	/*
 	 * If we don't hold exclusive handle for the device, upgrade to it
 	 * here to avoid changing device under exclusive owner.
@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 		}
 	}
 
+	/* suppress uevents while reconfiguring the device */
+	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+
 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
 
@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	if (partscan)
 		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
 
+	/* enable and uncork uevent now that we are done */
+	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+
 	loop_global_unlock(lo, is_loop);
 	if (partscan)
 		loop_reread_partitions(lo);
+
 	if (!(mode & FMODE_EXCL))
 		bd_abort_claiming(bdev, loop_configure);
 
-	error = 0;
-done:
-	/* enable and uncork uevent now that we are done */
-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
-	return error;
+	return 0;
 
 out_unlock:
 	loop_global_unlock(lo, is_loop);
@@ -1130,7 +1130,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	fput(file);
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
-	goto done;
+	return error;
 }
 
 static void __loop_clr_fd(struct loop_device *lo, bool release)
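
The loop.c change above is purely about ordering: uevent suppression now begins only after the exclusive claim has been taken, and it is lifted before loop_reread_partitions() runs, so partitions created by LOOP_CONFIGURE now generate add uevents. For reference, a minimal userspace sketch of the call path this affects; the device path, image name, and the bare-bones error handling are placeholder choices for illustration, not part of the kernel change:

/* Minimal sketch: configure a loop device with partition scanning.
 * Paths are placeholders; run as root against an unused loop device.
 */
#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int loop_fd = open("/dev/loop0", O_RDWR);	/* assumed free loop device */
	int file_fd = open("disk.img", O_RDWR);		/* assumed partitioned image */
	struct loop_config cfg;

	if (loop_fd < 0 || file_fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = file_fd;
	cfg.info.lo_flags = LO_FLAGS_PARTSCAN;

	/* With the patch above, the partition scan triggered by this ioctl
	 * emits add uevents for the new /dev/loop0p* nodes; previously the
	 * scan ran while uevents were still suppressed. */
	if (ioctl(loop_fd, LOOP_CONFIGURE, &cfg) < 0) {
		perror("LOOP_CONFIGURE");
		return 1;
	}

	close(file_fd);
	close(loop_fd);
	return 0;
}

Before the fix, udev never saw the partition devices created by such a call, so rules and tools waiting on those uevents did not fire.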
drivers/md/md.c  +2 −1
@@ -6260,7 +6260,6 @@ static void __md_stop(struct mddev *mddev)
 	module_put(pers->owner);
 	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
-	percpu_ref_exit(&mddev->writes_pending);
 	percpu_ref_exit(&mddev->active_io);
 	bioset_exit(&mddev->bio_set);
 	bioset_exit(&mddev->sync_set);
@@ -6273,6 +6272,7 @@ void md_stop(struct mddev *mddev)
 	 */
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
+	percpu_ref_exit(&mddev->writes_pending);
 }
 
 EXPORT_SYMBOL_GPL(md_stop);
@@ -7843,6 +7843,7 @@ static void md_free_disk(struct gendisk *disk)
 {
 	struct mddev *mddev = disk->private_data;
 
+	percpu_ref_exit(&mddev->writes_pending);
 	mddev_free(mddev);
 }
 
drivers/nvme/host/pci.c  +2 −1
@@ -3441,7 +3441,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
-		.driver_data = NVME_QUIRK_BOGUS_NID, },
+		.driver_data = NVME_QUIRK_BOGUS_NID |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
drivers/nvme/host/tcp.c  +26 −20
@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1655,7 +1640,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
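
Taken together, the nvme-tcp hunks change when the socket callbacks may observe the queue: the setup now runs from nvme_tcp_start_queue() via the new nvme_tcp_setup_sock_ops(), immediately before the connect attempt, and __nvme_tcp_stop_queue() restores the original callbacks before teardown, so a failed queue allocation can no longer leave live callbacks pointing at a queue that is about to be freed. A stand-alone sketch of that publish/unpublish ordering; the names (fake_queue, install via a global pointer) are invented for illustration and are not the driver's API:

/* Schematic only: a shared object is published to an asynchronous consumer
 * (here just a global pointer standing in for sk_user_data) only after it is
 * fully set up, and unpublished before it is freed.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_queue {
	int ready;
};

/* Stands in for the pointer the socket callbacks would dereference. */
static struct fake_queue *published;

static struct fake_queue *alloc_queue(void)
{
	/* Allocation no longer publishes the queue, so failing and freeing
	 * it at this stage cannot leave a dangling pointer behind. */
	return calloc(1, sizeof(struct fake_queue));
}

static int start_queue(struct fake_queue *q)
{
	q->ready = 1;
	published = q;		/* publish only when we are about to use it */
	return 0;
}

static void stop_queue(struct fake_queue *q)
{
	published = NULL;	/* unpublish before the memory goes away */
	free(q);
}

int main(void)
{
	struct fake_queue *q = alloc_queue();

	if (!q)
		return 1;
	start_queue(q);
	stop_queue(q);
	printf("published after teardown: %p\n", (void *)published);
	return 0;
}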