Commit 2383ffc4 authored by Linus Torvalds

Merge tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Main thing here is the fix for the regression in flush handling which
  caused IO hangs/stalls for a few reporters. Hopefully that should all
  be sorted out now. Outside of that, just a few minor fixes for issues
  that were introduced in this cycle"

* tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux:
  blk-mq: release scheduler resource when request completes
  blk-crypto: dynamically allocate fallback profile
  blk-cgroup: hold queue_lock when removing blkg->q_node
  drivers/rnbd: restore sysfs interface to rnbd-client
parents aa9ea98c e5c0ca13
block/blk-cgroup.c (+2 −0)
@@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
+	spin_lock_irq(&q->queue_lock);
 	list_del_init(&blkg->q_node);
+	spin_unlock_irq(&q->queue_lock);
 	mutex_unlock(&q->blkcg_mutex);
 
 	blk_put_queue(q);
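
Context for the blk-cgroup hunk: blkg_free_workfn() used to unlink blkg->q_node with no lock held, while other paths walk q->blkg_list under q->queue_lock, so a concurrent walker could observe the list mid-update. A minimal sketch of the two sides, assuming the usual locked iteration of q->blkg_list (the walker below is an illustrative stand-in, not an exact upstream call site):

	/* Walker side: q->blkg_list is iterated under q->queue_lock. */
	spin_lock_irq(&q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node)
		visit(blkg);			/* hypothetical visitor */
	spin_unlock_irq(&q->queue_lock);

	/* Free side, before the fix: unlocked deletion could race with
	 * the walker above, leaving it on stale list pointers. */
	list_del_init(&blkg->q_node);

	/* After the fix: deletion takes the same lock, so the walker and
	 * the free worker serialize on q->queue_lock. */
	spin_lock_irq(&q->queue_lock);
	list_del_init(&blkg->q_node);
	spin_unlock_irq(&q->queue_lock);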
block/blk-crypto-fallback.c (+23 −13)
@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		bio->bi_status = blk_st;
@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 		return false;
 	}
 
-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
 					&bc->bc_key->crypto_cfg)) {
 		bio->bi_status = BLK_STS_NOTSUPP;
 		return false;
@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }
 
 static bool blk_crypto_fallback_inited;
@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
 {
 	int i;
 	int err;
-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
 
 	if (blk_crypto_fallback_inited)
 		return 0;
@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
 	if (err)
 		goto out;
 
-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-	if (err)
+	/* Dynamic allocation is needed because of lockdep_register_key(). */
+	blk_crypto_fallback_profile =
+		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+	if (!blk_crypto_fallback_profile) {
+		err = -ENOMEM;
 		goto fail_free_bioset;
+	}
+
+	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+				      blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_profile;
 	err = -ENOMEM;
 
-	profile->ll_ops = blk_crypto_fallback_ll_ops;
-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		profile->modes_supported[i] = 0xFFFFFFFF;
-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
 
 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
 					WQ_UNBOUND | WQ_HIGHPRI |
@@ -597,7 +605,9 @@ static int blk_crypto_fallback_init(void)
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(profile);
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
	kfree(blk_crypto_fallback_profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
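
Why the profile moved out of static storage: per the comment the patch itself adds, blk_crypto_profile_init() ends up in lockdep_register_key(), which must not be used on a statically allocated object, hence the kzalloc(). The resulting lifetime, reduced to its shape (a sketch with early returns; the real code unwinds through the goto labels named in the comments):

	blk_crypto_fallback_profile =
		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
	if (!blk_crypto_fallback_profile)
		return -ENOMEM;				/* fail_free_bioset in the patch */

	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
				      blk_crypto_num_keyslots);
	if (err) {
		kfree(blk_crypto_fallback_profile);	/* fail_free_profile */
		return err;
	}

	/* ... and any later failure unwinds in reverse order: */
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
	kfree(blk_crypto_fallback_profile);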
block/blk-mq.c (+20 −3)
@@ -681,6 +681,21 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		q->elevator->type->ops.finish_request(rq);
+		/*
+		 * For postflush request that may need to be
+		 * completed twice, we should clear this flag
+		 * to avoid double finish_request() on the rq.
+		 */
+		rq->rq_flags &= ~RQF_USE_SCHED;
+	}
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -707,9 +722,7 @@ void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	if ((rq->rq_flags & RQF_USE_SCHED) &&
-	    q->elevator->type->ops.finish_request)
-		q->elevator->type->ops.finish_request(rq);
+	blk_mq_finish_request(rq);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->disk->bdi);
@@ -1020,6 +1033,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 	if (blk_mq_need_time_stamp(rq))
 		__blk_mq_end_request_acct(rq, ktime_get_ns());
 
+	blk_mq_finish_request(rq);
+
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
@@ -1074,6 +1089,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		if (iob->need_ts)
 			__blk_mq_end_request_acct(rq, now);
 
+		blk_mq_finish_request(rq);
+
 		rq_qos_done(rq->q, rq);
 
 		/*
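
These blk-mq hunks are the flush-handling fix from the pull message. Previously the elevator's finish_request() hook ran only from blk_mq_free_request(); the fix moves it to completion time, and since a postflush request can be completed twice, the new blk_mq_finish_request() clears RQF_USE_SCHED on first use so the hook is idempotent. A standalone toy (plain userspace C, not kernel code) showing just that test-and-clear pattern:

	#include <stdio.h>

	#define RQF_USE_SCHED (1u << 0)	/* stand-in for the kernel flag */

	struct request { unsigned int rq_flags; };

	/* Safe to call from every completion/free path: hook runs once. */
	static void finish_request_once(struct request *rq)
	{
		if (rq->rq_flags & RQF_USE_SCHED) {
			printf("elevator finish_request()\n");
			rq->rq_flags &= ~RQF_USE_SCHED;	/* later calls are no-ops */
		}
	}

	int main(void)
	{
		struct request rq = { .rq_flags = RQF_USE_SCHED };

		finish_request_once(&rq);	/* first completion (e.g. postflush) */
		finish_request_once(&rq);	/* second completion: nothing to do */
		return 0;
	}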
block/elevator.c (+3 −0)
@@ -499,6 +499,9 @@ void elv_unregister_queue(struct request_queue *q)
 
 int elv_register(struct elevator_type *e)
 {
+	/* finish_request is mandatory */
+	if (WARN_ON_ONCE(!e->ops.finish_request))
+		return -EINVAL;
 	/* insert_requests and dispatch_request are mandatory */
 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
 		return -EINVAL;
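
Because blk_mq_finish_request() now calls the hook without a NULL check, every elevator must provide finish_request(), and elv_register() enforces that at registration time. A hypothetical skeleton (fictional "example" scheduler and handler functions; the field names are the real struct elevator_type members):

	static struct elevator_type example_sched = {
		.ops = {
			.insert_requests  = example_insert_requests,
			.dispatch_request = example_dispatch_request,
			.finish_request   = example_finish_request,	/* now mandatory */
		},
		.elevator_name	= "example",
		.elevator_owner	= THIS_MODULE,
	};

	/* elv_register() returns -EINVAL (with a one-time WARN) if any of
	 * the three mandatory hooks above is NULL. */
	err = elv_register(&example_sched);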
drivers/block/rnbd/rnbd-clt-sysfs.c (+1 −1)
@@ -25,7 +25,7 @@
 
 static struct device *rnbd_dev;
 static const struct class rnbd_dev_class = {
-	.name = "rnbd_client",
+	.name = "rnbd-client",
 };
 static struct kobject *rnbd_devs_kobj;
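
The rnbd hunk reverts a userspace-visible rename: a class's .name becomes its directory under /sys/class/, so registering the class as "rnbd_client" moved the sysfs interface that rnbd tooling expects at the hyphenated path. A minimal sketch of that dependency (illustrative only, not the full driver init; exact sysfs paths inferred from how class registration works):

	static const struct class rnbd_dev_class = {
		.name = "rnbd-client",	/* appears as /sys/class/rnbd-client */
	};

	static int __init rnbd_client_example_init(void)
	{
		/* With .name = "rnbd_client", this same call created
		 * /sys/class/rnbd_client instead, breaking existing users. */
		return class_register(&rnbd_dev_class);
	}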