Commit 1dbe7e38 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Was going to send this one in later this week, but given that -Werror
  is now enabled (or at least available), the mq-deadline fix really
  should go in for the folks hitting that.

   - Ensure dd_queued() is only there if needed (Geert)

   - Fix a kerneldoc warning for bio_alloc_kiocb()

   - BFQ fix for queue merging

   - loop locking fix (Tetsuo)"

* tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block:
  loop: reduce the loop_ctl_mutex scope
  bio: fix kerneldoc documentation for bio_alloc_kiocb()
  block, bfq: honor already-setup queue merges
  block/mq-deadline: Move dd_queued() to fix defined but not used warning
parents 03085b3d 1c500ad7
Loading
Loading
Loading
Loading
+13 −3
Original line number Diff line number Diff line
@@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
	 * are likely to increase the throughput.
	 */
	bfqq->new_bfqq = new_bfqq;
	/*
	 * The above assignment schedules the following redirections:
	 * each time some I/O for bfqq arrives, the process that
	 * generated that I/O is disassociated from bfqq and
 * associated with new_bfqq. Here we increase new_bfqq->ref
	 * in advance, adding the number of processes that are
	 * expected to be associated with new_bfqq as they happen to
	 * issue I/O.
	 */
	new_bfqq->ref += process_refs;
	return new_bfqq;
}
@@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
	struct bfq_queue *in_service_bfqq, *new_bfqq;

	/* if a merge has already been setup, then proceed with that first */
	if (bfqq->new_bfqq)
		return bfqq->new_bfqq;

	/*
	 * Check delayed stable merge for rotational or non-queueing
	 * devs. For this branch to be executed, bfqq must not be
@@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
	if (bfq_too_late_for_merging(bfqq))
		return NULL;

	if (bfqq->new_bfqq)
		return bfqq->new_bfqq;

	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
		return NULL;

+1 −1
Original line number Diff line number Diff line
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL(bioset_init_from_src);
/**
 * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
 * @kiocb:	kiocb describing the IO
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @nr_vecs:	number of iovecs to pre-allocate
 * @bs:		bio_set to allocate from
 *
 * Description:
+6 −6
Original line number Diff line number Diff line
@@ -270,12 +270,6 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
@@ -953,6 +947,12 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
	return 0;
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
+49 −26
Original line number Diff line number Diff line
@@ -2111,18 +2111,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&lo->lo_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&lo->lo_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
@@ -2130,9 +2118,20 @@ int loop_unregister_transfer(int number)

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;
	/*
	 * This function is only called from cleanup_cryptoloop().
	 * Given that each loop device that has a transfer enabled holds a
	 * reference to the module implementing it we should never get here
	 * with a transfer that is set (unless forced module unloading is
	 * requested). Thus, check module's refcount and warn if this is
	 * not a clean unloading.
	 */
#ifdef CONFIG_MODULE_UNLOAD
	if (xfer->owner && module_refcount(xfer->owner) != -1)
		pr_err("Danger! Unregistering an in use transfer function.\n");
#endif

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

@@ -2323,8 +2322,9 @@ static int loop_add(int i)
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	mutex_unlock(&loop_ctl_mutex);
	if (err < 0)
		goto out_unlock;
		goto out_free_dev;
	i = err;

	err = -ENOMEM;
@@ -2393,15 +2393,19 @@ static int loop_add(int i)
	disk->events		= DISK_EVENT_MEDIA_CHANGE;
	disk->event_flags	= DISK_EVENT_FLAG_UEVENT;
	sprintf(disk->disk_name, "loop%d", i);
	/* Make this loop device reachable from pathname. */
	add_disk(disk);
	/* Show this loop device. */
	mutex_lock(&loop_ctl_mutex);
	lo->idr_visible = true;
	mutex_unlock(&loop_ctl_mutex);
	return i;

out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	mutex_lock(&loop_ctl_mutex);
	idr_remove(&loop_index_idr, i);
out_unlock:
	mutex_unlock(&loop_ctl_mutex);
out_free_dev:
	kfree(lo);
@@ -2411,9 +2415,14 @@ static int loop_add(int i)

static void loop_remove(struct loop_device *lo)
{
	/* Make this loop device unreachable from pathname. */
	del_gendisk(lo->lo_disk);
	blk_cleanup_disk(lo->lo_disk);
	blk_mq_free_tag_set(&lo->tag_set);
	mutex_lock(&loop_ctl_mutex);
	idr_remove(&loop_index_idr, lo->lo_number);
	mutex_unlock(&loop_ctl_mutex);
	/* There is no route which can find this loop device. */
	mutex_destroy(&lo->lo_mutex);
	kfree(lo);
}
@@ -2437,31 +2446,40 @@ static int loop_control_remove(int idx)
		return -EINVAL;
	}
		
	/* Hide this loop device for serialization. */
	ret = mutex_lock_killable(&loop_ctl_mutex);
	if (ret)
		return ret;

	lo = idr_find(&loop_index_idr, idx);
	if (!lo) {
	if (!lo || !lo->idr_visible)
		ret = -ENODEV;
		goto out_unlock_ctrl;
	}
	else
		lo->idr_visible = false;
	mutex_unlock(&loop_ctl_mutex);
	if (ret)
		return ret;

	/* Check whether this loop device can be removed. */
	ret = mutex_lock_killable(&lo->lo_mutex);
	if (ret)
		goto out_unlock_ctrl;
		goto mark_visible;
	if (lo->lo_state != Lo_unbound ||
	    atomic_read(&lo->lo_refcnt) > 0) {
		mutex_unlock(&lo->lo_mutex);
		ret = -EBUSY;
		goto out_unlock_ctrl;
		goto mark_visible;
	}
	/* Mark this loop device no longer open()-able. */
	lo->lo_state = Lo_deleting;
	mutex_unlock(&lo->lo_mutex);

	idr_remove(&loop_index_idr, lo->lo_number);
	loop_remove(lo);
out_unlock_ctrl:
	return 0;

mark_visible:
	/* Show this loop device again. */
	mutex_lock(&loop_ctl_mutex);
	lo->idr_visible = true;
	mutex_unlock(&loop_ctl_mutex);
	return ret;
}
@@ -2475,7 +2493,8 @@ static int loop_control_get_free(int idx)
	if (ret)
		return ret;
	idr_for_each_entry(&loop_index_idr, lo, id) {
		if (lo->lo_state == Lo_unbound)
		/* Hitting a race results in creating a new loop device which is harmless. */
		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
			goto found;
	}
	mutex_unlock(&loop_ctl_mutex);
@@ -2591,10 +2610,14 @@ static void __exit loop_exit(void)
	unregister_blkdev(LOOP_MAJOR, "loop");
	misc_deregister(&loop_misc);

	mutex_lock(&loop_ctl_mutex);
	/*
	 * There is no need to use loop_ctl_mutex here, for nobody else can
	 * access loop_index_idr when this module is unloading (unless forced
	 * module unloading is requested). If this is not a clean unloading,
	 * we have no means to avoid kernel crash.
	 */
	idr_for_each_entry(&loop_index_idr, lo, id)
		loop_remove(lo);
	mutex_unlock(&loop_ctl_mutex);

	idr_destroy(&loop_index_idr);
}
+1 −0
Original line number Diff line number Diff line
@@ -68,6 +68,7 @@ struct loop_device {
	struct blk_mq_tag_set	tag_set;
	struct gendisk		*lo_disk;
	struct mutex		lo_mutex;
	bool			idr_visible;
};

struct loop_cmd {