Commit 1231039d authored by Christoph Hellwig, committed by Jens Axboe

Revert "blk-cgroup: move the cgroup information to struct gendisk"



This reverts commit 3f13ab7c as a patch
it depends on caused a few problems.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230214183308.1658775-2-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2f1e07dd
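
In short, the revert moves the blk-cgroup state back from struct gendisk to struct request_queue: the root_blkg pointer, the blkg_list with its blkcg_mutex, and the blkcg_pols bitmap, while each blkcg_gq links into the queue through q_node again. A sketch of the affected fields after the revert, abridged from the hunks below (kernel types, not the complete structure definitions):

	/* abridged: struct request_queue regains the blk-cgroup state */
	struct request_queue {
		struct blkcg_gq		*root_blkg;	/* blkg of the root cgroup */
		struct list_head	blkg_list;	/* all blkgs of this queue */
		struct mutex		blkcg_mutex;	/* blkg free vs. policy deactivate */
		DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS); /* enabled policies */
		/* ... */
	};

	/* abridged: a blkg links into its queue through q_node again */
	struct blkcg_gq {
		struct gendisk		*disk;
		struct list_head	q_node;		/* entry on q->blkg_list */
		/* ... */
	};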
block/bfq-cgroup.c +2 −2
@@ -999,7 +999,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

-	list_for_each_entry(blkg, &bfqd->queue->disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
@@ -1293,7 +1293,7 @@ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
	if (ret)
		return NULL;

-	return blkg_to_bfqg(bfqd->queue->disk->root_blkg);
+	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
block/blk-cgroup.c +33 −33
@@ -108,10 +108,10 @@ static struct cgroup_subsys_state *blkcg_css(void)
	return task_css(current, io_cgrp_id);
}

-static bool blkcg_policy_enabled(struct gendisk *disk,
+static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
-	return pol && test_bit(pol->plid, disk->blkcg_pols);
+	return pol && test_bit(pol->plid, q->blkcg_pols);
}

static void blkg_free_workfn(struct work_struct *work)
@@ -123,18 +123,18 @@ static void blkg_free_workfn(struct work_struct *work)
	/*
	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
	 * in order to make sure pd_free_fn() is called in order, the deletion
-	 * of the list blkg->entry is delayed to here from blkg_destroy(), and
+	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
	 * blkcg_deactivate_policy().
	 */
-	mutex_lock(&blkg->disk->blkcg_mutex);
+	mutex_lock(&blkg->disk->queue->blkcg_mutex);
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
	if (blkg->parent)
		blkg_put(blkg->parent);
-	list_del_init(&blkg->entry);
-	mutex_unlock(&blkg->disk->blkcg_mutex);
+	list_del_init(&blkg->q_node);
+	mutex_unlock(&blkg->disk->queue->blkcg_mutex);

	put_disk(blkg->disk);
	free_percpu(blkg->iostat_cpu);
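
The comment in blkg_free_workfn() above explains why the list unlink lives here: deferring it to the worker, under the same blkcg_mutex that blkcg_deactivate_policy() takes, keeps the per-policy pd_free_fn() calls ordered between the two paths. A minimal userspace analogue of that pattern (hypothetical names, pthreads standing in for the kernel workqueue):

	#include <pthread.h>
	#include <stdlib.h>

	struct obj {
		struct obj *next;
		void (*free_pd)(struct obj *);	/* per-policy free hook */
	};

	static pthread_mutex_t teardown_mutex = PTHREAD_MUTEX_INITIALIZER;
	static struct obj *obj_list;

	/* async worker, like blkg_free_workfn(): the unlink is deferred to here */
	static void *free_workfn(void *arg)
	{
		struct obj *o = arg;

		pthread_mutex_lock(&teardown_mutex);
		if (o->free_pd)
			o->free_pd(o);		/* stays ordered vs. deactivate */
		for (struct obj **p = &obj_list; *p; p = &(*p)->next)
			if (*p == o) {
				*p = o->next;	/* the deferred list_del_init() */
				break;
			}
		pthread_mutex_unlock(&teardown_mutex);
		free(o);
		return NULL;
	}

	/* like blkcg_deactivate_policy(): same mutex, so this walk never
	 * observes a half-freed object */
	static void deactivate_all(void)
	{
		pthread_mutex_lock(&teardown_mutex);
		for (struct obj *o = obj_list; o; o = o->next)
			if (o->free_pd) {
				o->free_pd(o);
				o->free_pd = NULL;	/* freed exactly once */
			}
		pthread_mutex_unlock(&teardown_mutex);
	}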
@@ -269,7 +269,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
	get_device(disk_to_dev(disk));
	blkg->disk = disk;

-	INIT_LIST_HEAD(&blkg->entry);
+	INIT_LIST_HEAD(&blkg->q_node);
	spin_lock_init(&blkg->async_bio_lock);
	bio_list_init(&blkg->async_bios);
	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
@@ -285,7 +285,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

-		if (!blkcg_policy_enabled(disk, pol))
+		if (!blkcg_policy_enabled(disk->queue, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
@@ -371,7 +371,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-		list_add(&blkg->entry, &disk->blkg_list);
+		list_add(&blkg->q_node, &disk->queue->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];
@@ -444,7 +444,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);
-		struct blkcg_gq *ret_blkg = disk->root_blkg;
+		struct blkcg_gq *ret_blkg = q->root_blkg;

		while (parent) {
			blkg = blkg_lookup(parent, disk);
@@ -526,7 +526,7 @@ static void blkg_destroy_all(struct gendisk *disk)

restart:
	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry_safe(blkg, n, &disk->blkg_list, entry) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
@@ -545,7 +545,7 @@ static void blkg_destroy_all(struct gendisk *disk)
		}
	}

-	disk->root_blkg = NULL;
+	q->root_blkg = NULL;
	spin_unlock_irq(&q->queue_lock);
}

@@ -620,7 +620,7 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(&blkg->disk->queue->queue_lock);
-		if (blkcg_policy_enabled(blkg->disk, pol))
+		if (blkcg_policy_enabled(blkg->disk->queue, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(&blkg->disk->queue->queue_lock);
	}
@@ -728,7 +728,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
	rcu_read_lock();
	spin_lock_irq(&q->queue_lock);

-	if (!blkcg_policy_enabled(disk, pol)) {
+	if (!blkcg_policy_enabled(q, pol)) {
		ret = -EOPNOTSUPP;
		goto fail_unlock;
	}
@@ -771,7 +771,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		rcu_read_lock();
		spin_lock_irq(&q->queue_lock);

-		if (!blkcg_policy_enabled(disk, pol)) {
+		if (!blkcg_policy_enabled(q, pol)) {
			blkg_free(new_blkg);
			ret = -EOPNOTSUPP;
			goto fail_preloaded;
@@ -951,7 +951,7 @@ static void blkcg_fill_root_iostats(void)
	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct block_device *bdev = dev_to_bdev(dev);
-		struct blkcg_gq *blkg = bdev->bd_disk->root_blkg;
+		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
		struct blkg_iostat tmp;
		int cpu;
		unsigned long flags;
@@ -1298,8 +1298,8 @@ int blkcg_init_disk(struct gendisk *disk)
	bool preloaded;
	int ret;

-	INIT_LIST_HEAD(&disk->blkg_list);
-	mutex_init(&disk->blkcg_mutex);
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);

	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
	if (!new_blkg)
@@ -1313,7 +1313,7 @@ int blkcg_init_disk(struct gendisk *disk)
	blkg = blkg_create(&blkcg_root, disk, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
-	disk->root_blkg = blkg;
+	q->root_blkg = blkg;
	spin_unlock_irq(&q->queue_lock);

	if (preloaded)
@@ -1426,7 +1426,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
	struct blkcg_gq *blkg, *pinned_blkg = NULL;
	int ret;

-	if (blkcg_policy_enabled(disk, pol))
+	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (queue_is_mq(q))
@@ -1435,7 +1435,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
	spin_lock_irq(&q->queue_lock);

	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
@@ -1480,16 +1480,16 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)

	/* all allocated, init in the same order */
	if (pol->pd_init_fn)
-		list_for_each_entry_reverse(blkg, &disk->blkg_list, entry)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
			pol->pd_init_fn(blkg->pd[pol->plid]);

-	list_for_each_entry_reverse(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
		if (pol->pd_online_fn)
			pol->pd_online_fn(blkg->pd[pol->plid]);
		blkg->pd[pol->plid]->online = true;
	}

-	__set_bit(pol->plid, disk->blkcg_pols);
+	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(&q->queue_lock);
@@ -1505,7 +1505,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
enomem:
	/* alloc failed, nothing's initialized yet, free everything */
	spin_lock_irq(&q->queue_lock);
-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
@@ -1535,18 +1535,18 @@ void blkcg_deactivate_policy(struct gendisk *disk,
	struct request_queue *q = disk->queue;
	struct blkcg_gq *blkg;

-	if (!blkcg_policy_enabled(disk, pol))
+	if (!blkcg_policy_enabled(q, pol))
		return;

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);

-	mutex_lock(&disk->blkcg_mutex);
+	mutex_lock(&q->blkcg_mutex);
	spin_lock_irq(&q->queue_lock);

-	__clear_bit(pol->plid, disk->blkcg_pols);
+	__clear_bit(pol->plid, q->blkcg_pols);

-	list_for_each_entry(blkg, &disk->blkg_list, entry) {
+	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
@@ -1560,7 +1560,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
	}

	spin_unlock_irq(&q->queue_lock);
-	mutex_unlock(&disk->blkcg_mutex);
+	mutex_unlock(&q->blkcg_mutex);

	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);
@@ -1957,7 +1957,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  An association failure is handled by walking up
 * the blkg tree.  Therefore, the blkg associated can be anything between @blkg
- * and disk->root_blkg.  This situation only happens when a cgroup is dying and
+ * and q->root_blkg.  This situation only happens when a cgroup is dying and
 * then the remaining bios will spill to the closest alive blkg.
 *
 * A reference will be taken on the blkg and will be released when @bio is
@@ -1972,8 +1972,8 @@ void bio_associate_blkg_from_css(struct bio *bio,
	if (css && css->parent) {
		bio->bi_blkg = blkg_tryget_closest(bio, css);
	} else {
-		blkg_get(bio->bi_bdev->bd_disk->root_blkg);
-		bio->bi_blkg = bio->bi_bdev->bd_disk->root_blkg;
+		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
+		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
	}
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
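
The kernel-doc above this hunk describes the fallback behaviour: if the css's own blkg cannot be pinned (for example because its cgroup is dying), the association walks up the blkg tree and settles on the closest alive ancestor, ultimately the root blkg. A minimal sketch of that walk (hypothetical type, not the kernel implementation):

	/* stand-in type; the kernel walks struct blkcg_gq parents */
	struct blkg_sketch {
		struct blkg_sketch *parent;
		int online;			/* 0 once its cgroup is dying */
	};

	/* climb toward the root until an alive group is found */
	static struct blkg_sketch *tryget_closest(struct blkg_sketch *blkg)
	{
		while (blkg->parent && !blkg->online)
			blkg = blkg->parent;	/* spill to nearest alive ancestor */
		return blkg;			/* the root blkg is always alive */
	}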
block/blk-cgroup.h +2 −2
@@ -54,7 +54,7 @@ struct blkg_iostat_set {
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	struct gendisk			*disk;
-	struct list_head		entry;
+	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

@@ -250,7 +250,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (blkcg == &blkcg_root)
-		return disk->root_blkg;
+		return disk->queue->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->disk == disk)
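
The tail of blkg_lookup() above (truncated by the page) checks blkcg->blkg_hint, a per-cgroup cache of the most recently resolved blkg, before falling back to the radix-tree walk. A simplified sketch of the hint idea (hypothetical names, RCU details omitted):

	struct disk;				/* opaque here */
	struct group { struct disk *disk; };	/* stand-in for blkcg_gq */

	struct cgroup_cache {
		struct group *hint;		/* last successful lookup */
	};

	static struct group *lookup(struct cgroup_cache *cg, struct disk *disk,
				    struct group *(*slow_path)(struct disk *))
	{
		struct group *g = cg->hint;

		if (g && g->disk == disk)	/* hint hit: skip the tree walk */
			return g;
		g = slow_path(disk);		/* radix-tree lookup in the kernel */
		if (g)
			cg->hint = g;		/* refresh the hint for next time */
		return g;
	}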
block/blk-iolatency.c +1 −1
@@ -665,7 +665,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
-				     blkiolat->rqos.disk->root_blkg) {
+				     blkiolat->rqos.disk->queue->root_blkg) {
		struct iolatency_grp *iolat;
		struct child_latency_info *lat_info;
		unsigned long flags;
block/blk-throttle.c +6 −10
@@ -451,8 +451,7 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
	bool low_valid = false;

	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-			td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
@@ -1181,7 +1180,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)

	spin_lock_irq(&q->queue_lock);

-	if (!q->disk->root_blkg)
+	if (!q->root_blkg)
		goto out_unlock;

	if (throtl_can_upgrade(td, NULL))
@@ -1323,8 +1322,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
	 * blk-throttle.
	 */
	blkg_for_each_descendant_pre(blkg, pos_css,
-			global ? tg->td->queue->disk->root_blkg :
-			tg_to_blkg(tg)) {
+			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
		struct throtl_grp *this_tg = blkg_to_tg(blkg);
		struct throtl_grp *parent_tg;

@@ -1719,7 +1717,7 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
	 * path need RCU protection and to prevent warning from lockdep.
	 */
	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css, disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;

@@ -1873,8 +1871,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
		return false;

	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-			td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg == this_tg)
@@ -1920,8 +1917,7 @@ static void throtl_upgrade_state(struct throtl_data *td)
	td->low_upgrade_time = jiffies;
	td->scale = 0;
	rcu_read_lock();
-	blkg_for_each_descendant_post(blkg, pos_css,
-			td->queue->disk->root_blkg) {
+	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);
		struct throtl_service_queue *sq = &tg->service_queue;
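
The throttle hunks all iterate the cgroup subtree with blkg_for_each_descendant_pre/_post: pre-order visits a group before its children (tg_conf_updated propagates updated limits downward, parents first), while post-order visits children before their parent. A minimal recursive analogue of the two orders (illustrative only; the kernel macros walk the css tree under RCU):

	struct grp {
		struct grp *child;		/* first child */
		struct grp *sibling;		/* next sibling */
	};

	static void walk_pre(struct grp *g, void (*fn)(struct grp *))
	{
		if (!g)
			return;
		fn(g);				/* parent before children */
		for (struct grp *c = g->child; c; c = c->sibling)
			walk_pre(c, fn);
	}

	static void walk_post(struct grp *g, void (*fn)(struct grp *))
	{
		if (!g)
			return;
		for (struct grp *c = g->child; c; c = c->sibling)
			walk_post(c, fn);
		fn(g);				/* children before parent */
	}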
