Commit 3480373e authored by Christoph Hellwig's avatar Christoph Hellwig Committed by David Sterba
Browse files

btrfs, block: move REQ_CGROUP_PUNT to btrfs



REQ_CGROUP_PUNT is a bit annoying as it is hard to follow and adds
a branch to the bio submission hot path.  To fix this, export
blkcg_punt_bio_submit and let btrfs call it directly.  Add a new
REQ_FS_PRIVATE flag for btrfs to indicate to its own low-level
bio submission code that a punt to the cgroup submission helper
is required.

Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 0a0596fb
Loading
Loading
Loading
Loading
+17 −14
Original line number Diff line number Diff line
@@ -1688,24 +1688,27 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);

bool __blkcg_punt_bio_submit(struct bio *bio)
/*
 * When a shared kthread issues a bio for a cgroup, doing so synchronously can
 * lead to priority inversions as the kthread can be trapped waiting for that
 * cgroup.  Use this helper instead of submit_bio to punt the actual issuing to
 * a dedicated per-blkcg work item to avoid such priority inversions.
 */
void blkcg_punt_bio_submit(struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;

	/* consume the flag first */
	bio->bi_opf &= ~REQ_CGROUP_PUNT;

	/* never bounce for the root cgroup */
	if (!blkg->parent)
		return false;

	if (blkg->parent) {
		spin_lock_bh(&blkg->async_bio_lock);
		bio_list_add(&blkg->async_bios, bio);
		spin_unlock_bh(&blkg->async_bio_lock);

		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
	return true;
	} else {
		/* never bounce for the root cgroup */
		submit_bio(bio);
	}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);

/*
 * Scale the accumulated delay based on how long it has been since we updated
+0 −12
Original line number Diff line number Diff line
@@ -375,16 +375,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
		if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css),	\
					    (p_blkg)->q)))

bool __blkcg_punt_bio_submit(struct bio *bio);

/* Hand the bio to __blkcg_punt_bio_submit when REQ_CGROUP_PUNT is set;
 * returns true if the bio was punted, false if the caller should submit it. */
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (!(bio->bi_opf & REQ_CGROUP_PUNT))
		return false;
	return __blkcg_punt_bio_submit(bio);
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
@@ -506,8 +496,6 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

/* No-op stub: never punts (presumably the !CONFIG_BLK_CGROUP build — verify). */
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
+0 −3
Original line number Diff line number Diff line
@@ -830,9 +830,6 @@ EXPORT_SYMBOL(submit_bio_noacct);
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
+8 −4
Original line number Diff line number Diff line
@@ -435,6 +435,10 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
		dev->devid, bio->bi_iter.bi_size);

	btrfsic_check_bio(bio);

	if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
		blkcg_punt_bio_submit(bio);
	else
		submit_bio(bio);
}

@@ -551,10 +555,10 @@ static void run_one_async_done(struct btrfs_work *work)

	/*
	 * All of the bios that pass through here are from async helpers.
	 * Use REQ_CGROUP_PUNT to issue them from the owning cgroup's context.
	 * This changes nothing when cgroups aren't in use.
	 * Use REQ_BTRFS_CGROUP_PUNT to issue them from the owning cgroup's
	 * context.  This changes nothing when cgroups aren't in use.
	 */
	bio->bi_opf |= REQ_CGROUP_PUNT;
	bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;
	__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}

+3 −0
Original line number Diff line number Diff line
@@ -88,6 +88,9 @@ static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
/* Bio only refers to one ordered extent. */
#define REQ_BTRFS_ONE_ORDERED			REQ_DRV

/* Submit using blkcg_punt_bio_submit. */
#define REQ_BTRFS_CGROUP_PUNT			REQ_FS_PRIVATE

void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num);
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
			    u64 length, u64 logical, struct page *page,
Loading