Commit 5ef16305 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

block: only build the icq tracking code when needed



Only bfq needs the code to track icq, so make it conditional.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20211209063131.18537-12-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 90b627f5
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -35,6 +35,9 @@ config BLK_CGROUP_RWSTAT
config BLK_DEV_BSG_COMMON
	tristate

config BLK_ICQ
	bool

config BLK_DEV_BSGLIB
	bool "Block layer SG support v4 helper lib"
	select BLK_DEV_BSG_COMMON
+1 −0
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@ config MQ_IOSCHED_KYBER

config IOSCHED_BFQ
	tristate "BFQ I/O scheduler"
	select BLK_ICQ
	help
	BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
	the device among all processes according to their weights,
+41 −27
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
@@ -162,6 +163,42 @@ static bool ioc_delay_free(struct io_context *ioc)
	return false;
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}
#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
@@ -193,33 +230,6 @@ void exit_io_context(struct task_struct *task)
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}

static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
@@ -231,10 +241,12 @@ static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	return ioc;
}

@@ -300,6 +312,7 @@ int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
	return 0;
}

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
@@ -428,6 +441,7 @@ struct io_cq *ioc_find_get_icq(struct request_queue *q)
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
#endif /* CONFIG_BLK_ICQ */

static int __init blk_ioc_init(void)
{
+6 −0
Original line number Diff line number Diff line
@@ -366,7 +366,13 @@ static inline unsigned int bio_aligned_discard_max_sectors(
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
+4 −2
Original line number Diff line number Diff line
@@ -100,16 +100,18 @@ struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;

	unsigned short ioprio;

#ifdef CONFIG_BLK_ICQ
	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
#endif /* CONFIG_BLK_ICQ */
};

struct task_struct;