Commit e6f89be6 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: introduce a struct for hash table



Instead of passing around a pointer to hash buckets, add a bit of type
safety and wrap it into a structure.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d65bc3faba537ec2aca9eabf334394936d44bd28.1655371007.git.asml.silence@gmail.com


Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a2cdd519
Loading
Loading
Loading
Loading
+3 −3
Original line number Original line Diff line number Diff line
@@ -193,12 +193,12 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
	return IOU_OK;
	return IOU_OK;
}
}


void init_hash_table(struct io_hash_bucket *hash_table, unsigned size)
void init_hash_table(struct io_hash_table *table, unsigned size)
{
{
	unsigned int i;
	unsigned int i;


	for (i = 0; i < size; i++) {
	for (i = 0; i < size; i++) {
		spin_lock_init(&hash_table[i].lock);
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&hash_table[i].list);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
	}
}
}
+1 −6
Original line number Original line Diff line number Diff line
@@ -4,9 +4,4 @@ int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);


int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
void init_hash_table(struct io_hash_bucket *hash_table, unsigned size);
void init_hash_table(struct io_hash_table *table, unsigned size);

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;
+2 −2
Original line number Original line Diff line number Diff line
@@ -158,8 +158,8 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
		mutex_unlock(&ctx->uring_lock);
		mutex_unlock(&ctx->uring_lock);


	seq_puts(m, "PollList:\n");
	seq_puts(m, "PollList:\n");
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_kiocb *req;
		struct io_kiocb *req;


		spin_lock(&hb->lock);
		spin_lock(&hb->lock);
+17 −12
Original line number Original line Diff line number Diff line
@@ -241,11 +241,23 @@ static __cold void io_fallback_req_func(struct work_struct *work)
	percpu_ref_put(&ctx->refs);
	percpu_ref_put(&ctx->refs);
}
}


/*
 * Allocate a cancellation hash table with 1 << bits buckets and
 * initialise each bucket's spinlock and list head via init_hash_table().
 *
 * Returns 0 on success, -ENOMEM if the bucket array allocation fails
 * (in which case *table is left untouched apart from ->hbs being NULL).
 */
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	/* record the size only after the allocation has succeeded */
	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
{
	struct io_ring_ctx *ctx;
	struct io_ring_ctx *ctx;
	unsigned hash_buckets;
	size_t hash_size;
	int hash_bits;
	int hash_bits;


	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -261,16 +273,9 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
	 */
	 */
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	hash_bits = clamp(hash_bits, 1, 8);
	hash_buckets = 1U << hash_bits;
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
	hash_size = hash_buckets * sizeof(struct io_hash_bucket);

	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc(hash_size, GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
		goto err;


	init_hash_table(ctx->cancel_hash, hash_buckets);

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
	if (!ctx->dummy_ubuf)
		goto err;
		goto err;
@@ -311,7 +316,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
	return ctx;
	return ctx;
err:
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_hash);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->io_bl);
	kfree(ctx->io_bl);
	xa_destroy(&ctx->io_bl_xa);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	kfree(ctx);
@@ -2487,7 +2492,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
	io_req_caches_free(ctx);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
		io_wq_put_hash(ctx->hash_map);
	kfree(ctx->cancel_hash);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->dummy_ubuf);
	kfree(ctx->dummy_ubuf);
	kfree(ctx->io_bl);
	kfree(ctx->io_bl);
	xa_destroy(&ctx->io_bl_xa);
	xa_destroy(&ctx->io_bl_xa);
+11 −2
Original line number Original line Diff line number Diff line
@@ -9,6 +9,16 @@
#include "io-wq.h"
#include "io-wq.h"
#include "filetable.h"
#include "filetable.h"


/*
 * One bucket of the cancellation hash: a per-bucket lock protecting the
 * hlist of requests hashed into it.  Cacheline-aligned so adjacent
 * buckets do not share a cacheline on SMP.
 */
struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

/*
 * The hash table itself: hbs points at an array of 1 << hash_bits
 * buckets (allocated in io_alloc_hash_table()).
 */
struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

struct io_uring {
struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
@@ -224,8 +234,7 @@ struct io_ring_ctx {
		 * manipulate the list, hence no extra locking is needed there.
		 * manipulate the list, hence no extra locking is needed there.
		 */
		 */
		struct io_wq_work_list	iopoll_list;
		struct io_wq_work_list	iopoll_list;
		struct io_hash_bucket	*cancel_hash;
		struct io_hash_table	cancel_table;
		unsigned		cancel_hash_bits;
		bool			poll_multi_queue;
		bool			poll_multi_queue;


		struct list_head	io_buffers_comp;
		struct list_head	io_buffers_comp;
Loading