Commit 2622aa71 authored by Bob Pearson, committed by Jason Gunthorpe
Browse files

RDMA/rxe: Make pool lookup and alloc APIs type safe

The allocate, lookup index, lookup key and cleanup routines in rxe_pool.c
currently are not type safe against relocating the pelem field in the
objects. Planned changes to move allocation of objects into rdma-core make
addressing this a requirement.

Use the elem_offset field in rxe_type_info to make these APIs safe against
moving the pelem field.

Link: https://lore.kernel.org/r/20201216231550.27224-5-rpearson@hpe.com


Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b994d49e
Loading
Loading
Loading
Loading
+38 −17
Original line number Diff line number Diff line
@@ -315,7 +315,9 @@ void rxe_drop_index(void *arg)

void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
@@ -334,16 +336,17 @@ void *rxe_alloc(struct rxe_pool *pool)
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem = kzalloc(rxe_type_info[pool->type].size,
				 (pool->flags & RXE_POOL_ATOMIC) ?
	obj = kzalloc(info->size, (pool->flags & RXE_POOL_ATOMIC) ?
		      GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;
	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
@@ -391,12 +394,17 @@ void rxe_elem_release(struct kref *kref)
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC))
		kfree(elem);
	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
	rxe_pool_put(pool);
@@ -404,8 +412,10 @@ void rxe_elem_release(struct kref *kref)

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj = NULL;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
@@ -422,21 +432,28 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else {
			kref_get(&elem->ref_cnt);
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj = NULL;
	int cmp;
	unsigned long flags;

@@ -461,10 +478,14 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
			break;
	}

	if (node)
	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
	return obj;
}