Unverified Commit 4511bbb6 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!2350 UB driver: add implementation of urma ubcore and uburma module

Merge Pull Request from: @yizhen-fan 
 
 **[Content]** 

Add implementation of urma ubcore and uburma module 

The ubcore and uburma modules contain three main functional areas:

**1. Device management**
Including device registration, device attributes management, cmd handle, 
etc.

**2. Jetty-related function:**

Jetty is a logical concept, an I/O unit that provides
end-to-end communication services. Jetty is classified into three types:
Jetty for Send, Jetty for Receive, and Jetty for Completion.
Each Jetty has a queue that maintains its sending and receiving requests.
1. JFS (Jetty for send): used to submit a DMA task
or send a message on the user side.
2. JFR (jetty for receive): used to prepare a resource for
receiving a message on the home side.
3. JFC (jetty for complete): stores JFS/JFR completion events.
It can be bound to JFS or JFR, or specified separately in specific
URMA commands.
4. jetty: Jetty is duplex and contains both jfs and jfr functions.
5. tjetty (target jetty): represent remote JFR/Jetty imported to local.
6. jetty id: represents a globally unique jetty, constructed from the eid,
uasid and the id itself.
A Jetty for Send (JFS) can implement the remote memory DMA service
on one side. JFS (source end) + JFR (destination end) can implement
bilateral message services.

**3. Tp-related function:**

TP: As defined in the UB protocol,
the transport layer is established between two UB ports
to provide reliability, congestion control, and multiplexing. 
 
Link:https://gitee.com/openeuler/kernel/pulls/2350

 

Reviewed-by: default avatarLijun Li <jerry.lilijun@huawei.com>
Signed-off-by: default avatarLijun Li <jerry.lilijun@huawei.com>
parents 7e9c9592 3e5fd29d
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -5,8 +5,13 @@

ubcore-objs := ubcore_main.o \
			ubcore_device.o	\
			ubcore_jetty.o \
			ubcore_segment.o \
			ubcore_umem.o \
			ubcore_hash_table.o \
			ubcore_tp.o \
			ubcore_netlink.o
			ubcore_tp_table.o \
			ubcore_netlink.o \
			ubcore_dp.o

obj-$(CONFIG_UB) += ubcore.o
+33 −0
Original line number Diff line number Diff line
@@ -81,6 +81,39 @@ struct ubcore_cmd_query_stats {
	} out;
};

/* Command payload for querying a device resource. */
struct ubcore_cmd_query_res {
	struct {
		char dev_name[UBCORE_MAX_DEV_NAME];	/* target device name */
		uint8_t eid[UBCORE_CMD_EID_SIZE];	/* entity id identifying the endpoint */
		uint32_t tp_type;	/* transport type of the resource — presumably; verify against driver */
		uint32_t type;		/* resource type to query */
		uint32_t key;		/* resource key/id */
	} in;
	struct {
		uint64_t addr;	/* buffer address receiving the result — presumably user space; confirm */
		uint32_t len;	/* length of that buffer in bytes */
	} out;
};

/* Command payload for configuring a UTP (UDP port settings of the transport). */
struct ubcore_cmd_set_utp {
	struct {
		char dev_name[UBCORE_MAX_DEV_NAME];	/* target device name */
		uint8_t eid[UBCORE_CMD_EID_SIZE];	/* entity id identifying the endpoint */
		uint32_t transport_type;	/* transport type selector */
		bool spray_en;		/* enable UDP port spraying (multipath) — TODO confirm */
		uint16_t data_udp_start;	/* first UDP port used for data — presumably; confirm */
		uint8_t udp_range;	/* number of ports following data_udp_start — TODO confirm */
	} in;
};

/* Command payload selecting which UTP to show; input only, results are presumably reported elsewhere. */
struct ubcore_cmd_show_utp {
	struct {
		char dev_name[UBCORE_MAX_DEV_NAME];	/* target device name */
		uint8_t eid[UBCORE_CMD_EID_SIZE];	/* entity id identifying the endpoint */
		uint32_t transport_type;	/* transport type selector */
	} in;
};

/* copy from user_space addr to kernel args */
static inline int ubcore_copy_from_user(void *args, const void *args_addr, unsigned long args_size)
{
+258 −0
Original line number Diff line number Diff line
@@ -32,6 +32,9 @@
#include <urma/ubcore_uapi.h>
#include <urma/ubcore_api.h>
#include "ubcore_priv.h"
#include "ubcore_hash_table.h"
#include "ubcore_tp.h"
#include "ubcore_tp_table.h"

static LIST_HEAD(g_client_list);
static LIST_HEAD(g_device_list);
@@ -309,6 +312,52 @@ void ubcore_put_device(struct ubcore_device *dev)
		complete(&dev->comp);
}

/*
 * Per-device hash table parameters, indexed by table id (UBCORE_HT_*).
 * Each entry gives: bucket count, offset of the hlist node inside the
 * object, offset and size of the lookup key, and the free/compare
 * callbacks. All callbacks are NULL here, so lookups fall back to a raw
 * memcmp on the key and freeing falls back to kfree (see
 * ubcore_hash_table_lookup_nolock / ubcore_hash_table_free_with_cb).
 */
static struct ubcore_ht_param g_ht_params[] = {
	[UBCORE_HT_JFS] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfs, hnode),
			    offsetof(struct ubcore_jfs, id), sizeof(uint32_t), NULL, NULL },

	[UBCORE_HT_JFR] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfr, hnode),
			    offsetof(struct ubcore_jfr, id), sizeof(uint32_t), NULL, NULL },

	[UBCORE_HT_JFC] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jfc, hnode),
			    offsetof(struct ubcore_jfc, id), sizeof(uint32_t), NULL, NULL },

	[UBCORE_HT_JETTY] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_jetty, hnode),
			      offsetof(struct ubcore_jetty, id), sizeof(uint32_t), NULL, NULL },

	/* TP table keys on a composite struct ubcore_tp_key, not a plain u32. */
	[UBCORE_HT_TP] = { UBCORE_HASH_TABLE_SIZE, offsetof(struct ubcore_tp_node, hnode),
			   offsetof(struct ubcore_tp_node, key), sizeof(struct ubcore_tp_key), NULL,
			   NULL },
};

/*
 * Allocate every per-device hash table described by g_ht_params.
 * On the first failure, all tables allocated so far are released again.
 * Returns 0 on success, -1 on allocation failure.
 */
static int ubcore_alloc_hash_tables(struct ubcore_device *dev)
{
	uint32_t idx = 0;

	while (idx < ARRAY_SIZE(g_ht_params)) {
		if (ubcore_hash_table_alloc(&dev->ht[idx], &g_ht_params[idx]) != 0) {
			ubcore_log_err("alloc hash tables failed.\n");
			goto rollback;
		}
		idx++;
	}
	return 0;

rollback:
	/* Undo only the tables that were successfully allocated. */
	while (idx > 0) {
		idx--;
		ubcore_hash_table_free(&dev->ht[idx]);
	}
	return -1;
}

static void ubcore_free_hash_tables(struct ubcore_device *dev)
{
	uint32_t i;

	for (i = 0; i < ARRAY_SIZE(g_ht_params); i++)
		ubcore_hash_table_free(&dev->ht[i]);
}

/*
 * Release callback for the embedded struct device.
 * Intentionally empty: ubcore_device lifetime is tracked separately via
 * dev->use_cnt and dev->comp (see ubcore_put_device); this hook exists
 * presumably to satisfy the driver core's release requirement — confirm.
 */
static void ubcore_device_release(struct device *device)
{
}
@@ -336,12 +385,17 @@ static int init_ubcore_device(struct ubcore_device *dev)
	init_completion(&dev->comp);
	atomic_set(&dev->use_cnt, 1);

	if (ubcore_alloc_hash_tables(dev) != 0) {
		ubcore_log_err("alloc hash tables failed.\n");
		return -1;
	}
	ubcore_set_default_eid(dev);
	return 0;
}

/* Tear down init_ubcore_device(): free the hash tables and drop the device reference. */
static void uninit_ubcore_device(struct ubcore_device *dev)
{
	ubcore_free_hash_tables(dev);
	put_device(&dev->dev);
}

@@ -425,8 +479,58 @@ void ubcore_unregister_device(struct ubcore_device *dev)
}
EXPORT_SYMBOL(ubcore_unregister_device);

/*
 * Attach @handler to @dev's async event handler list so it will be
 * called by ubcore_dispatch_async_event(). Invalid (NULL) arguments are
 * logged and ignored.
 */
void ubcore_register_event_handler(struct ubcore_device *dev, struct ubcore_event_handler *handler)
{
	unsigned long lock_flags;

	if (dev == NULL || handler == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return;
	}

	spin_lock_irqsave(&dev->event_handler_lock, lock_flags);
	list_add_tail(&handler->node, &dev->event_handler_list);
	spin_unlock_irqrestore(&dev->event_handler_lock, lock_flags);
}
EXPORT_SYMBOL(ubcore_register_event_handler);

/*
 * Detach @handler from @dev's async event handler list. Invalid (NULL)
 * arguments are logged and ignored.
 */
void ubcore_unregister_event_handler(struct ubcore_device *dev,
				     struct ubcore_event_handler *handler)
{
	unsigned long lock_flags;

	if (dev == NULL || handler == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return;
	}

	spin_lock_irqsave(&dev->event_handler_lock, lock_flags);
	list_del(&handler->node);
	spin_unlock_irqrestore(&dev->event_handler_lock, lock_flags);
}
EXPORT_SYMBOL(ubcore_unregister_event_handler);

void ubcore_dispatch_async_event(struct ubcore_event *event)
{
	struct ubcore_event_handler *handler;
	struct ubcore_device *dev;
	unsigned long flags;

	if (event == NULL || event->ub_dev == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return;
	}

	if (event->event_type == UBCORE_EVENT_TP_ERR && event->element.tp != NULL) {
		ubcore_restore_tp(event->ub_dev, event->element.tp);
		return;
	}

	dev = event->ub_dev;
	spin_lock_irqsave(&dev->event_handler_lock, flags);
	list_for_each_entry(handler, &dev->event_handler_list, node)
		handler->event_callback(event, handler);
	spin_unlock_irqrestore(&dev->event_handler_lock, flags);
}
EXPORT_SYMBOL(ubcore_dispatch_async_event);

@@ -486,6 +590,96 @@ int ubcore_set_eid(struct ubcore_device *dev, union ubcore_eid *eid)
}
EXPORT_SYMBOL(ubcore_set_eid);

/*
 * Set the UPI at index @idx for virtual function @vf_id through the
 * driver's set_upi op.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_set_upi(const struct ubcore_device *dev, uint16_t vf_id, uint16_t idx, uint32_t upi)
{
	int rc;

	if (dev == NULL || dev->ops == NULL || dev->ops->set_upi == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->set_upi(dev, vf_id, idx, upi);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to set vf%hu upi%hu, ret: %d.\n", vf_id, idx, rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_set_upi);

/*
 * Add entity id @eid to @dev through the driver's add_eid op.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_add_eid(struct ubcore_device *dev, union ubcore_eid *eid)
{
	int rc;

	if (dev == NULL || eid == NULL || dev->ops == NULL || dev->ops->add_eid == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->add_eid(dev, eid);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to add eid, ret: %d.\n", rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_add_eid);

/*
 * Delete the entity id at index @idx through the driver's
 * delete_eid_by_idx op.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_delete_eid(struct ubcore_device *dev, uint16_t idx)
{
	int rc;

	if (dev == NULL || dev->ops == NULL || dev->ops->delete_eid_by_idx == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->delete_eid_by_idx(dev, idx);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to delete eid, ret: %d.\n", rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_delete_eid);

/*
 * Add a ueid for virtual function @vf_id as described by @cfg, via the
 * driver's add_ueid op.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_add_ueid(struct ubcore_device *dev, uint16_t vf_id, struct ubcore_ueid_cfg *cfg)
{
	int rc;

	if (dev == NULL || cfg == NULL || dev->ops == NULL || dev->ops->add_ueid == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->add_ueid(dev, vf_id, cfg);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to add ueid, ret: %d.\n", rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_add_ueid);

/*
 * Delete the ueid at index @idx for virtual function @vf_id via the
 * driver's delete_ueid_by_idx op.
 * Returns 0 on success, -EINVAL on invalid arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t vf_id, uint16_t idx)
{
	int ret;

	if (dev == NULL || dev->ops == NULL || dev->ops->delete_ueid_by_idx == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	ret = dev->ops->delete_ueid_by_idx(dev, vf_id, idx);
	if (ret != 0) {
		/* Fixed log text: this path deletes a ueid, not an eid. */
		ubcore_log_err("failed to delete ueid, ret: %d.\n", ret);
		return -EPERM;
	}
	return ret;
}
EXPORT_SYMBOL(ubcore_delete_ueid);

int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_attr *attr)
{
	int ret;
@@ -504,6 +698,44 @@ int ubcore_query_device_attr(struct ubcore_device *dev, struct ubcore_device_att
}
EXPORT_SYMBOL(ubcore_query_device_attr);

/*
 * Query the runtime status of @dev via the driver's query_device_status
 * op, writing the result into @status.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_query_device_status(const struct ubcore_device *dev, struct ubcore_device_status *status)
{
	int rc;

	if (dev == NULL || dev->ops == NULL || dev->ops->query_device_status == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->query_device_status(dev, status);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to query device status, ret: %d.\n", rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_query_device_status);

/*
 * Query the resource identified by @key via the driver's query_res op,
 * filling @val with the result.
 * Returns 0 on success, -EINVAL on bad arguments, -EPERM when the
 * driver callback fails.
 */
int ubcore_query_resource(const struct ubcore_device *dev, struct ubcore_res_key *key,
			  struct ubcore_res_val *val)
{
	int rc;

	if (dev == NULL || key == NULL || val == NULL || dev->ops == NULL ||
	    dev->ops->query_res == NULL) {
		ubcore_log_err("Invalid argument.\n");
		return -EINVAL;
	}

	rc = dev->ops->query_res(dev, key, val);
	if (rc == 0)
		return 0;

	ubcore_log_err("failed to query res, ret: %d.\n", rc);
	return -EPERM;
}
EXPORT_SYMBOL(ubcore_query_resource);

int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_cfg *cfg)
{
	int ret;
@@ -522,6 +754,32 @@ int ubcore_config_device(struct ubcore_device *dev, const struct ubcore_device_c
}
EXPORT_SYMBOL(ubcore_config_device);

/*
 * Forward a user-control request to the driver's user_ctl op, resolving
 * the target device from the request's user context.
 * Returns 0 on success, -1 on invalid arguments, or the driver's error
 * code on failure.
 */
int ubcore_user_control(struct ubcore_user_ctl *k_user_ctl)
{
	struct ubcore_device *ub_dev;
	int rc;

	if (k_user_ctl == NULL || k_user_ctl->uctx == NULL) {
		ubcore_log_err("invalid parameter with input nullptr.\n");
		return -1;
	}

	ub_dev = k_user_ctl->uctx->ub_dev;
	if (ub_dev == NULL || ub_dev->ops == NULL || ub_dev->ops->user_ctl == NULL) {
		ubcore_log_err("invalid parameter with dev nullptr.\n");
		return -1;
	}

	rc = ub_dev->ops->user_ctl(k_user_ctl);
	if (rc == 0)
		return 0;

	/* Propagate the driver's error code unchanged. */
	ubcore_log_err("failed to exec kdrv_user_ctl in %s.\n", __func__);
	return rc;
}
EXPORT_SYMBOL(ubcore_user_control);

int ubcore_query_stats(const struct ubcore_device *dev, struct ubcore_stats_key *key,
		       struct ubcore_stats_val *val)
{
+118 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Description: kmod ub data path API
 * Author: sunfang
 * Create: 2023-05-09
 * Note:
 * History: 2023-05-09
 */
#include <stddef.h>
#include "ubcore_log.h"
#include <urma/ubcore_api.h>
#include <urma/ubcore_opcode.h>
#include <urma/ubcore_types.h>

int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, const struct ubcore_jfs_wr *wr,
			      struct ubcore_jfs_wr **bad_wr)
{
	struct ubcore_ops *dev_ops;

	if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL ||
	    jetty->ub_dev->ops->post_jetty_send_wr == NULL || wr == NULL || bad_wr == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	dev_ops = jetty->ub_dev->ops;
	return dev_ops->post_jetty_send_wr(jetty, wr, bad_wr);
}
EXPORT_SYMBOL(ubcore_post_jetty_send_wr);

int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, const struct ubcore_jfr_wr *wr,
			      struct ubcore_jfr_wr **bad_wr)
{
	struct ubcore_ops *dev_ops;

	if (jetty == NULL || jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL ||
	    jetty->ub_dev->ops->post_jetty_recv_wr == NULL || wr == NULL || bad_wr == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	dev_ops = jetty->ub_dev->ops;
	return dev_ops->post_jetty_recv_wr(jetty, wr, bad_wr);
}
EXPORT_SYMBOL(ubcore_post_jetty_recv_wr);

int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, const struct ubcore_jfs_wr *wr,
		       struct ubcore_jfs_wr **bad_wr)
{
	struct ubcore_ops *dev_ops;

	if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL ||
	    jfs->ub_dev->ops->post_jfs_wr == NULL || wr == NULL || bad_wr == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	dev_ops = jfs->ub_dev->ops;
	return dev_ops->post_jfs_wr(jfs, wr, bad_wr);
}
EXPORT_SYMBOL(ubcore_post_jfs_wr);

int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, const struct ubcore_jfr_wr *wr,
		       struct ubcore_jfr_wr **bad_wr)
{
	struct ubcore_ops *dev_ops;

	if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops == NULL ||
	    jfr->ub_dev->ops->post_jfr_wr == NULL || wr == NULL || bad_wr == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	dev_ops = jfr->ub_dev->ops;
	return dev_ops->post_jfr_wr(jfr, wr, bad_wr);
}
EXPORT_SYMBOL(ubcore_post_jfr_wr);

int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr)
{
	struct ubcore_ops *dev_ops;

	if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL ||
	    jfc->ub_dev->ops->poll_jfc == NULL || cr == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	dev_ops = jfc->ub_dev->ops;
	return dev_ops->poll_jfc(jfc, cr_cnt, cr);
}
EXPORT_SYMBOL(ubcore_poll_jfc);

/*
 * Re-arm completion notification on JFC @jfc through the driver's
 * rearm_jfc op; @solicited_only limits notification to solicited events.
 * Returns -EINVAL on invalid arguments, otherwise the driver's result.
 */
int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only)
{
	struct ubcore_device *dev;

	if (jfc == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}
	dev = jfc->ub_dev;
	if (dev == NULL || dev->ops == NULL || dev->ops->rearm_jfc == NULL) {
		ubcore_log_err("Invalid parameter");
		return -EINVAL;
	}

	return dev->ops->rearm_jfc(jfc, solicited_only);
}
EXPORT_SYMBOL(ubcore_rearm_jfc);
+185 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * Description: implement hash table ops
 * Author: Yan Fangfang
 * Create: 2022-08-03
 * Note:
 * History: 2022-08-03  Yan Fangfang  Add base code
 */

#include <linux/slab.h>
#include "ubcore_log.h"
#include "ubcore_hash_table.h"

/*
 * Initialize hash table @ht from parameters @p: allocate and zero
 * p->size bucket heads, then set up the lock and refcount.
 * Returns 0 on success, -1 on invalid arguments or allocation failure.
 */
int ubcore_hash_table_alloc(struct ubcore_hash_table *ht, const struct ubcore_ht_param *p)
{
	uint32_t i;

	/* Also reject a NULL table: the original checked only @p but then
	 * dereferenced @ht unconditionally.
	 */
	if (ht == NULL || p == NULL || p->size == 0)
		return -1;
	ht->p = *p;
	ht->head = kcalloc(p->size, sizeof(struct hlist_head), GFP_KERNEL);
	if (ht->head == NULL) {
		ubcore_log_err("hash table allocation failed.\n");
		return -1;
	}
	for (i = 0; i < p->size; i++)
		INIT_HLIST_HEAD(&ht->head[i]);

	spin_lock_init(&ht->lock);
	kref_init(&ht->kref);
	return 0;
}

/*
 * Free every entry still in the table, then the bucket array itself.
 * Per entry the free path is chosen in order: @free_cb if given, else
 * the table's p.free_f callback, else plain kfree.
 * ht->head is cleared under the lock, so a second call (or a concurrent
 * add/lookup) sees the table as already torn down.
 * NOTE(review): the per-entry free callbacks run under ht->lock (a
 * spinlock), so they must not sleep.
 */
void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, void (*free_cb)(void *))
{
	struct hlist_node *pos = NULL, *next = NULL;
	struct hlist_head *head;
	uint32_t i;
	void *obj;

	spin_lock(&ht->lock);
	if (ht->head == NULL) {
		/* Already freed (or never allocated). */
		spin_unlock(&ht->lock);
		return;
	}
	for (i = 0; i < ht->p.size; i++) {
		hlist_for_each_safe(pos, next, &ht->head[i]) {
			obj = ubcore_ht_obj(ht, pos);
			hlist_del(pos);
			if (free_cb != NULL)
				free_cb(obj);
			else if (ht->p.free_f != NULL)
				ht->p.free_f(obj);
			else
				kfree(obj);
		}
	}
	head = ht->head;
	ht->head = NULL;
	spin_unlock(&ht->lock);
	/* head is provably non-NULL here, and kfree(NULL) is a no-op
	 * anyway, so the former "if (head != NULL)" guard was redundant.
	 */
	kfree(head);
}

/* Free the table and all remaining entries using the default free path
 * (p.free_f if set, else kfree).
 */
void ubcore_hash_table_free(struct ubcore_hash_table *ht)
{
	ubcore_hash_table_free_with_cb(ht, NULL);
}

/*
 * Insert @hnode at the head of the bucket selected by @hash.
 * Caller must hold ht->lock and guarantee ht->head is valid.
 */
void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, struct hlist_node *hnode,
				  uint32_t hash)
{
	struct hlist_head *bucket = &ht->head[hash % ht->p.size];

	INIT_HLIST_NODE(hnode);
	hlist_add_head(hnode, bucket);
}

/*
 * Locked insert of @hnode into the bucket for @hash.
 * Silently does nothing if the table has already been freed.
 */
void ubcore_hash_table_add(struct ubcore_hash_table *ht, struct hlist_node *hnode, uint32_t hash)
{
	spin_lock(&ht->lock);
	if (ht->head != NULL)
		ubcore_hash_table_add_nolock(ht, hnode, hash);
	spin_unlock(&ht->lock);
}

/*
 * Locked removal of @hnode from the table.
 * Silently does nothing if the table has already been freed.
 */
void ubcore_hash_table_remove(struct ubcore_hash_table *ht, struct hlist_node *hnode)
{
	spin_lock(&ht->lock);
	if (ht->head != NULL)
		hlist_del(hnode);
	spin_unlock(&ht->lock);
}

/*
 * Look up an entry by @key in the bucket selected by @hash.
 * Matching is tried per node in order:
 *  - if a compare callback (cmp_f) is set and reports equality, match;
 *  - otherwise, if key_size > 0 and a raw memcmp of the stored key
 *    succeeds, match.
 * NOTE(review): when cmp_f is set but reports inequality, the memcmp
 * branch is still evaluated for the same node — confirm this dual-match
 * behavior is intended (all current g_ht_params entries have cmp_f NULL).
 * Caller must hold ht->lock; ht->head must be valid.
 * Returns the containing object, or NULL if not found.
 */
void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, uint32_t hash, const void *key)
{
	struct hlist_node *pos = NULL;
	void *obj = NULL;

	hlist_for_each(pos, &ht->head[hash % ht->p.size]) {
		obj = ubcore_ht_obj(ht, pos);
		if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) {
			break;
		} else if (ht->p.key_size > 0 &&
			   memcmp(ubcore_ht_key(ht, pos), key, ht->p.key_size) == 0) {
			break;
		}
		/* No match for this node; clear so a fruitless scan returns NULL. */
		obj = NULL;
	}
	return obj;
}

/*
 * Locked lookup of @key in the bucket for @hash.
 * Returns the matching object, or NULL if not found or the table has
 * already been freed.
 */
void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, const void *key)
{
	void *found = NULL;

	spin_lock(&ht->lock);
	if (ht->head != NULL)
		found = ubcore_hash_table_lookup_nolock(ht, hash, key);
	spin_unlock(&ht->lock);
	return found;
}

/* Do not insert a new entry if an old entry with the same key exists */
int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, struct hlist_node *hnode,
			       uint32_t hash)
{
	spin_lock(&ht->lock);
	if (ht->head == NULL) {
		spin_unlock(&ht->lock);
		return -1;
	}
	/* Old entry with the same key exists */
	if (ubcore_hash_table_lookup_nolock(ht, hash, ubcore_ht_key(ht, hnode)) != NULL) {
		spin_unlock(&ht->lock);
		return -1;
	}
	ubcore_hash_table_add_nolock(ht, hnode, hash);
	spin_unlock(&ht->lock);
	return 0;
}

/*
 * Atomically find the entry matching @key in the bucket for @hash,
 * unlink it from the table, and return it (ownership passes to the
 * caller, who is responsible for freeing it).
 * Matching follows the same rules as ubcore_hash_table_lookup_nolock:
 * cmp_f if set, falling through to a raw memcmp when key_size > 0.
 * Returns NULL if no match is found or the table has been freed.
 */
void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, const void *key)
{
	struct hlist_node *pos = NULL, *next = NULL;
	void *obj = NULL;

	spin_lock(&ht->lock);
	if (ht->head == NULL) {
		/* Table already freed. */
		spin_unlock(&ht->lock);
		return NULL;
	}
	hlist_for_each_safe(pos, next, &ht->head[hash % ht->p.size]) {
		obj = ubcore_ht_obj(ht, pos);
		if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) {
			hlist_del(pos);
			break;
		} else if (ht->p.key_size > 0 &&
			   memcmp(ubcore_ht_key(ht, pos), key, ht->p.key_size) == 0) {
			hlist_del(pos);
			break;
		}
		/* No match for this node; clear so a fruitless scan returns NULL. */
		obj = NULL;
	}
	spin_unlock(&ht->lock);
	return obj;
}
Loading