Commit f34b2cf1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma updates from Jason Gunthorpe:
 "This is mostly bug fixes and general cleanups. The noteworthy
  new features are fairly small:

   - XRC support for HNS and improved RQ operations

   - Bug fixes and updates for hns, mlx5, bnxt_re, hfi1, i40iw, rxe, siw
     and qib

   - Quite a few general cleanups on spelling, error handling, static
     checker detections, etc

   - Increase the number of device ports supported beyond 255. High port
     count software switches now exist

   - Several bug fixes for rtrs

   - mlx5 Device Memory support for host controlled atomics

   - Report SRQ tables through to rdma-tool"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (145 commits)
  IB/qib: Remove redundant assignment to ret
  RDMA/nldev: Add copy-on-fork attribute to get sys command
  RDMA/bnxt_re: Fix a double free in bnxt_qplib_alloc_res
  RDMA/siw: Fix a use after free in siw_alloc_mr
  IB/hfi1: Remove redundant variable rcd
  RDMA/nldev: Add QP numbers to SRQ information
  RDMA/nldev: Return SRQ information
  RDMA/restrack: Add support to get resource tracking for SRQ
  RDMA/nldev: Return context information
  RDMA/core: Add CM to restrack after successful attachment to a device
  RDMA/cma: Skip device which doesn't support CM
  RDMA/rxe: Fix a bug in rxe_fill_ip_info()
  RDMA/mlx5: Expose private query port
  RDMA/mlx4: Remove an unused variable
  RDMA/mlx5: Fix type assignment for ICM DM
  IB/mlx5: Set right RoCE l3 type and roce version while deleting GID
  RDMA/i40iw: Fix error unwinding when i40iw_hmc_sd_one fails
  RDMA/cxgb4: add missing qpid increment
  IB/ipoib: Remove unnecessary struct declaration
  RDMA/bnxt_re: Get rid of custom module reference counting
  ...
parents 9f67672a 6da7bda3
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -34,6 +34,9 @@ Description: Multipath policy specifies which path should be selected on each IO
		min-inflight (1):
		    select path with minimum inflights.

		min-latency (2):
		    select path with minimum latency.

What:		/sys/class/rtrs-client/<session-name>/paths/
Date:		Feb 2020
KernelVersion:	5.7
@@ -95,6 +98,15 @@ KernelVersion: 5.7
Contact:	Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
Description:	RO, Contains the destination address of the path

What:		/sys/class/rtrs-client/<session-name>/paths/<src@dst>/cur_latency
Date:		Feb 2020
KernelVersion:	5.7
Contact:	Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
Description:	RO, Contains the latency time calculated by the heart-beat messages.
		Whenever the client sends heart-beat message, it checks the time gap
		between sending the heart-beat message and receiving the ACK.
		This value can be changed regularly.

What:		/sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/reset_all
Date:		Feb 2020
KernelVersion:	5.7
+1 −1
Original line number Diff line number Diff line
Hisilicon RoCE DT description

Hisilicon RoCE engine is a part of network subsystem.
It works depending on other part of network wubsytem, such as, gmac and
It works depending on other part of network subsystem, such as gmac and
dsa fabric.

Additional properties are described here:
+2 −3
Original line number Diff line number Diff line
@@ -8226,7 +8226,6 @@ F: drivers/crypto/hisilicon/zip/
HISILICON ROCE DRIVER
M:	Lijun Ou <oulijun@huawei.com>
M:	Wei Hu(Xavier) <huwei87@hisilicon.com>
M:	Weihang Li <liweihang@huawei.com>
L:	linux-rdma@vger.kernel.org
S:	Maintained
@@ -15826,8 +15825,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
F:	drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
M:	Danil Kipnis <danil.kipnis@cloud.ionos.com>
M:	Jack Wang <jinpu.wang@cloud.ionos.com>
M:	Md. Haris Iqbal <haris.iqbal@ionos.com>
M:	Jack Wang <jinpu.wang@ionos.com>
L:	linux-rdma@vger.kernel.org
S:	Maintained
F:	drivers/infiniband/ulp/rtrs/
+40 −47
Original line number Diff line number Diff line
@@ -121,7 +121,7 @@ struct ib_gid_table {
	u32				default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
	struct ib_event event;

@@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf)
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
	return device->port_data[port].cache.gid;
}
@@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head)
static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
	struct ib_device *device = entry->attr.device;
	u8 port_num = entry->attr.port_num;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
	dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
		port_num, entry->attr.index, entry->attr.gid.raw);

	write_lock_irq(&table->rwlock);
@@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work)
	struct ib_gid_table_entry *entry =
		container_of(work, struct ib_gid_table_entry, del_work);
	struct ib_device *device = entry->attr.device;
	u8 port_num = entry->attr.port_num;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table = rdma_gid_table(device, port_num);

	mutex_lock(&table->lock);
@@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
static void del_gid(struct ib_device *ib_dev, u32 port,
		    struct ib_gid_table *table, int ix)
{
	struct roce_gid_ndev_storage *ndev_storage;
@@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,

	lockdep_assert_held(&table->lock);

	dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
		ix, table->data_vec[ix]->attr.gid.raw);

	write_lock_irq(&table->rwlock);
@@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
@@ -587,7 +587,7 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
}

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
@@ -627,7 +627,7 @@ _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
@@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
@@ -683,7 +683,7 @@ const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
		      const union ib_gid *gid,
		      enum ib_gid_type gid_type,
		      u8 port, struct net_device *ndev)
		      u32 port, struct net_device *ndev)
{
	int local_index;
	struct ib_gid_table *table;
@@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
 *
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
	struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
		       void *),
	void *context)
@@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device,
	kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
				   struct ib_gid_table *table)
{
	int i;
@@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
	mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
@@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
	}
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
@@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,

static void gid_table_release_one(struct ib_device *ib_dev)
{
	unsigned int p;
	u32 p;

	rdma_for_each_port (ib_dev, p) {
		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
@@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	unsigned int rdma_port;
	u32 rdma_port;

	rdma_for_each_port (ib_dev, rdma_port) {
		table = alloc_gid_table(
@@ -915,7 +915,7 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	unsigned int p;
	u32 p;

	rdma_for_each_port (ib_dev, p)
		cleanup_gid_table_port(ib_dev, p,
@@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
 * Returns 0 on success or appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u8 port_num,
int rdma_query_gid(struct ib_device *device, u32 port_num,
		   int index, union ib_gid *gid)
{
	struct ib_gid_table *table;
@@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
	unsigned int p;
	u32 p;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
EXPORT_SYMBOL(rdma_find_gid);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       u32               port_num,
		       int               index,
		       u16              *pkey)
{
@@ -1069,8 +1069,7 @@ int ib_get_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
				u64 *sn_pfx)
{
	unsigned long flags;
@@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
			u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
@@ -1116,9 +1113,10 @@ int ib_find_cached_pkey(struct ib_device *device,
				*index = i;
				ret = 0;
				break;
			} else
			} else {
				partial_ix = i;
			}
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
@@ -1132,10 +1130,8 @@ int ib_find_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
@@ -1169,9 +1165,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
	unsigned long flags;
	int ret = 0;
@@ -1187,8 +1181,7 @@ int ib_get_cached_lmc(struct ib_device *device,
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
			     u8                  port_num,
int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
@@ -1222,7 +1215,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
 * code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
@@ -1263,7 +1256,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	unsigned int port_num, i;
	u32 port_num, i;
	struct net_device *ndev;
	unsigned long flags;

@@ -1361,7 +1354,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
			container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u8 port_num = entry->attr.port_num;
	u32 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;
	bool valid;
@@ -1441,7 +1434,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
EXPORT_SYMBOL(rdma_read_gid_l2_fields);

static int config_non_roce_gid_cache(struct ib_device *device,
				     u8 port, int gid_tbl_len)
				     u32 port, int gid_tbl_len)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
@@ -1472,7 +1465,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
}

static int
ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
@@ -1621,7 +1614,7 @@ EXPORT_SYMBOL(ib_dispatch_event);

int ib_cache_setup_one(struct ib_device *device)
{
	unsigned int p;
	u32 p;
	int err;

	rwlock_init(&device->cache_lock);
@@ -1641,7 +1634,7 @@ int ib_cache_setup_one(struct ib_device *device)

void ib_cache_release_one(struct ib_device *device)
{
	unsigned int p;
	u32 p;

	/*
	 * The release function frees all the cache elements.
+27 −31
Original line number Diff line number Diff line
@@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = {
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u8 port_num;
	u32 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
@@ -255,7 +255,8 @@ struct cm_id_private {
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

@@ -420,8 +421,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
	return 0;
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
	void *data;

@@ -708,8 +708,8 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
@@ -767,8 +767,8 @@ static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
	return res;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
@@ -797,8 +797,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
@@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
					       req_msg))));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
@@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u32 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;
@@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u32 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
@@ -2138,20 +2138,17 @@ static int cm_req_handler(struct cm_work *work)
		goto destroy;
	}

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

	memset(&work->path[0], 0, sizeof(work->path[0]));
	if (cm_req_has_alt_path(req_msg))
		memset(&work->path[1], 0, sizeof(work->path[1]));
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
	gid_attr = grh->sgid_attr;

	if (gid_attr &&
	    rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num)) {
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
		cm_path_set_rec_type(
			work->port->cm_dev->ib_device, work->port->port_num,
			&work->path[0],
@@ -3917,8 +3914,7 @@ static int cm_establish(struct ib_cm_id *cm_id)

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
@@ -4334,7 +4330,7 @@ static int cm_add_one(struct ib_device *ib_device)
	unsigned long flags;
	int ret;
	int count = 0;
	unsigned int i;
	u32 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
@@ -4432,7 +4428,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	unsigned int i;
	u32 i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
Loading