Commit 26caea5f authored by Wenpeng Liang, committed by Jason Gunthorpe
Browse files

RDMA/core: Correct format of block comments

Block comments should use a trailing */ on a separate line, and every
line of a block comment should start with an '*'.

Link: https://lore.kernel.org/r/1617783353-48249-7-git-send-email-liweihang@huawei.com


Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b6eb7011
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -255,7 +255,8 @@ struct cm_id_private {
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

+2 −1
Original line number Diff line number Diff line
@@ -528,7 +528,8 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
}

/* netlink attribute policy for the response to add and query mapping request
 * and response with remote address info */
 * and response with remote address info
 */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
	[IWPM_NLA_RQUERY_MAPPING_SEQ]     = { .type = NLA_U32 },
	[IWPM_NLA_RQUERY_LOCAL_ADDR]      = {
+2 −1
Original line number Diff line number Diff line
@@ -1829,7 +1829,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				deref_mad_agent(mad_agent_priv);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				 * drop the mad
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
+2 −1
Original line number Diff line number Diff line
@@ -342,7 +342,8 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	 * requires the caller to guarantee we can't race here.
	 */
	WARN_ON(atomic_read(&pd->usecnt));

	ret = pd->device->ops.dealloc_pd(pd, udata);