Commit 8d60ecd9 authored by NeilBrown's avatar NeilBrown Committed by Greg Kroah-Hartman
Browse files

staging: lustre: replace LIBCFS_CPT_ALLOC()



LIBCFS_CPT_ALLOC() calls kvmalloc_node() with GFP_NOFS
which is not permitted.
Mostly, a kmalloc_node(GFP_NOFS) is appropriate, though occasionally
the allocation is large and GFP_KERNEL is acceptable, so
kvmalloc_node() can be used.

This patch introduces 4 alternatives to LIBCFS_CPT_ALLOC():
 kmalloc_cpt()
 kzalloc_cpt()
 kvmalloc_cpt()
 kvzalloc_cpt().

Each takes a size, gfp flags, and cpt number.

Almost every call to LIBCFS_CPT_ALLOC() passes lnet_cpt_table()
as the table.  This patch embeds that choice in the k*alloc_cpt()
macros, and open-codes kzalloc_node(..., cfs_cpt_spread_node(..))
in the one case that lnet_cpt_table() isn't used.

When LIBCFS_CPT_ALLOC() is replaced, the matching LIBCFS_FREE()
is also replaced with kfree() or kvfree() as appropriate.

Signed-off-by: default avatarNeilBrown <neilb@suse.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d0157f0c
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
@@ -126,6 +126,25 @@ do { \
	kvfree(ptr);					  \
} while (0)

/*
 * Use #define rather than inline, as lnet_cpt_table() might
 * not be defined yet
 */

/*
 * Allocate @size bytes with @flags on the NUMA node that
 * cfs_cpt_spread_node() maps CPT @cpt of lnet_cpt_table() to.
 * Memory is NOT zeroed; returns NULL on failure.
 */
#define kmalloc_cpt(size, flags, cpt) \
	kmalloc_node(size, flags,  cfs_cpt_spread_node(lnet_cpt_table(), cpt))

/* As kmalloc_cpt(), but the returned memory is zeroed via __GFP_ZERO. */
#define kzalloc_cpt(size, flags, cpt) \
	kmalloc_node(size, flags | __GFP_ZERO,				\
		     cfs_cpt_spread_node(lnet_cpt_table(), cpt))

/*
 * As kmalloc_cpt(), but uses kvmalloc_node(), which may fall back to
 * vmalloc for large sizes — pair the allocation with kvfree().
 */
#define kvmalloc_cpt(size, flags, cpt) \
	kvmalloc_node(size, flags,					\
		      cfs_cpt_spread_node(lnet_cpt_table(), cpt))

/* As kvmalloc_cpt(), but the returned memory is zeroed via __GFP_ZERO. */
#define kvzalloc_cpt(size, flags, cpt) \
	kvmalloc_node(size, flags | __GFP_ZERO,				\
		      cfs_cpt_spread_node(lnet_cpt_table(), cpt))

/******************************************************************************/

void libcfs_debug_dumplog(void);
+37 −62
Original line number Diff line number Diff line
@@ -325,7 +325,7 @@ int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
	LASSERT(net);
	LASSERT(nid != LNET_NID_ANY);

	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
	peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
	if (!peer) {
		CERROR("Cannot allocate peer\n");
		return -ENOMEM;
@@ -656,15 +656,14 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm

	LASSERT(sched->ibs_nthreads > 0);

	LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
			 sizeof(*init_qp_attr));
	init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt);
	if (!init_qp_attr) {
		CERROR("Can't allocate qp_attr for %s\n",
		       libcfs_nid2str(peer->ibp_nid));
		goto failed_0;
	}

	LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
	conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt);
	if (!conn) {
		CERROR("Can't allocate connection for %s\n",
		       libcfs_nid2str(peer->ibp_nid));
@@ -687,8 +686,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
	INIT_LIST_HEAD(&conn->ibc_active_txs);
	spin_lock_init(&conn->ibc_lock);

	LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
			 sizeof(*conn->ibc_connvars));
	conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt);
	if (!conn->ibc_connvars) {
		CERROR("Can't allocate in-progress connection state\n");
		goto failed_2;
@@ -722,8 +720,8 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm

	write_unlock_irqrestore(glock, flags);

	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
			 IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
	conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx),
				    GFP_NOFS, cpt);
	if (!conn->ibc_rxs) {
		CERROR("Cannot allocate RX buffers\n");
		goto failed_2;
@@ -877,11 +875,7 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
	if (conn->ibc_rx_pages)
		kiblnd_unmap_rx_descs(conn);

	if (conn->ibc_rxs) {
		LIBCFS_FREE(conn->ibc_rxs,
			    IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
	}

	kfree(conn->ibc_rxs);
	kfree(conn->ibc_connvars);

	if (conn->ibc_hdev)
@@ -1088,7 +1082,7 @@ static void kiblnd_free_pages(struct kib_pages *p)
			__free_page(p->ibp_pages[i]);
	}

	LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
	kfree(p);
}

int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
@@ -1096,14 +1090,13 @@ int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
	struct kib_pages *p;
	int i;

	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
			 offsetof(struct kib_pages, ibp_pages[npages]));
	p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]),
			GFP_NOFS, cpt);
	if (!p) {
		CERROR("Can't allocate descriptor for %d pages\n", npages);
		return -ENOMEM;
	}

	memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
	p->ibp_npages = npages;

	for (i = 0; i < npages; i++) {
@@ -1375,8 +1368,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_po
	INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
	fpo->fast_reg.fpo_pool_size = 0;
	for (i = 0; i < fps->fps_pool_size; i++) {
		LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
				 sizeof(*frd));
		frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt);
		if (!frd) {
			CERROR("Failed to allocate a new fast_reg descriptor\n");
			rc = -ENOMEM;
@@ -1425,7 +1417,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
	struct kib_fmr_pool *fpo;
	int rc;

	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
	fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt);
	if (!fpo)
		return -ENOMEM;

@@ -1984,30 +1976,14 @@ static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
		struct kib_tx *tx = &tpo->tpo_tx_descs[i];

		list_del(&tx->tx_list);
		if (tx->tx_pages)
			LIBCFS_FREE(tx->tx_pages,
				    LNET_MAX_IOV *
				    sizeof(*tx->tx_pages));
		if (tx->tx_frags)
			LIBCFS_FREE(tx->tx_frags,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				     sizeof(*tx->tx_frags));
		if (tx->tx_wrq)
			LIBCFS_FREE(tx->tx_wrq,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				    sizeof(*tx->tx_wrq));
		if (tx->tx_sge)
			LIBCFS_FREE(tx->tx_sge,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				    sizeof(*tx->tx_sge));
		if (tx->tx_rd)
			LIBCFS_FREE(tx->tx_rd,
				    offsetof(struct kib_rdma_desc,
					     rd_frags[IBLND_MAX_RDMA_FRAGS]));
	}

	LIBCFS_FREE(tpo->tpo_tx_descs,
		    pool->po_size * sizeof(struct kib_tx));
		kfree(tx->tx_pages);
		kfree(tx->tx_frags);
		kfree(tx->tx_wrq);
		kfree(tx->tx_sge);
		kfree(tx->tx_rd);
	}

	kfree(tpo->tpo_tx_descs);
out:
	kiblnd_fini_pool(pool);
	kfree(tpo);
@@ -2028,7 +2004,7 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
	struct kib_pool *pool;
	struct kib_tx_pool *tpo;

	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
	tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt);
	if (!tpo) {
		CERROR("Failed to allocate TX pool\n");
		return -ENOMEM;
@@ -2046,8 +2022,8 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
			 size * sizeof(struct kib_tx));
	tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx),
					GFP_NOFS, ps->ps_cpt);
	if (!tpo->tpo_tx_descs) {
		CERROR("Can't allocate %d tx descriptors\n", size);
		ps->ps_pool_destroy(pool);
@@ -2061,36 +2037,35 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,

		tx->tx_pool = tpo;
		if (ps->ps_net->ibn_fmr_ps) {
			LIBCFS_CPT_ALLOC(tx->tx_pages,
					 lnet_cpt_table(), ps->ps_cpt,
					 LNET_MAX_IOV * sizeof(*tx->tx_pages));
			tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages),
						   GFP_NOFS, ps->ps_cpt);
			if (!tx->tx_pages)
				break;
		}

		LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_frags));
		tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
					   sizeof(*tx->tx_frags),
					   GFP_NOFS, ps->ps_cpt);
		if (!tx->tx_frags)
			break;

		sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);

		LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_wrq));
		tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
					 sizeof(*tx->tx_wrq),
					 GFP_NOFS, ps->ps_cpt);
		if (!tx->tx_wrq)
			break;

		LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_sge));
		tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) *
					 sizeof(*tx->tx_sge),
					 GFP_NOFS, ps->ps_cpt);
		if (!tx->tx_sge)
			break;

		LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
				 offsetof(struct kib_rdma_desc,
					  rd_frags[IBLND_MAX_RDMA_FRAGS]));
		tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc,
						 rd_frags[IBLND_MAX_RDMA_FRAGS]),
					GFP_NOFS, ps->ps_cpt);
		if (!tx->tx_rd)
			break;
	}
+5 −10
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
	LASSERT(id.pid != LNET_PID_ANY);
	LASSERT(!in_interrupt());

	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
	peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt);
	if (!peer)
		return -ENOMEM;

@@ -2257,13 +2257,8 @@ ksocknal_free_buffers(void)
		struct ksock_sched_info *info;
		int i;

		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
			if (info->ksi_scheds) {
				LIBCFS_FREE(info->ksi_scheds,
					    info->ksi_nthreads_max *
					    sizeof(info->ksi_scheds[0]));
			}
		}
		cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info)
			kfree(info->ksi_scheds);
		cfs_percpt_free(ksocknal_data.ksnd_sched_info);
	}

@@ -2452,8 +2447,8 @@ ksocknal_base_startup(void)
		info->ksi_nthreads_max = nthrs;
		info->ksi_cpt = i;

		LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
				 info->ksi_nthreads_max * sizeof(*sched));
		info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched),
					       GFP_NOFS, i);
		if (!info->ksi_scheds)
			goto failed;

+4 −5
Original line number Diff line number Diff line
@@ -49,10 +49,8 @@ cfs_percpt_free(void *vars)

	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);

	for (i = 0; i < arr->va_count; i++) {
		if (arr->va_ptrs[i])
			LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
	}
	for (i = 0; i < arr->va_count; i++)
		kfree(arr->va_ptrs[i]);

	kvfree(arr);
}
@@ -89,7 +87,8 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
	arr->va_cptab = cptab;

	for (i = 0; i < count; i++) {
		LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
		arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL,
					       cfs_cpt_spread_node(cptab, i));
		if (!arr->va_ptrs[i]) {
			cfs_percpt_free((void *)&arr->va_ptrs[0]);
			return NULL;
+4 −7
Original line number Diff line number Diff line
@@ -404,11 +404,8 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
		       count, lnet_res_type2str(rec->rec_type));
	}

	if (rec->rec_lh_hash) {
		LIBCFS_FREE(rec->rec_lh_hash,
			    LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	kfree(rec->rec_lh_hash);
	rec->rec_lh_hash = NULL;
	}

	rec->rec_type = 0; /* mark it as finalized */
}
@@ -426,8 +423,8 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
	rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;

	/* Arbitrary choice of hash table size */
	LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
			 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
	rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]),
					GFP_KERNEL, cpt);
	if (!rec->rec_lh_hash) {
		rc = -ENOMEM;
		goto out;
Loading