Commit 5c0e820c authored by Eric Dumazet, committed by Jakub Kicinski
Browse files

net: factorize code in kmalloc_reserve()



All kmalloc_reserve() callers have to make the same computation,
we can factorize them, to prepare following patch in the series.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 65998d2b
Loading
Loading
Loading
Loading
+11 −16
Original line number Diff line number Diff line
@@ -478,17 +478,20 @@ EXPORT_SYMBOL(napi_build_skb);
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;
	unsigned int obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	*size = obj_size = kmalloc_size_roundup(obj_size);
	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
@@ -496,7 +499,7 @@ static void *kmalloc_reserve(size_t size, gfp_t flags, int node,

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
@@ -557,9 +560,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
@@ -1933,9 +1934,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(size);
@@ -6283,9 +6282,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);
@@ -6401,9 +6398,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	size = SKB_HEAD_ALIGN(size);
	size = kmalloc_size_roundup(size);
	data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;
	size = SKB_WITH_OVERHEAD(size);