Commit cc86e0c2 authored by Liam R. Howlett, committed by Andrew Morton

radix tree test suite: add support for slab bulk APIs

Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.
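
As an illustration only (not part of this patch; the cache name and sizes
below are made up), the pair is meant to be used like the kernel's slab bulk
API: allocate a whole batch with one call and return the whole batch with one
call:

	void *objs[8];
	struct kmem_cache *cache;

	/* made-up cache: 64-byte objects, no alignment, no constructor */
	cache = kmem_cache_create("example", 64, 0, SLAB_PANIC, NULL);
	if (kmem_cache_alloc_bulk(cache, __GFP_DIRECT_RECLAIM, 8, objs) == 8)
		kmem_cache_free_bulk(cache, 8, objs);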

Link: https://lkml.kernel.org/r/20220906194824.2110408-6-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 000a4493
+4 −0
@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, unsigned int flags,
			void (*ctor)(void *));

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **list);

#endif		/* _TOOLS_SLAB_H */
+116 −2
@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
	return p;
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
@@ -110,7 +109,78 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (cachep->non_kernel < size)
			return 0;

		cachep->non_kernel -= size;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size * size);
			} else {
				p[i] = malloc(cachep->size * size);
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

struct kmem_cache *
@@ -130,3 +200,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Testing the bulk allocators without an aligned kmem_cache to force
	 * the bulk alloc/free paths to reuse freed objects.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
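
An illustrative sketch (not part of this patch): kmem_cache_alloc_bulk()
above only consumes the cache's non_kernel budget when __GFP_DIRECT_RECLAIM
is absent, and kmem_cache_create() leaves that budget at zero, so a
non-reclaiming bulk request fails outright until the budget is raised:

	void *objs[4];
	struct kmem_cache *cache;

	/* made-up cache; non_kernel starts at 0, see kmem_cache_create() above */
	cache = kmem_cache_create("example2", 64, 0, SLAB_PANIC, NULL);
	/* no __GFP_DIRECT_RECLAIM in gfp, so the bulk alloc returns 0 */
	assert(kmem_cache_alloc_bulk(cache, 0, 4, objs) == 0);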