Commit cc465c3b authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm/slub: Convert detached_freelist to use a struct slab

This gives us a little bit of extra typesafety as we know that nobody
called virt_to_page() instead of virt_to_head_page().

[ vbabka@suse.cz: Use folio as intermediate step when filtering out
  large kmalloc pages ]
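
A minimal standalone sketch of that argument (a hypothetical model, not kernel code; every type and function name below is invented for illustration): once the freeing path demands a struct slab *, a caller holding only a struct page * no longer type-checks, so using virt_to_page() (which can return a tail page) where virt_to_head_page() was required becomes a compile-time error instead of a latent runtime bug.

/* Hypothetical model only -- distinct wrapper types turn "what kind of
 * pointer is this?" into a property the compiler can check. */
struct page { unsigned long flags; };	/* stand-in for the kernel's struct page */
struct slab { struct page head; };	/* dedicated type for slab memory */

static void model_slab_free(struct slab *slab)	/* new-style API wants a slab */
{
	(void)slab;			/* freeing logic elided */
}

int main(void)
{
	struct slab s = { { 0 } };
	struct page *pg = &s.head;	/* what a page-level lookup would yield */

	model_slab_free(&s);		/* ok: the caller proved it has a slab */
	/* model_slab_free(pg); */	/* compile error: struct page * is not struct slab * */
	(void)pg;
	return 0;
}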

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
parent 0b3eb091
mm/slub.c: +17 -14
@@ -3532,7 +3532,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 EXPORT_SYMBOL(kmem_cache_free);
 
 struct detached_freelist {
-	struct page *page;
+	struct slab *slab;
 	void *tail;
 	void *freelist;
 	int cnt;
@@ -3554,8 +3554,8 @@ static inline void free_nonslab_page(struct page *page, void *object)
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
- * page.  It builds a detached freelist directly within the given
- * page/objects.  This can happen without any need for
+ * slab.  It builds a detached freelist directly within the given
+ * slab/objects.  This can happen without any need for
  * synchronization, because the objects are owned by running process.
  * The freelist is build up as a single linked list in the objects.
  * The idea is, that this detached freelist can then be bulk
@@ -3570,10 +3570,11 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 
 	/* Always re-init detached_freelist */
-	df->page = NULL;
+	df->slab = NULL;
 
 	do {
 		object = p[--size];
@@ -3583,17 +3584,19 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;
 
-	page = virt_to_head_page(object);
+	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
-		if (unlikely(!PageSlab(page))) {
-			free_nonslab_page(page, object);
+		if (unlikely(!folio_test_slab(folio))) {
+			free_nonslab_page(folio_page(folio, 0), object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		df->s = page->slab_cache;
+		slab = folio_slab(folio);
+		df->s = slab->slab_cache;
 	} else {
+		slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
@@ -3605,7 +3608,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	}
 
 	/* Start new detached freelist */
-	df->page = page;
+	df->slab = slab;
 	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
@@ -3617,8 +3620,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		if (!object)
 			continue; /* Skip processed objects */
 
-		/* df->page is always set at this point */
-		if (df->page == virt_to_head_page(object)) {
+		/* df->slab is always set at this point */
+		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
@@ -3650,10 +3653,10 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		struct detached_freelist df;
 
 		size = build_detached_freelist(s, size, p, &df);
-		if (!df.page)
+		if (!df.slab)
			continue;
 
-		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, slab_page(df.slab), df.freelist, df.tail, df.cnt, _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
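
The comment block in the second hunk describes the mechanism this patch re-types: a limited-lookahead scan groups objects that live in the same slab and links them into a singly linked freelist threaded through the objects themselves, which kmem_cache_free_bulk() then hands to slab_free() in one go. A standalone sketch of that idea (a simplified model, not kernel code; slab_of() and the 4 KiB grouping are invented stand-ins for the real virt_to_slab() test, and the kernel's set_freepointer() additionally handles per-cache offsets and pointer hardening):

#include <stdint.h>
#include <stdio.h>

/* Fake "which slab holds this object" test: same 4 KiB region. */
static uintptr_t slab_of(void *obj)
{
	return (uintptr_t)obj >> 12;
}

/*
 * Chain every entry of p[0..n) that shares p[0]'s slab into a list
 * threaded through the objects (the first word of each object serves
 * as the free pointer).  Processed slots are NULLed out, mirroring
 * how build_detached_freelist() marks objects; returns the count.
 */
static int build_freelist_model(void **p, int n, void **head, void **tail)
{
	uintptr_t slab = slab_of(p[0]);
	int cnt = 0;

	*head = *tail = NULL;
	for (int i = 0; i < n; i++) {
		if (!p[i] || slab_of(p[i]) != slab)
			continue;		/* different slab: left for a later pass */
		*(void **)p[i] = *head;		/* next pointer lives in the object */
		*head = p[i];
		if (!*tail)
			*tail = p[i];		/* first object chained becomes the tail */
		p[i] = NULL;			/* mark object processed */
		cnt++;
	}
	return cnt;
}

int main(void)
{
	/* One page-aligned buffer so all three objects share a "slab". */
	static char buf[4096] __attribute__((aligned(4096)));
	void *objs[] = { buf, buf + 64, buf + 128 };
	void *head, *tail;
	int cnt = build_freelist_model(objs, 3, &head, &tail);

	printf("chained %d objects: head=%p tail=%p\n", cnt, head, tail);
	return 0;
}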