Commit ca1a46d6 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull slab updates from Vlastimil Babka:

 - Separate struct slab from struct page - an offshoot of the page folio
   work.

   Struct page fields used by slab allocators are moved from struct page
   to a new struct slab, that uses the same physical storage. Similar to
   struct folio, it is always a head page. This brings better type
   safety, separation of large kmalloc allocations from true slabs, and
   cleanup of related objcg code.

 - A SLAB_MERGE_DEFAULT config optimization.

* tag 'slab-for-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (33 commits)
  mm/slob: Remove unnecessary page_mapcount_reset() function call
  bootmem: Use page->index instead of page->freelist
  zsmalloc: Stop using slab fields in struct page
  mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
  mm/slub: Simplify struct slab slabs field definition
  mm/sl*b: Differentiate struct slab fields by sl*b implementations
  mm/kfence: Convert kfence_guarded_alloc() to struct slab
  mm/kasan: Convert to struct folio and struct slab
  mm/slob: Convert SLOB to use struct slab and struct folio
  mm/memcg: Convert slab objcgs from struct page to struct slab
  mm: Convert struct page to struct slab in functions used by other subsystems
  mm/slab: Finish struct page to struct slab conversion
  mm/slab: Convert most struct page to struct slab by spatch
  mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab
  mm/slub: Finish struct page to struct slab conversion
  mm/slub: Convert most struct page to struct slab by spatch
  mm/slub: Convert pfmemalloc_match() to take a struct slab
  mm/slub: Convert __free_slab() to use struct slab
  mm/slub: Convert alloc_slab_page() to return a struct slab
  mm/slub: Convert print_page_info() to print_slab_info()
  ...
parents d93aebbd 9d6c59c1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -981,7 +981,7 @@ static void __meminit free_pagetable(struct page *page, int order)
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		magic = page->index;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
+1 −1
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@ void put_page_bootmem(struct page *page);
 */
static inline void free_bootmem_page(struct page *page)
{
	unsigned long magic = (unsigned long)page->freelist;
	unsigned long magic = page->index;

	/*
	 * The reserve_bootmem_region sets the reserved flag on bootmem
+5 −4
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

@@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
+0 −48
Original line number Diff line number Diff line
@@ -536,45 +536,6 @@ static inline bool folio_memcg_kmem(struct folio *folio)
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly associated
 * with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
@@ -582,15 +543,6 @@ static inline bool folio_memcg_kmem(struct folio *folio)
	return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static inline bool PageMemcgKmem(struct page *page)
+12 −0
Original line number Diff line number Diff line
@@ -863,6 +863,13 @@ static inline struct page *virt_to_head_page(const void *x)
	return compound_head(page);
}

static inline struct folio *virt_to_folio(const void *x)
{
	struct page *page = virt_to_page(x);

	return page_folio(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);
@@ -1753,6 +1760,11 @@ void page_address_init(void);
#define page_address_init()  do { } while(0)
#endif

static inline void *folio_address(const struct folio *folio)
{
	return page_address(&folio->page);
}

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
Loading