include/linux/slab.h (+0 −3)

@@ -117,9 +117,6 @@
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
-
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
mm/slab_common.c (+1 −1)

@@ -807,7 +807,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	unsigned int i;
 
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+		!is_power_of_2(KMALLOC_MIN_SIZE));
 
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 		unsigned int elem = size_index_elem(i);
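For reference (not part of the patch): the dropped bitmask expression and is_power_of_2() are interchangeable here because KMALLOC_MIN_SIZE is a nonzero compile-time constant. A minimal sketch of the helper, roughly mirroring include/linux/log2.h:

	/* Sketch only, approximating include/linux/log2.h; shown for context. */
	static inline bool is_power_of_2(unsigned long n)
	{
		/* A power of two has exactly one bit set, so n & (n - 1) clears it. */
		return n != 0 && ((n & (n - 1)) == 0);
	}

	/* For nonzero n:  (n & (n - 1)) != 0   <=>   !is_power_of_2(n)  */

So the new BUILD_BUG_ON condition rejects exactly the same values as the open-coded bit trick, while reading more clearly.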
mm/slob.c (+1 −1)

@@ -708,7 +708,7 @@ int __kmem_cache_shrink(struct kmem_cache *d)
 	return 0;
 }
 
-struct kmem_cache kmem_cache_boot = {
+static struct kmem_cache kmem_cache_boot = {
 	.name = "kmem_cache",
 	.size = sizeof(struct kmem_cache),
 	.flags = SLAB_PANIC,
mm/slub.c (+10 −15)

@@ -1788,8 +1788,8 @@ static void *setup_object(struct kmem_cache *s, struct slab *slab,
 /*
  * Slab allocation and freeing
  */
-static inline struct slab *alloc_slab_page(struct kmem_cache *s,
-		gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab_page(gfp_t flags, int node,
+		struct kmem_cache_order_objects oo)
 {
 	struct folio *folio;
 	struct slab *slab;

@@ -1941,7 +1941,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	slab = alloc_slab_page(s, alloc_gfp, node, oo);
+	slab = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!slab)) {
 		oo = s->min;
 		alloc_gfp = flags;

@@ -1949,7 +1949,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		slab = alloc_slab_page(s, alloc_gfp, node, oo);
+		slab = alloc_slab_page(alloc_gfp, node, oo);
 		if (unlikely(!slab))
 			goto out;
 		stat(s, ORDER_FALLBACK);

@@ -4046,7 +4046,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;

@@ -4150,9 +4150,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	size = ALIGN(size, s->align);
 	s->size = size;
 	s->reciprocal_size = reciprocal_value(size);
-	if (forced_order >= 0)
-		order = forced_order;
-	else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if ((int)order < 0)

@@ -4189,7 +4186,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (!calculate_sizes(s, -1))
+	if (!calculate_sizes(s))
 		goto error;
 	if (disable_higher_order_debug) {
 		/*

@@ -4199,7 +4196,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
-			if (!calculate_sizes(s, -1))
+			if (!calculate_sizes(s))
 				goto error;
 		}
 	}

@@ -5344,12 +5341,10 @@ struct slab_attribute {
 };
 
 #define SLAB_ATTR_RO(_name) \
-	static struct slab_attribute _name##_attr = \
-			__ATTR(_name, 0400, _name##_show, NULL)
+	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
 
 #define SLAB_ATTR(_name) \
-	static struct slab_attribute _name##_attr =  \
-			__ATTR(_name, 0600, _name##_show, _name##_store)
+	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
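A note on the sysfs macro change (not part of the patch): __ATTR_RO_MODE() and __ATTR_RW_MODE() are existing helpers in include/linux/sysfs.h that derive the ->show (and, for the RW variant, ->store) callbacks from the attribute name, which is why dropping the open-coded __ATTR(_name, mode, _name##_show, ...) initializers is behavior-preserving. Their expansion is roughly as sketched below:

	/* Approximate expansions, per include/linux/sysfs.h; shown for context only. */
	#define __ATTR_RO_MODE(_name, _mode) {				\
		.attr	= { .name = __stringify(_name),			\
			    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },	\
		.show	= _name##_show,					\
	}

	#define __ATTR_RW_MODE(_name, _mode) {				\
		.attr	= { .name = __stringify(_name),			\
			    .mode = VERIFY_OCTAL_PERMISSIONS(_mode) },	\
		.show	= _name##_show,					\
		.store	= _name##_store,				\
	}

With these, SLAB_ATTR_RO(foo) still binds foo_show and SLAB_ATTR(foo) still binds foo_show/foo_store, only through the shared sysfs helpers rather than a hand-rolled __ATTR() initializer.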