include/linux/slub_def.h +17 −4

@@ -46,7 +46,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -89,6 +88,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
@@ -120,11 +120,24 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -212,7 +225,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -234,7 +247,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
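For reference, a minimal userspace sketch (not kernel code) of the size dispatch the new constants produce: sizes up to SLUB_MAX_SIZE are served from kmalloc_caches[], anything larger falls through to the page allocator via kmalloc_large(). PAGE_SHIFT is assumed to be 12 (4 KiB pages), and cache_index() is a simplified stand-in for the kernel's kmalloc_index(), which special-cases the smallest sizes.

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE   (2 * PAGE_SIZE)		/* was PAGE_SIZE */
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)	/* was PAGE_SHIFT + 1 */

/* log2 of the smallest power of two >= size, starting at 2^3 == 8. */
static int cache_index(unsigned long size)
{
	int i = 3;

	while ((1UL << i) < size)
		i++;
	return i;
}

int main(void)
{
	unsigned long sizes[] = { 64, 4096, 6000, 8192, 8193 };
	unsigned long i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (sizes[i] > SLUB_MAX_SIZE)
			printf("%5lu bytes -> page allocator (kmalloc_large)\n",
			       sizes[i]);
		else
			printf("%5lu bytes -> kmalloc_caches[%d]\n",
			       sizes[i], cache_index(sizes[i]));
	}
	return 0;
}

With 4 KiB pages, indices run up to 13 (8 KiB objects), which is exactly why the array dimension grows from PAGE_SHIFT + 1 to SLUB_PAGE_SHIFT = PAGE_SHIFT + 2.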
mm/slob.c +27 −16

@@ -126,9 +126,9 @@
 static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +403,11 @@ static void slob_free(void *block, int size)
 		/* Go directly to page allocator. Do not pass slob allocator */
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
+		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
 		free_page((unsigned long)b);
-		goto out;
+		return;
 	}
 
 	if (!slob_page_free(sp)) {
@@ -476,7 +487,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		void *ret;
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -494,8 +505,8 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
@@ -513,8 +524,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -573,7 +584,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	if (c->size < PAGE_SIZE)
 		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = slob_new_page(flags, get_order(c->size), node);
+		b = slob_new_pages(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b);
@@ -587,7 +598,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
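The kfree()/ksize() hunks above lean on slob's size-prefix layout for sub-page kmalloc blocks: the requested size is stored in an aligned header immediately before the pointer handed to the caller, and the free path walks back over it. A userspace sketch of that scheme, with ALIGN standing in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN) and plain malloc/free in place of the slob free lists:

#include <stdio.h>
#include <stdlib.h>

#define ALIGN 8	/* assumed minimum alignment */

static void *toy_kmalloc(size_t size)
{
	char *base = malloc(size + ALIGN);

	if (!base)
		return NULL;
	*(unsigned int *)base = (unsigned int)size;	/* size header */
	return base + ALIGN;				/* caller's block */
}

static size_t toy_ksize(const void *block)
{
	/* Step back over the header, as ksize() does above. */
	const unsigned int *m =
		(const unsigned int *)((const char *)block - ALIGN);

	return *m;
}

static void toy_kfree(void *block)
{
	free((char *)block - ALIGN);
}

int main(void)
{
	void *p = toy_kmalloc(100);

	printf("ksize = %zu\n", toy_ksize(p));	/* prints 100 */
	toy_kfree(p);
	return 0;
}

The real ksize() additionally rounds through SLOB_UNITS(*m) * SLOB_UNIT; that rounding is omitted here for clarity.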
mm/slub.c +52 −30

@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
+	struct track *p = get_track(s, object, alloc);
 
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
-
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects --;
 	}
@@ -1928,17 +1926,6 @@ static void init_kmem_cache_node(struct kmem_cache_node *n,
 			struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
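A userspace sketch of the revised calculate_order() search: the new max_objects cap keeps the starting min_objects within what the largest allowed slab can actually hold, and stepping min_objects down by one (instead of halving) tries more intermediate configurations before falling back. This is a simplification under stated assumptions: slab_order() here ignores slub_min_order, and the starting min_objects of 16 stands in for the 4 * (fls(nr_cpu_ids) + 1) heuristic. The remaining mm/slub.c hunks follow the sketch.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int slub_max_order = 3;	/* assumed default */

static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

/* Smallest order fitting min_objects with waste <= 1/fract_leftover. */
static int slab_order(int size, int min_objects, int max_order,
		      int fract_leftover)
{
	int order;

	for (order = get_order((unsigned long)size * min_objects);
	     order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			return order;
	}
	return max_order + 1;	/* no fit at this waste fraction */
}

static int calculate_order(int size)
{
	int min_objects = 16;	/* stand-in for the CPU-count heuristic */
	int max_objects = (PAGE_SIZE << slub_max_order) / size;
	int order, fraction;

	if (min_objects > max_objects)	/* the new cap */
		min_objects = max_objects;

	while (min_objects > 1) {
		fraction = 16;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
					   slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects--;	/* was /= 2: step down gradually */
	}
	return slab_order(size, 1, slub_max_order, 1);
}

int main(void)
{
	int sizes[] = { 64, 192, 1000, 2048, 7000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("size %5d -> order %d\n", sizes[i],
		       calculate_order(sizes[i]));
	return 0;
}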
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2538,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
@@ -2986,7 +2987,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3024,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3223,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3239,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3836,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
@@ -4151,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,
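Finally, a userspace sketch of the new per-cache min_partial behavior: the ilog2(s->size) default chosen in kmem_cache_open() and the clamp that both that path and the new /sys/kernel/slab/<cache>/min_partial store share. MIN_PARTIAL and MAX_PARTIAL are assumed to keep slub.c's values of 5 and 10, and strtoul stands in for the kernel's strict_strtoul().

#include <stdio.h>
#include <stdlib.h>

#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

struct toy_cache {
	unsigned long min_partial;
	unsigned long size;
};

static unsigned long ilog2(unsigned long n)
{
	unsigned long log = 0;

	while (n >>= 1)
		log++;
	return log;
}

/* Same clamp as the set_min_partial() added above. */
static void set_min_partial(struct toy_cache *s, unsigned long min)
{
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	s->min_partial = min;
}

int main(void)
{
	struct toy_cache s = { .size = 4096 };

	/* Creation-time default: ilog2(4096) = 12, clamped to 10. */
	set_min_partial(&s, ilog2(s.size));
	printf("default min_partial = %lu\n", s.min_partial);

	/* What min_partial_store() does when "2" is written via sysfs: */
	set_min_partial(&s, strtoul("2", NULL, 10));
	printf("stored  min_partial = %lu\n", s.min_partial);	/* 5 */
	return 0;
}

Because min_partial now lives in struct kmem_cache rather than per-node state, a single sysfs write tunes the empty-slab retention policy for every node at once, which is what the s->min_partial references in get_any_partial() and unfreeze_slab() rely on.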