Documentation/vm/slabinfo.c +6 −4

@@ -38,7 +38,7 @@ struct slabinfo {
 	unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
 	unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
 	unsigned long deactivate_to_head, deactivate_to_tail;
-	unsigned long deactivate_remote_frees;
+	unsigned long deactivate_remote_frees, order_fallback;
 	int numa[MAX_NODES];
 	int numa_partial[MAX_NODES];
 } slabinfo[MAX_SLABS];
@@ -293,7 +293,7 @@ int line = 0;
 void first_line(void)
 {
 	if (show_activity)
-		printf("Name                   Objects      Alloc       Free   %%Fast\n");
+		printf("Name                   Objects      Alloc       Free   %%Fast Fallb O\n");
 	else
 		printf("Name                   Objects Objsize    Space "
 			"Slabs/Part/Cpu  O/S O %%Fr %%Ef Flg\n");
@@ -573,11 +573,12 @@ void slabcache(struct slabinfo *s)
 		total_alloc = s->alloc_fastpath + s->alloc_slowpath;
 		total_free = s->free_fastpath + s->free_slowpath;
 
-		printf("%-21s %8ld %8ld %8ld %3ld %3ld \n",
+		printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
 			s->name, s->objects,
 			total_alloc, total_free,
 			total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
-			total_free ? (s->free_fastpath * 100 / total_free) : 0);
+			total_free ? (s->free_fastpath * 100 / total_free) : 0,
+			s->order_fallback, s->order);
 
 	} else
 		printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
@@ -1188,6 +1189,7 @@ void read_slab_dir(void)
 			slab->deactivate_to_head = get_obj("deactivate_to_head");
 			slab->deactivate_to_tail = get_obj("deactivate_to_tail");
 			slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
+			slab->order_fallback = get_obj("order_fallback");
 			chdir("..");
 			if (slab->name[0] == ':')
 				alias_targets++;
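The new "Fallb" column reports order_fallback, a counter of how often SLUB had to fall back to a smaller page order when allocating a new slab. slabinfo picks it up like every other statistic: get_obj() reads the per-cache counter file after chdir()ing into the cache's directory under /sys/kernel/slab/. A minimal standalone sketch of that read path follows; it assumes the usual /sys/kernel/slab layout, a kernel built with SLUB statistics support (the counter reads as 0 otherwise), and the cache name "kmalloc-64" is purely illustrative:

	#include <stdio.h>

	/*
	 * Read one SLUB statistic from sysfs, the way slabinfo's get_obj()
	 * does after entering /sys/kernel/slab/<cache>/. A missing or
	 * unreadable file reads as 0, matching slabinfo's behavior on
	 * kernels that lack the counter.
	 */
	static unsigned long read_slab_stat(const char *cache, const char *stat)
	{
		char path[256];
		unsigned long val = 0;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s",
			cache, stat);
		f = fopen(path, "r");
		if (!f)
			return 0;
		if (fscanf(f, "%lu", &val) != 1)	/* first field is the total */
			val = 0;
		fclose(f);
		return val;
	}

	int main(void)
	{
		/* "kmalloc-64" is just an example cache name. */
		printf("order_fallback: %lu\n",
			read_slab_stat("kmalloc-64", "order_fallback"));
		return 0;
	}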
init/Kconfig +1 −1

@@ -720,7 +720,7 @@ config VM_EVENT_COUNTERS
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EMBEDDED
-	depends on SLUB
+	depends on SLUB && SYSFS
 	help
 	  SLUB has extensive debug support features. Disabling these can
 	  result in significant savings in code size. This also disables
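This dependency means CONFIG_SLUB_DEBUG can now only be set when CONFIG_SYSFS is, which is what lets the compound preprocessor guards in mm/slub.c below collapse, roughly:

	/* Before: sysfs availability had to be checked separately. */
	#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)

	/* After: SLUB_DEBUG implies SYSFS via Kconfig, so this suffices. */
	#ifdef CONFIG_SLUB_DEBUG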
mm/slub.c +27 −17

@@ -217,7 +217,7 @@ struct track {
 
 enum track_item { TRACK_ALLOC, TRACK_FREE };
 
-#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
+#ifdef CONFIG_SLUB_DEBUG
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
@@ -814,7 +814,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+static void trace(struct kmem_cache *s, struct page *page, void *object,
+								int alloc)
 {
 	if (s->flags & SLAB_TRACE) {
 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
@@ -1267,8 +1268,7 @@ static void add_partial(struct kmem_cache_node *n,
 	spin_unlock(&n->list_lock);
 }
 
-static void remove_partial(struct kmem_cache *s,
-						struct page *page)
+static void remove_partial(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1283,7 +1283,8 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
+							struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
@@ -1420,8 +1421,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		 * so that the others get filled first. That way the
 		 * size of the partial list stays small.
 		 *
-		 * kmem_cache_shrink can reclaim any empty slabs from the
-		 * partial list.
+		 * kmem_cache_shrink can reclaim any empty slabs from
+		 * the partial list.
 		 */
 		add_partial(n, page, 1);
 		slab_unlock(page);
@@ -2909,7 +2910,7 @@ static int slab_mem_going_online_callback(void *arg)
 		return 0;
 
 	/*
-	 * We are bringing a node online. No memory is availabe yet. We must
+	 * We are bringing a node online. No memory is available yet. We must
 	 * allocate a kmem_cache_node structure in order to bring the node
 	 * online.
 	 */
@@ -3246,7 +3247,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
+#ifdef CONFIG_SLUB_DEBUG
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -3275,9 +3276,7 @@ static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
-#endif
-
-#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
+
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
@@ -3812,7 +3811,12 @@ SLAB_ATTR_RO(objs_per_slab);
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int order = simple_strtoul(buf, NULL, 10);
+	unsigned long order;
+	int err;
+
+	err = strict_strtoul(buf, 10, &order);
+	if (err)
+		return err;
 
 	if (order > slub_max_order || order < slub_min_order)
 		return -EINVAL;
@@ -4065,10 +4069,16 @@ static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int n = simple_strtoul(buf, NULL, 10);
+	unsigned long ratio;
+	int err;
+
+	err = strict_strtoul(buf, 10, &ratio);
+	if (err)
+		return err;
+
+	if (ratio < 100)
+		s->remote_node_defrag_ratio = ratio * 10;
 
-	if (n < 100)
-		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
 SLAB_ATTR(remote_node_defrag_ratio);
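The simple_strtoul() to strict_strtoul() conversions close an input-validation hole in these sysfs store handlers: simple_strtoul() parses whatever numeric prefix it finds and silently yields 0 for garbage, so a write like "echo junk > order" used to be treated as order 0, while strict_strtoul() returns 0 only for a complete, well-formed number (tolerating the trailing newline sysfs writes carry) and -EINVAL otherwise, letting the handler reject the write. A userspace analogue of that semantic, built on strtoul/errno; the helper name strict_parse_ulong is illustrative, not a kernel API:

	#include <errno.h>
	#include <stdlib.h>

	/*
	 * Mimic kernel strict_strtoul(): accept only a complete, well-formed
	 * unsigned number, with at most one trailing newline tolerated.
	 * Returns 0 on success, -EINVAL on any parse failure.
	 */
	static int strict_parse_ulong(const char *s, unsigned int base,
					unsigned long *res)
	{
		char *end;

		errno = 0;
		*res = strtoul(s, &end, base);
		if (errno || end == s)		/* overflow or no digits */
			return -EINVAL;
		if (*end == '\n')		/* allow "3\n" from echo */
			end++;
		if (*end != '\0')		/* reject trailing junk: "3x" */
			return -EINVAL;
		return 0;
	}

With these semantics, "3\n" parses to 3, while "3x" or "junk" fails with -EINVAL instead of being silently accepted as 3 or 0.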