mm/slub.c +13 −13

@@ -2607,6 +2607,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the

@@ -3078,19 +3091,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	return slab_alloc(s, gfpflags, node, caller);
 }
 
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)