mm/slub.c  +25 −13

@@ -814,7 +814,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	return search == NULL;
 }
 
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+static void trace(struct kmem_cache *s, struct page *page,
+						void *object, int alloc)
 {
 	if (s->flags & SLAB_TRACE) {
 		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
@@ -1267,8 +1268,7 @@ static void add_partial(struct kmem_cache_node *n,
 	spin_unlock(&n->list_lock);
 }
 
-static void remove_partial(struct kmem_cache *s,
-						struct page *page)
+static void remove_partial(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
@@ -1283,7 +1283,8 @@ static void remove_partial(struct kmem_cache *s,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
+						struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
@@ -1420,8 +1421,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		 * so that the others get filled first. That way the
 		 * size of the partial list stays small.
 		 *
-		 * kmem_cache_shrink can reclaim any empty slabs from the
-		 * partial list.
+		 * kmem_cache_shrink can reclaim any empty slabs from
+		 * the partial list.
 		 */
 		add_partial(n, page, 1);
 		slab_unlock(page);
@@ -2909,7 +2910,7 @@ static int slab_mem_going_online_callback(void *arg)
 		return 0;
 
 	/*
-	 * We are bringing a node online. No memory is availabe yet. We must
+	 * We are bringing a node online. No memory is available yet. We must
 	 * allocate a kmem_cache_node structure in order to bring the node
 	 * online.
 	 */
@@ -3812,7 +3813,12 @@ SLAB_ATTR_RO(objs_per_slab);
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int order = simple_strtoul(buf, NULL, 10);
+	unsigned long order;
+	int err;
+
+	err = strict_strtoul(buf, 10, &order);
+	if (err)
+		return err;
 
 	if (order > slub_max_order || order < slub_min_order)
 		return -EINVAL;
@@ -4065,10 +4071,16 @@ static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	int n = simple_strtoul(buf, NULL, 10);
+	unsigned long ratio;
+	int err;
+
+	err = strict_strtoul(buf, 10, &ratio);
+	if (err)
+		return err;
+
+	if (ratio < 100)
+		s->remote_node_defrag_ratio = ratio * 10;
 
-	if (n < 100)
-		s->remote_node_defrag_ratio = n * 10;
 	return length;
 }
 SLAB_ATTR(remote_node_defrag_ratio);
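The substantive change in this diff is the switch from simple_strtoul() to strict_strtoul() in the two sysfs store handlers, so that malformed input makes the write fail with an error instead of being silently parsed as 0. Below is a minimal userspace sketch of that strict-parsing behaviour; parse_ulong_strict() is a hypothetical stand-in built on strtoul()/errno, not the kernel helper, and the sample inputs are made up for illustration.

/*
 * Userspace sketch (not kernel code) of strict integer parsing:
 * reject empty input, trailing garbage, and out-of-range values,
 * which is what the patch's strict_strtoul() conversion buys the
 * order and remote_node_defrag_ratio sysfs attributes.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_ulong_strict(const char *buf, unsigned int base,
			      unsigned long *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, base);
	if (errno == ERANGE)
		return -ERANGE;		/* value does not fit in unsigned long */
	if (end == buf)
		return -EINVAL;		/* no digits at all */
	if (*end == '\n')
		end++;			/* sysfs writes usually end in '\n' */
	if (*end != '\0')
		return -EINVAL;		/* trailing garbage after the number */
	*res = val;
	return 0;
}

int main(void)
{
	/* "3\n" parses; the other three are rejected instead of becoming 0 or 3. */
	const char *inputs[] = { "3\n", "abc", "3abc", "99999999999999999999" };

	for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		unsigned long order;
		int err = parse_ulong_strict(inputs[i], 10, &order);

		if (err)
			printf("input %zu: rejected, error %d\n", i, err);
		else
			printf("input %zu: parsed %lu\n", i, order);
	}
	return 0;
}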