mm/slub.c (+8 −7)

@@ -1457,8 +1457,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
-							struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+		struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		remove_partial(n, page);
@@ -1470,7 +1470,8 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+					struct kmem_cache_node *n)
 {
 	struct page *page;
 
@@ -1485,7 +1486,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_freeze_slab(n, page))
+		if (lock_and_freeze_slab(s, n, page))
 			goto out;
 	page = NULL;
 out:
 	spin_unlock(&n->list_lock);
@@ -1536,7 +1537,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
-			page = get_partial_node(n);
+			page = get_partial_node(s, n);
 			if (page) {
 				put_mems_allowed();
 				return page;
@@ -1556,7 +1557,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-	page = get_partial_node(get_node(s, searchnode));
+	page = get_partial_node(s, get_node(s, searchnode));
 	if (page || node != NUMA_NO_NODE)
 		return page;
@@ -2081,7 +2082,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
 	void *prior;
 	void **object = (void *)x;
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
 
 	local_irq_save(flags);
 	slab_lock(page);
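
Note that the visible hunks never dereference the new `s` argument themselves; presumably the cache pointer is being threaded through so that later patches in the series can use per-cache state inside these helpers. Below is a minimal, self-contained C sketch of the calling pattern the patch establishes — the types and bodies are hypothetical simplified stand-ins, not the kernel's:

/*
 * Hypothetical sketch: the kmem_cache pointer is threaded from the
 * caller through get_partial_node() down into lock_and_freeze_slab(),
 * making per-cache state available in the leaf helper.
 */
#include <stdio.h>

struct kmem_cache      { const char *name; };
struct kmem_cache_node { int nr_partial; };
struct page            { int in_use; };

static int lock_and_freeze_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page)
{
	/* The cache pointer is now in scope for per-cache checks. */
	printf("freezing a slab of cache %s\n", s->name);
	n->nr_partial--;
	return 1;
}

static struct page *get_partial_node(struct kmem_cache *s,
		struct kmem_cache_node *n)
{
	static struct page page;

	if (n->nr_partial && lock_and_freeze_slab(s, n, &page))
		return &page;
	return NULL;
}

int main(void)
{
	struct kmem_cache s = { .name = "demo-cache" };
	struct kmem_cache_node n = { .nr_partial = 1 };

	return get_partial_node(&s, &n) ? 0 : 1;
}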
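On the last hunk: uninitialized_var() was a kernel macro (since removed from mainline) for silencing false-positive "may be used uninitialized" warnings. In __slab_free(), local_irq_save(flags) always writes flags before any read, so the warning some compiler versions emitted here was spurious. A sketch of the macro's effect, assuming its old include/linux/compiler-gcc.h definition:

/* Assumed historical definition: the self-assignment makes the
 * compiler consider `x` initialized without emitting extra code. */
#define uninitialized_var(x) x = x

static unsigned long demo(void)
{
	unsigned long uninitialized_var(flags);	/* expands to: unsigned long flags = flags; */

	flags = 42;	/* analogous to local_irq_save(flags) storing before any read */
	return flags;
}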