drivers/md/bcache/bcache.h +5 −5

@@ -269,7 +269,7 @@ struct bcache_device {
 	atomic_t		*stripe_sectors_dirty;
 	unsigned long		*full_dirty_stripes;
 
-	struct bio_set		*bio_split;
+	struct bio_set		bio_split;
 
 	unsigned		data_csum:1;

@@ -530,9 +530,9 @@ struct cache_set {
 	struct closure		sb_write;
 	struct semaphore	sb_write_mutex;
 
-	mempool_t		*search;
-	mempool_t		*bio_meta;
-	struct bio_set		*bio_split;
+	mempool_t		search;
+	mempool_t		bio_meta;
+	struct bio_set		bio_split;
 
 	/* For the btree cache */
 	struct shrinker		shrink;

@@ -657,7 +657,7 @@ struct cache_set {
 	 * A btree node on disk could have too many bsets for an iterator to fit
 	 * on the stack - have to dynamically allocate them
	 */
-	mempool_t		*fill_iter;
+	mempool_t		fill_iter;
 
 	struct bset_sort_state	sort;
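The pattern in these hunks, repeated across the rest of the series, is to embed each pool in its owning struct rather than hold a pointer to a separately allocated one, pairing that with the init-in-place API (mempool_init_*()/mempool_exit(), bioset_init()/bioset_exit()) instead of the allocating API (mempool_create_*()/mempool_destroy()). Below is a minimal sketch of that lifecycle, not code from this patch: the names (my_cache, my_cache_init, my_cache_exit) are hypothetical and the pool sizes are arbitrary.

#include <linux/bio.h>
#include <linux/mempool.h>

struct my_cache {
	mempool_t	search;		/* embedded: no separate allocation to track */
	struct bio_set	bio_split;	/* ditto for the bio_set */
};

static int my_cache_init(struct my_cache *c)
{
	int ret;

	/* mempool_init_*() fills in an existing mempool_t in place and
	 * returns 0 or a -errno, unlike mempool_create_*() which returns
	 * a pointer that must be NULL-checked */
	ret = mempool_init_kmalloc_pool(&c->search, 32, 256);
	if (ret)
		return ret;

	/* bioset_init() is the bio_set counterpart of mempool_init() */
	ret = bioset_init(&c->bio_split, 4, 0, BIOSET_NEED_BVECS);
	if (ret)
		mempool_exit(&c->search);

	return ret;
}

static void my_cache_exit(struct my_cache *c)
{
	bioset_exit(&c->bio_split);
	/* safe even if my_cache_init() never ran, provided the struct
	 * was zeroed (e.g. kzalloc()) */
	mempool_exit(&c->search);
}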
drivers/md/bcache/bset.c +4 −9

@@ -1118,8 +1118,7 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 
 void bch_bset_sort_state_free(struct bset_sort_state *state)
 {
-	if (state->pool)
-		mempool_destroy(state->pool);
+	mempool_exit(&state->pool);
 }
 
 int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)

@@ -1129,11 +1128,7 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 	state->page_order = page_order;
 	state->crit_factor = int_sqrt(1 << page_order);
 
-	state->pool = mempool_create_page_pool(1, page_order);
-	if (!state->pool)
-		return -ENOMEM;
-
-	return 0;
+	return mempool_init_page_pool(&state->pool, 1, page_order);
 }
 EXPORT_SYMBOL(bch_bset_sort_state_init);

@@ -1191,7 +1186,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 
 		BUG_ON(order > state->page_order);
 
-		outp = mempool_alloc(state->pool, GFP_NOIO);
+		outp = mempool_alloc(&state->pool, GFP_NOIO);
 		out = page_address(outp);
 		used_mempool = true;
 		order = state->page_order;

@@ -1220,7 +1215,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 	}
 
 	if (used_mempool)
-		mempool_free(virt_to_page(out), state->pool);
+		mempool_free(virt_to_page(out), &state->pool);
 	else
 		free_pages((unsigned long) out, order);
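Two simplifications fall out of this conversion: mempool_init_page_pool() returns 0 or -ENOMEM directly, so the init path collapses to a single return, and mempool_exit() tolerates a pool that was never initialized (on a zeroed struct), so the "if (state->pool)" guard can go away. A hedged sketch of the same shape, with hypothetical names (my_sort_state, my_sort_state_init/free) standing in for the bcache ones:

#include <linux/mempool.h>

struct my_sort_state {
	mempool_t pool;		/* was "mempool_t *pool" before the conversion */
};

static int my_sort_state_init(struct my_sort_state *state, unsigned page_order)
{
	/* one call, one return value: 0 on success, -ENOMEM on failure */
	return mempool_init_page_pool(&state->pool, 1, page_order);
}

static void my_sort_state_free(struct my_sort_state *state)
{
	/* no NULL check needed: mempool_exit() is safe for a pool that
	 * is still all zeroes */
	mempool_exit(&state->pool);
}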
drivers/md/bcache/bset.h +1 −1

@@ -347,7 +347,7 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
 /* Sorting */
 
 struct bset_sort_state {
-	mempool_t		*pool;
+	mempool_t		pool;
 
 	unsigned		page_order;
 	unsigned		crit_factor;
drivers/md/bcache/btree.c +2 −2

@@ -204,7 +204,7 @@ void bch_btree_node_read_done(struct btree *b)
 	struct bset *i = btree_bset_first(b);
 	struct btree_iter *iter;
 
-	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
+	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
 	iter->used = 0;

@@ -271,7 +271,7 @@ void bch_btree_node_read_done(struct btree *b)
 	bch_bset_init_next(&b->keys, write_block(b), bset_magic(&b->c->sb));
 out:
-	mempool_free(iter, b->c->fill_iter);
+	mempool_free(iter, &b->c->fill_iter);
 	return;
 err:
 	set_btree_node_io_error(b);
drivers/md/bcache/io.c +2 −2

@@ -17,12 +17,12 @@ void bch_bbio_free(struct bio *bio, struct cache_set *c)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
-	mempool_free(b, c->bio_meta);
+	mempool_free(b, &c->bio_meta);
 }
 
 struct bio *bch_bbio_alloc(struct cache_set *c)
 {
-	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
+	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
 	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
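At the alloc/free sites in btree.c and io.c the only change is taking the address of the embedded member, since mempool_alloc() and mempool_free() still expect a mempool_t *. A small sketch of that call-site shape, with a hypothetical my_set/meta pair standing in for cache_set/bio_meta:

#include <linux/mempool.h>

struct my_set {
	mempool_t meta;		/* embedded, set up earlier with mempool_init_*() */
};

static void *my_meta_alloc(struct my_set *s)
{
	/* the '&' is the whole diff at call sites: member, not pointer.
	 * With a sleeping mask like GFP_NOIO, mempool_alloc() waits for
	 * a free element rather than failing. */
	return mempool_alloc(&s->meta, GFP_NOIO);
}

static void my_meta_free(struct my_set *s, void *obj)
{
	mempool_free(obj, &s->meta);
}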