drivers/md/bcache/alloc.c +3 −2

@@ -155,7 +155,8 @@ bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
 
 static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
 {
-	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
+	return (!GC_MARK(b) ||
+		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
 		!atomic_read(&b->pin) &&
 		can_inc_bucket_gen(b);
 }
@@ -475,7 +476,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bucket *b = PTR_BUCKET(c, k, i);
 
-		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+		SET_GC_MARK(b, 0);
 		SET_GC_SECTORS_USED(b, 0);
 		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
 	}
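With this change, can_invalidate_bucket() accepts two states rather than one: a bucket with no mark at all (nothing the last GC pass saw points into it) and a bucket explicitly marked GC_MARK_RECLAIMABLE. A minimal userspace sketch of the new predicate, using illustrative stand-in types rather than the kernel's struct bucket:

#include <stdbool.h>
#include <stdio.h>

enum { MARK_RECLAIMABLE = 1, MARK_DIRTY = 2, MARK_METADATA = 3 };

/* Illustrative stand-in: only the fields the predicate inspects. */
struct fake_bucket {
	unsigned mark;    /* 0 = untouched by the last GC pass */
	int      pin;     /* stand-in for atomic_read(&b->pin) */
	bool     gen_ok;  /* stand-in for can_inc_bucket_gen() */
};

static bool can_invalidate(const struct fake_bucket *b)
{
	return (!b->mark || b->mark == MARK_RECLAIMABLE) &&
	       !b->pin && b->gen_ok;
}

int main(void)
{
	struct fake_bucket dirty = { MARK_DIRTY, 0, true };
	struct fake_bucket fresh = { 0, 0, true };

	/* prints "dirty: 0, fresh: 1" */
	printf("dirty: %d, fresh: %d\n",
	       can_invalidate(&dirty), can_invalidate(&fresh));
	return 0;
}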
drivers/md/bcache/bcache.h +3 −3

@@ -207,9 +207,9 @@ struct bucket {
  */
 
 BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
-#define GC_MARK_RECLAIMABLE	0
-#define GC_MARK_DIRTY		1
-#define GC_MARK_METADATA	2
+#define GC_MARK_RECLAIMABLE	1
+#define GC_MARK_DIRTY		2
+#define GC_MARK_METADATA	3
 #define GC_SECTORS_USED_SIZE	13
 #define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
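For context, the bucket's gc_mark word packs two fields: the 2-bit GC_MARK in bits 0-1 and the 13-bit GC_SECTORS_USED in bits 2-14, so shifting the mark constants up by one frees value 0 to mean "untouched by the last GC". A standalone sketch of that packing with plain shifts and masks (not the kernel's BITMASK macro; all names here are illustrative):

#include <assert.h>
#include <stdint.h>

struct fake_bucket { uint16_t gc_mark; };  /* 2 + 13 bits fit in 16 */

#define MARK_BITS     2
#define SECTORS_BITS  13
#define MARK_MASK     ((1u << MARK_BITS) - 1)
#define SECTORS_MASK  ((1u << SECTORS_BITS) - 1)

static unsigned get_mark(const struct fake_bucket *b)
{
	return b->gc_mark & MARK_MASK;
}

static void set_mark(struct fake_bucket *b, unsigned v)
{
	b->gc_mark = (b->gc_mark & ~MARK_MASK) | (v & MARK_MASK);
}

static unsigned get_sectors(const struct fake_bucket *b)
{
	return (b->gc_mark >> MARK_BITS) & SECTORS_MASK;
}

static void set_sectors(struct fake_bucket *b, unsigned v)
{
	b->gc_mark = (b->gc_mark & MARK_MASK) |
	             ((v & SECTORS_MASK) << MARK_BITS);
}

int main(void)
{
	struct fake_bucket b = { 0 };

	set_mark(&b, 1);      /* GC_MARK_RECLAIMABLE after this patch */
	set_sectors(&b, 100);
	assert(get_mark(&b) == 1 && get_sectors(&b) == 100);

	set_mark(&b, 0);      /* back to "untouched", sectors preserved */
	assert(!get_mark(&b) && get_sectors(&b) == 100);
	return 0;
}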
drivers/md/bcache/btree.c +12 −6

@@ -1160,6 +1160,8 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 		SET_GC_MARK(g, GC_MARK_METADATA);
 	else if (KEY_DIRTY(k))
 		SET_GC_MARK(g, GC_MARK_DIRTY);
+	else if (!GC_MARK(g))
+		SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
 
 	/* guard against overflow */
 	SET_GC_SECTORS_USED(g, min_t(unsigned,
@@ -1559,7 +1561,7 @@ static void btree_gc_start(struct cache_set *c)
 		for_each_bucket(b, ca) {
 			b->gc_gen = b->gen;
 			if (!atomic_read(&b->pin)) {
-				SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+				SET_GC_MARK(b, 0);
 				SET_GC_SECTORS_USED(b, 0);
 			}
 		}
@@ -1622,14 +1624,18 @@ size_t bch_btree_gc_finish(struct cache_set *c)
 			b->last_gc	= b->gc_gen;
 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
 
-			if (!atomic_read(&b->pin) &&
-			    GC_MARK(b) == GC_MARK_RECLAIMABLE) {
+			if (atomic_read(&b->pin))
+				continue;
+
+			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+
+			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
 				available++;
-				if (!GC_SECTORS_USED(b))
-					bch_bucket_add_unused(ca, b);
-			}
+
+			if (!GC_MARK(b))
+				bch_bucket_add_unused(ca, b);
 		}
 	}
 
 	mutex_unlock(&c->bucket_lock);
 	return available;
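Taken together, the reworked bch_btree_gc_finish() loop now draws a line between "available" buckets (mark 0 or GC_MARK_RECLAIMABLE, i.e. invalidatable on demand) and "unused" buckets (mark 0 only, which per the BUG_ON also implies zero sectors used). A small sketch of that accounting, again with illustrative stand-in types and assert() in place of BUG_ON():

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum { MARK_RECLAIMABLE = 1, MARK_DIRTY = 2, MARK_METADATA = 3 };

/* Illustrative stand-in: only the fields the loop inspects. */
struct fake_bucket {
	unsigned mark;          /* 0 = untouched by the last GC pass */
	unsigned sectors_used;
	int      pin;
};

static size_t unused;  /* stand-in for the unused freelist */

static void add_unused(struct fake_bucket *b)
{
	(void)b;
	unused++;
}

static size_t gc_finish(struct fake_bucket *buckets, size_t n)
{
	size_t available = 0;

	for (size_t i = 0; i < n; i++) {
		struct fake_bucket *b = &buckets[i];

		if (b->pin)
			continue;

		/* An unmarked bucket cannot have sectors accounted to it. */
		assert(!(!b->mark && b->sectors_used));

		if (!b->mark || b->mark == MARK_RECLAIMABLE)
			available++;       /* may be invalidated on demand */

		if (!b->mark)              /* only truly untouched buckets */
			add_unused(b);     /* go on the unused list */
	}
	return available;
}

int main(void)
{
	struct fake_bucket buckets[] = {
		{ 0,                0, 0 },  /* untouched: available + unused */
		{ MARK_RECLAIMABLE, 8, 0 },  /* clean data: available only */
		{ MARK_DIRTY,       8, 0 },  /* dirty: neither */
		{ MARK_METADATA,    8, 1 },  /* pinned metadata: skipped */
	};
	size_t available = gc_finish(buckets, 4);

	printf("available=%zu unused=%zu\n", available, unused);  /* 2 and 1 */
	return 0;
}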
drivers/md/bcache/extents.c +3 −3

@@ -499,9 +499,9 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
 
 	if (mutex_trylock(&b->c->bucket_lock)) {
 		if (b->c->gc_mark_valid &&
-		    ((GC_MARK(g) != GC_MARK_DIRTY &&
-		      KEY_DIRTY(k)) ||
-		     GC_MARK(g) == GC_MARK_METADATA))
+		    (!GC_MARK(g) ||
+		     GC_MARK(g) == GC_MARK_METADATA ||
+		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
 			goto err;
 
 		if (g->prio == BTREE_PRIO)
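The extents.c check is the consumer side of the new state: a cached extent pointing into a bucket GC left unmarked is now itself evidence of inconsistency, alongside the existing metadata and dirty-mismatch cases. A condensed sketch of the resulting predicate (illustrative types; the real bch_extent_bad_expensive() also checks generations and bucket priority):

#include <assert.h>
#include <stdbool.h>

enum { MARK_RECLAIMABLE = 1, MARK_DIRTY = 2, MARK_METADATA = 3 };

struct fake_bucket { unsigned mark; };  /* illustrative stand-in */

/*
 * A cached extent pointer is inconsistent with GC state when it lands
 * in a bucket GC never marked, in a metadata bucket, or when the key
 * is dirty but the bucket was not marked dirty.
 */
static bool extent_mark_bad(const struct fake_bucket *g, bool key_dirty)
{
	return !g->mark ||
	       g->mark == MARK_METADATA ||
	       (g->mark != MARK_DIRTY && key_dirty);
}

int main(void)
{
	struct fake_bucket clean = { MARK_RECLAIMABLE };
	struct fake_bucket fresh = { 0 };

	assert(!extent_mark_bad(&clean, false));
	assert(extent_mark_bad(&clean, true));   /* dirty key, clean bucket */
	assert(extent_mark_bad(&fresh, false));  /* pointer into unmarked bucket */
	return 0;
}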