Commit 3349b57f authored by Josef Bacik, committed by David Sterba
Browse files

btrfs: convert block group bit field to use bit helpers



We use a bit field in the btrfs_block_group for different flags, however
this is awkward because we have to hold the block_group->lock for any
modification of any of these fields, and makes the code clunky for a few
of these flags.  Convert these to a properly flags setup so we can
utilize the bit helpers.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 723de71d
Loading
Loading
Loading
Loading
+15 −10
Original line number Diff line number Diff line
@@ -772,7 +772,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	cache->has_caching_ctl = 1;
	set_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &cache->runtime_flags);
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
@@ -988,11 +988,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)

	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags))
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags)) {
		write_lock(&fs_info->block_group_cache_lock);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;
@@ -1034,12 +1035,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
		WARN_ON(block_group->zone_is_active &&
		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
				 &block_group->runtime_flags) &&
			block_group->space_info->active_total_bytes
			< block_group->length);
	}
	block_group->space_info->total_bytes -= block_group->length;
	if (block_group->zone_is_active)
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
		block_group->space_info->active_total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
@@ -1069,7 +1071,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
		goto out;

	spin_lock(&block_group->lock);
	block_group->removed = 1;
	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
@@ -2409,7 +2412,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
		ret = insert_block_group_item(trans, block_group);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		if (!block_group->chunk_item_inserted) {
		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
			      &block_group->runtime_flags)) {
			mutex_lock(&fs_info->chunk_mutex);
			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
			mutex_unlock(&fs_info->chunk_mutex);
@@ -3955,7 +3959,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
		while (block_group) {
			btrfs_wait_block_group_cache_done(block_group);
			spin_lock(&block_group->lock);
			if (block_group->iref)
			if (test_bit(BLOCK_GROUP_FLAG_IREF,
				     &block_group->runtime_flags))
				break;
			spin_unlock(&block_group->lock);
			block_group = btrfs_next_block_group(block_group);
@@ -3968,7 +3973,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
		}

		inode = block_group->inode;
		block_group->iref = 0;
		clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags);
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
@@ -4110,7 +4115,7 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   block_group->removed);
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
+13 −8
Original line number Diff line number Diff line
@@ -46,6 +46,18 @@ enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};

/*
 * Block group flags set at runtime, kept in block_group->runtime_flags and
 * manipulated with the atomic set_bit()/clear_bit()/test_bit() helpers.
 * These replace the former :1 bit fields of struct btrfs_block_group, which
 * required holding block_group->lock for every modification.
 */
enum btrfs_block_group_flags {
	/* block_group->inode holds an igrab()'d free-space-cache inode ref */
	BLOCK_GROUP_FLAG_IREF,
	/* A caching_ctl has been attached (cache->caching_ctl is set) */
	BLOCK_GROUP_FLAG_HAS_CACHING_CTL,
	/* Set by btrfs_remove_block_group(); checked before trim/copy work */
	BLOCK_GROUP_FLAG_REMOVED,
	/* Marked for copying during device replace (mark_block_group_to_copy) */
	BLOCK_GROUP_FLAG_TO_COPY,
	/* NOTE(review): no user visible in this diff — presumably guards a
	 * relocation-based repair in progress; confirm against full tree. */
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	/* Chunk item already inserted; skip btrfs_chunk_alloc_add_chunk_item() */
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	/* Zoned mode: the zone backing this block group is active */
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	/* Zoned mode: data relocation ongoing, block new allocations here */
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
@@ -95,16 +107,9 @@ struct btrfs_block_group {

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;
	unsigned int to_copy:1;
	unsigned int relocating_repair:1;
	unsigned int chunk_item_inserted:1;
	unsigned int zone_is_active:1;
	unsigned int zoned_data_reloc_ongoing:1;

	int disk_cache_state;

+3 −3
Original line number Diff line number Diff line
@@ -546,7 +546,7 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
			continue;

		spin_lock(&cache->lock);
		cache->to_copy = 1;
		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
		spin_unlock(&cache->lock);

		btrfs_put_block_group(cache);
@@ -577,7 +577,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
		return true;

	spin_lock(&cache->lock);
	if (cache->removed) {
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
		spin_unlock(&cache->lock);
		return true;
	}
@@ -611,7 +611,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,

	/* Last stripe on this device */
	spin_lock(&cache->lock);
	cache->to_copy = 0;
	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
	spin_unlock(&cache->lock);

	return true;
+3 −2
Original line number Diff line number Diff line
@@ -3804,7 +3804,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
	       block_group->start == fs_info->data_reloc_bg ||
	       fs_info->data_reloc_bg == 0);

	if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
	if (block_group->ro ||
	    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
		ret = 1;
		goto out;
	}
@@ -3881,7 +3882,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
		 * regular extents) at the same time to the same zone, which
		 * easily break the write pointer.
		 */
		block_group->zoned_data_reloc_ongoing = 1;
		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
		fs_info->data_reloc_bg = 0;
	}
	spin_unlock(&fs_info->relocation_bg_lock);
+7 −9
Original line number Diff line number Diff line
@@ -126,10 +126,8 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
	if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
@@ -241,8 +239,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
	clear_nlink(inode);
	/* One for the block groups ref */
	spin_lock(&block_group->lock);
	if (block_group->iref) {
		block_group->iref = 0;
	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
@@ -2876,7 +2873,8 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
	if (btrfs_is_zoned(fs_info)) {
		btrfs_info(fs_info, "free space %llu active %d",
			   block_group->zone_capacity - block_group->alloc_offset,
			   block_group->zone_is_active);
			   test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
				    &block_group->runtime_flags));
		return;
	}

@@ -4008,7 +4006,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
@@ -4038,7 +4036,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
@@ -4060,7 +4058,7 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
Loading