Commit 68a384b5 authored by Naohiro Aota, committed by David Sterba
Browse files

btrfs: zoned: load active zone info for block group



Load activeness of underlying zones of a block group. When underlying zones
are active, we add the block group to the fs_info->zone_active_bgs list.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent afba2bc0
Loading
Loading
Loading
Loading
+24 −0
Original line number Diff line number Diff line
@@ -1170,6 +1170,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 *caps = NULL;
	unsigned long *active = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

@@ -1214,6 +1215,12 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
		goto out;
	}

	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
	if (!active) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
@@ -1297,8 +1304,16 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			__set_bit(i, active);
			break;
		}

		/*
		 * Consider a zone as active if we can allow any number of
		 * active zones.
		 */
		if (!device->zone_info->max_active_zones)
			__set_bit(i, active);
	}

	if (num_sequential > 0)
@@ -1346,6 +1361,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
		}
		cache->alloc_offset = alloc_offsets[0];
		cache->zone_capacity = caps[0];
		cache->zone_is_active = test_bit(0, active);
		break;
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
@@ -1361,6 +1377,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
		goto out;
	}

	if (cache->zone_is_active) {
		btrfs_get_block_group(cache);
		spin_lock(&fs_info->zone_active_bgs_lock);
		list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
		spin_unlock(&fs_info->zone_active_bgs_lock);
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
@@ -1392,6 +1415,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
		kfree(cache->physical_map);
		cache->physical_map = NULL;
	}
	bitmap_free(active);
	kfree(caps);
	kfree(alloc_offsets);
	free_extent_map(em);