Commit 8379c0b3 authored by Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:
 "Fixes:

   - check that subvolume is writable when changing xattrs from security
     namespace

   - fix memory leak in device lookup helper

   - update generation of hole file extent item when merging holes

   - fix space cache corruption and potential double allocations; this
     is a rare bug but can be serious once it happens, stable backports
     and an analysis tool will be provided

   - fix error handling when deleting root references

   - fix crash due to assert when attempting to cancel suspended device
     replace, add message what to do if mount fails due to missing
     replace item

  Regressions:

   - don't merge pages into bio if their page offset is not contiguous

   - don't allow large NOWAIT direct reads, this could lead to short
     reads e.g. in io_uring"

* tag 'for-6.0-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: add info when mount fails due to stale replace target
  btrfs: replace: drop assert for suspended replace
  btrfs: fix silent failure when deleting root reference
  btrfs: fix space cache corruption and potential double allocations
  btrfs: don't allow large NOWAIT direct reads
  btrfs: don't merge pages into bio if their page offset is not contiguous
  btrfs: update generation of hole file extent item when merging holes
  btrfs: fix possible memory leak in btrfs_get_dev_args_from_path()
  btrfs: check if root is readonly while setting security xattr
parents c7bb3fbc f2c3bec2
Loading
Loading
Loading
Loading
+15 −32
Original line number Diff line number Diff line
@@ -440,39 +440,26 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

static bool space_cache_v1_done(struct btrfs_block_group *cache)
{
	bool ret;

	spin_lock(&cache->lock);
	ret = cache->cached != BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	return ret;
}

void btrfs_wait_space_cache_v1_finished(struct btrfs_block_group *cache,
				struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, space_cache_v1_done(cache));
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
@@ -750,9 +737,8 @@ static noinline void caching_thread(struct btrfs_work *work)
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;
@@ -785,9 +771,6 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	if (btrfs_test_opt(fs_info, SPACE_CACHE))
		cache->cached = BTRFS_CACHE_FAST;
	else
	cache->cached = BTRFS_CACHE_STARTED;
	cache->has_caching_ctl = 1;
	spin_unlock(&cache->lock);
@@ -801,8 +784,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (load_cache_only && caching_ctl)
		btrfs_wait_space_cache_v1_finished(cache, caching_ctl);
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

@@ -3312,7 +3295,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && !btrfs_block_group_done(cache))
			btrfs_cache_block_group(cache, 1);
			btrfs_cache_block_group(cache, true);

		byte_in_group = bytenr - cache->start;
		WARN_ON(byte_in_group > cache->length);
+1 −3
Original line number Diff line number Diff line
@@ -263,9 +263,7 @@ void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
				           u64 num_bytes);
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
int btrfs_cache_block_group(struct btrfs_block_group *cache,
			    int load_cache_only);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
+0 −1
Original line number Diff line number Diff line
@@ -505,7 +505,6 @@ struct btrfs_free_cluster {
enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FAST,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};
+2 −3
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
		 */
		if (btrfs_find_device(fs_info->fs_devices, &args)) {
			btrfs_err(fs_info,
			"replace devid present without an active replace item");
"replace without active item, run 'device scan --forget' on the target device");
			ret = -EUCLEAN;
		} else {
			dev_replace->srcdev = NULL;
@@ -1129,8 +1129,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
		up_write(&dev_replace->rwsem);

		/* Scrub for replace must not be running in suspended state */
		ret = btrfs_scrub_cancel(fs_info);
		ASSERT(ret != -ENOTCONN);
		btrfs_scrub_cancel(fs_info);

		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
+6 −24
Original line number Diff line number Diff line
@@ -2551,17 +2551,10 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 * Fully cache the free space first so that our pin removes the free space
	 * from the cache.
	 */
	btrfs_cache_block_group(cache, 1);
	/*
	 * Make sure we wait until the cache is completely built in case it is
	 * missing or is invalid and therefore needs to be rebuilt.
	 */
	ret = btrfs_wait_block_group_cache_done(cache);
	ret = btrfs_cache_block_group(cache, true);
	if (ret)
		goto out;

@@ -2584,12 +2577,7 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
	if (!block_group)
		return -EINVAL;

	btrfs_cache_block_group(block_group, 1);
	/*
	 * Make sure we wait until the cache is completely built in case it is
	 * missing or is invalid and therefore needs to be rebuilt.
	 */
	ret = btrfs_wait_block_group_cache_done(block_group);
	ret = btrfs_cache_block_group(block_group, true);
	if (ret)
		goto out;

@@ -4399,7 +4387,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
		ffe_ctl->cached = btrfs_block_group_done(block_group);
		if (unlikely(!ffe_ctl->cached)) {
			ffe_ctl->have_caching_bg = true;
			ret = btrfs_cache_block_group(block_group, 0);
			ret = btrfs_cache_block_group(block_group, false);

			/*
			 * If we get ENOMEM here or something else we want to
@@ -6169,13 +6157,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, 0);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
				ret = btrfs_wait_block_group_cache_done(cache);
				ret = btrfs_cache_block_group(cache, true);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
Loading