Commit a785fd28 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:
 "More fixes, some of them going back to older releases and there are
  fixes for hangs in stress tests regarding space caching:

   - fixes and progress tracking for hangs in free space caching, found
     by test generic/475

   - writeback fixes, write pages in integrity mode and skip writing
     pages that have been written meanwhile

   - properly clear end of extent range after an error

   - relocation fixes:
      - fix race between qgroup tree creation and relocation
      - detect and report invalid reloc roots"

* tag 'for-6.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: set cache_block_group_error if we find an error
  btrfs: reject invalid reloc tree root keys with stack dump
  btrfs: exit gracefully if reloc roots don't match
  btrfs: avoid race between qgroup tree creation and relocation
  btrfs: properly clear end of the unreserved range in cow_file_range
  btrfs: don't wait for writeback on clean pages in extent_write_cache_pages
  btrfs: don't stop integrity writeback too early
  btrfs: wait for actual caching progress during allocation
parents ae545c32 92fb94b6
Loading
Loading
Loading
Loading
+15 −2
Original line number Diff line number Diff line
@@ -441,13 +441,23 @@ void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}
@@ -802,10 +812,12 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

@@ -910,6 +922,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
+2 −0
Original line number Diff line number Diff line
@@ -90,6 +90,8 @@ struct btrfs_caching_control {
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	/* Track progress of caching during allocation. */
	atomic_t progress;
	refcount_t count;
};

+12 −1
Original line number Diff line number Diff line
@@ -1103,7 +1103,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
	btrfs_drew_lock_init(&root->snapshot_lock);

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root)) {
	    !btrfs_is_data_reloc_root(root) &&
	    is_fstree(root->root_key.objectid)) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}
@@ -1300,6 +1301,16 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	/*
	 * If we're called for non-subvolume trees, and above function didn't
	 * find one, do not try to read it from disk.
	 *
	 * This is namely for free-space-tree and quota tree, which can change
	 * at runtime and should only be grabbed from fs_info.
	 */
	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
		return ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
+4 −1
Original line number Diff line number Diff line
@@ -4310,8 +4310,11 @@ static noinline int find_free_extent(struct btrfs_root *root,
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) {
			if (!cache_block_group_error)
				cache_block_group_error = -EIO;
			goto loop;
		}

		if (!find_free_extent_check_size_class(ffe_ctl, block_group))
			goto loop;
+10 −3
Original line number Diff line number Diff line
@@ -2145,6 +2145,12 @@ static int extent_write_cache_pages(struct address_space *mapping,
				continue;
			}

			if (!folio_test_dirty(folio)) {
				/* Someone wrote it for us. */
				folio_unlock(folio);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (folio_test_writeback(folio))
					submit_write_bio(bio_ctrl, 0);
@@ -2164,11 +2170,12 @@ static int extent_write_cache_pages(struct address_space *mapping,
			}

			/*
			 * the filesystem may choose to bump up nr_to_write.
			 * The filesystem may choose to bump up nr_to_write.
			 * We have to make sure to honor the new nr_to_write
			 * at any time
			 * at any time.
			 */
			nr_to_write_done = wbc->nr_to_write <= 0;
			nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
					    wbc->nr_to_write <= 0);
		}
		folio_batch_release(&fbatch);
		cond_resched();
Loading