Commit 66fcf74e authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:

 - explicitly initialize zlib work memory to fix a KCSAN warning

 - limit number of send clones by maximum memory allocated

 - limit device extent size in case a device shrink races with chunk
   allocation

 - raid56 fixes:
     - fix copy&paste error in RAID6 stripe recovery
     - make error bitmap update atomic

* tag 'for-6.2-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: raid56: make error_bitmap update atomic
  btrfs: send: limit number of clones and allocated memory size
  btrfs: zlib: zero-initialize zlib workspace
  btrfs: limit device extents to the device size
  btrfs: raid56: fix stripes if vertical errors are found
parents d2d11f34 a9ad4d87
Loading
Loading
Loading
Loading
+11 −3
Original line number Diff line number Diff line
@@ -1426,12 +1426,20 @@ static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bi
	u32 bio_size = 0;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	int i;

	bio_for_each_segment_all(bvec, bio, iter_all)
		bio_size += bvec->bv_len;

	bitmap_set(rbio->error_bitmap, total_sector_nr,
		   bio_size >> rbio->bioc->fs_info->sectorsize_bits);
	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
@@ -1886,7 +1894,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

+3 −3
Original line number Diff line number Diff line
@@ -8073,10 +8073,10 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 * access_ok. Also set an upper limit for allocation size so this can't
	 * easily exhaust memory. Max number of clone sources is about 200K.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
	if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
		ret = -EINVAL;
		goto out;
	}
+5 −1
Original line number Diff line number Diff line
@@ -1600,7 +1600,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
	if (ret < 0)
		goto out;

	while (1) {
	while (search_start < search_end) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
@@ -1623,6 +1623,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_end)
			break;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;
			dev_extent_hole_check(device, &search_start, &hole_size,
@@ -1683,6 +1686,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
	else
		ret = 0;

	ASSERT(max_hole_start + max_hole_size <= search_end);
out:
	btrfs_free_path(path);
	*start = max_hole_start;
+1 −1
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level)

	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
	workspace->level = level;
	workspace->buf = NULL;
	/*