Commit 22c306fe authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba
Browse files

btrfs: introduce alloc_compressed_bio() for compression



Just aggregate the bio allocation code into one helper, so that we can
replace 4 call sites.

There is one special note for zoned write.

Currently btrfs_submit_compressed_write() will only allocate the first
bio using ZONE_APPEND.  If we have to submit current bio due to stripe
boundary, the new bio allocated will not use ZONE_APPEND.

In theory this should be a bug, but considering that zoned mode currently
only supports the SINGLE profile, which has no stripe boundary limit, it
should never be a problem in practice, and we have assertions in place.

This function will provide a good entry point for any work that needs to
be done at bio allocation time, like determining the stripe boundary.

Signed-off-by: default avatarQu Wenruo <wqu@suse.com>
Signed-off-by: default avatarDavid Sterba <dsterba@suse.com>
parent 2d4e0b84
Loading
Loading
Loading
Loading
+58 −32
Original line number Original line Diff line number Diff line
@@ -434,6 +434,36 @@ static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
	return ret;
	return ret;
}
}


/*
 * Allocate a bio for reading or writing the on-disk data of a compressed_bio.
 *
 * The returned bio has its target sector, op flags, private data and end_io
 * callback already filled in.  For zoned append writes the target device is
 * looked up and assigned as well, since REQ_OP_ZONE_APPEND bios must be bound
 * to a specific zoned block device.
 *
 * Returns the bio on success, or an ERR_PTR() on failure (zoned device
 * lookup failing is the only error path).
 */
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
					unsigned int opf, bio_end_io_t endio_func)
{
	struct bio *bio = btrfs_bio_alloc(BIO_MAX_VECS);

	bio->bi_opf = opf;
	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bio->bi_end_io = endio_func;
	bio->bi_private = cb;

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
		struct btrfs_device *dev;

		dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
					     fs_info->sectorsize);
		if (IS_ERR(dev)) {
			/* Drop the bio we just allocated before bailing out */
			bio_put(bio);
			return ERR_CAST(dev);
		}
		bio_set_dev(bio, dev->bdev);
	}
	return bio;
}

/*
/*
 * worker function to build and submit bios for previously compressed pages.
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * The corresponding pages in the inode should be marked for writeback
@@ -479,23 +509,11 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
	cb->orig_bio = NULL;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;
	cb->nr_pages = nr_pages;


	bio = btrfs_bio_alloc(BIO_MAX_VECS);
	bio = alloc_compressed_bio(cb, first_byte, bio_op | write_flags,
	bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
				   end_compressed_bio_write);
	bio->bi_opf = bio_op | write_flags;
	if (IS_ERR(bio)) {
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;

	if (use_append) {
		struct btrfs_device *device;

		device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
		if (IS_ERR(device)) {
		kfree(cb);
		kfree(cb);
			bio_put(bio);
		return errno_to_blk_status(PTR_ERR(bio));
			return BLK_STS_NOTSUPP;
		}

		bio_set_dev(bio, device->bdev);
	}
	}


	if (blkcg_css) {
	if (blkcg_css) {
@@ -539,11 +557,14 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
			if (ret)
			if (ret)
				goto finish_cb;
				goto finish_cb;


			bio = btrfs_bio_alloc(BIO_MAX_VECS);
			bio = alloc_compressed_bio(cb, first_byte,
			bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
					bio_op | write_flags,
			bio->bi_opf = bio_op | write_flags;
					end_compressed_bio_write);
			bio->bi_private = cb;
			if (IS_ERR(bio)) {
			bio->bi_end_io = end_compressed_bio_write;
				ret = errno_to_blk_status(PTR_ERR(bio));
				bio = NULL;
				goto finish_cb;
			}
			if (blkcg_css)
			if (blkcg_css)
				bio->bi_opf |= REQ_CGROUP_PUNT;
				bio->bi_opf |= REQ_CGROUP_PUNT;
			/*
			/*
@@ -839,11 +860,13 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
	/* include any pages we added in add_ra-bio_pages */
	/* include any pages we added in add_ra-bio_pages */
	cb->len = bio->bi_iter.bi_size;
	cb->len = bio->bi_iter.bi_size;


	comp_bio = btrfs_bio_alloc(BIO_MAX_VECS);
	comp_bio = alloc_compressed_bio(cb, cur_disk_byte, REQ_OP_READ,
	comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
					end_compressed_bio_read);
	comp_bio->bi_opf = REQ_OP_READ;
	if (IS_ERR(comp_bio)) {
	comp_bio->bi_private = cb;
		ret = errno_to_blk_status(PTR_ERR(comp_bio));
	comp_bio->bi_end_io = end_compressed_bio_read;
		comp_bio = NULL;
		goto fail2;
	}


	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		u32 pg_len = PAGE_SIZE;
		u32 pg_len = PAGE_SIZE;
@@ -884,11 +907,14 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
			if (ret)
			if (ret)
				goto finish_cb;
				goto finish_cb;


			comp_bio = btrfs_bio_alloc(BIO_MAX_VECS);
			comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
			comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
					REQ_OP_READ,
			comp_bio->bi_opf = REQ_OP_READ;
					end_compressed_bio_read);
			comp_bio->bi_private = cb;
			if (IS_ERR(comp_bio)) {
			comp_bio->bi_end_io = end_compressed_bio_read;
				ret = errno_to_blk_status(PTR_ERR(comp_bio));
				comp_bio = NULL;
				goto finish_cb;
			}


			bio_add_page(comp_bio, page, pg_len, 0);
			bio_add_page(comp_bio, page, pg_len, 0);
		}
		}