Commit ee5f017d authored by David Sterba
Browse files

btrfs: merge struct extent_page_data to btrfs_bio_ctrl



The two structures appear on the same call paths, btrfs_bio_ctrl is
embedded in extent_page_data and we pass bio_ctrl to some functions.
After merging there are fewer indirections and we have only one control
structure. The packing remains the same.

The btrfs_bio_ctrl was selected as the target structure as the operation
is closer to bio processing.

Structure layout:

struct btrfs_bio_ctrl {
        struct bio *               bio;                  /*     0     8 */
        int                        mirror_num;           /*     8     4 */
        enum btrfs_compression_type compress_type;       /*    12     4 */
        u32                        len_to_stripe_boundary; /*    16     4 */
        u32                        len_to_oe_boundary;   /*    20     4 */
        btrfs_bio_end_io_t         end_io_func;          /*    24     8 */
        bool                       extent_locked;        /*    32     1 */
        bool                       sync_io;              /*    33     1 */

        /* size: 40, cachelines: 1, members: 8 */
        /* padding: 6 */
        /* last cacheline: 40 bytes */
};

Signed-off-by: David Sterba <dsterba@suse.com>
parent 8ec8519b
Loading
Loading
Loading
Loading
+55 −60
Original line number Original line Diff line number Diff line
@@ -102,16 +102,14 @@ struct btrfs_bio_ctrl {
	u32 len_to_stripe_boundary;
	u32 len_to_stripe_boundary;
	u32 len_to_oe_boundary;
	u32 len_to_oe_boundary;
	btrfs_bio_end_io_t end_io_func;
	btrfs_bio_end_io_t end_io_func;
};


struct extent_page_data {
	/*
	struct btrfs_bio_ctrl bio_ctrl;
	 * Tell writepage not to lock the state bits for this range, it still
	/* tells writepage not to lock the state bits for this range
	 * does the unlocking.
	 * it still does the unlocking
	 */
	 */
	bool extent_locked;
	bool extent_locked;


	/* tells the submit_bio code to use REQ_SYNC */
	/* Tell the submit_bio code to use REQ_SYNC */
	bool sync_io;
	bool sync_io;
};
};


@@ -148,11 +146,11 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
}
}


/*
/*
 * Submit or fail the current bio in an extent_page_data structure.
 * Submit or fail the current bio in the bio_ctrl structure.
 */
 */
static void submit_write_bio(struct extent_page_data *epd, int ret)
static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
{
	struct bio *bio = epd->bio_ctrl.bio;
	struct bio *bio = bio_ctrl->bio;


	if (!bio)
	if (!bio)
		return;
		return;
@@ -161,9 +159,9 @@ static void submit_write_bio(struct extent_page_data *epd, int ret)
		ASSERT(ret < 0);
		ASSERT(ret < 0);
		btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
		btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
		/* The bio is owned by the end_io handler now */
		/* The bio is owned by the end_io handler now */
		epd->bio_ctrl.bio = NULL;
		bio_ctrl->bio = NULL;
	} else {
	} else {
		submit_one_bio(&epd->bio_ctrl);
		submit_one_bio(bio_ctrl);
	}
	}
}
}


@@ -2071,7 +2069,7 @@ static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
				 struct page *page,
				 struct page *page,
				 struct writeback_control *wbc,
				 struct writeback_control *wbc,
				 struct extent_page_data *epd,
				 struct btrfs_bio_ctrl *bio_ctrl,
				 loff_t i_size,
				 loff_t i_size,
				 int *nr_ret)
				 int *nr_ret)
{
{
@@ -2103,7 +2101,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
	 */
	 */
	wbc->nr_to_write--;
	wbc->nr_to_write--;


	epd->bio_ctrl.end_io_func = end_bio_extent_writepage;
	bio_ctrl->end_io_func = end_bio_extent_writepage;
	while (cur <= end) {
	while (cur <= end) {
		u64 disk_bytenr;
		u64 disk_bytenr;
		u64 em_end;
		u64 em_end;
@@ -2197,7 +2195,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
		btrfs_page_clear_dirty(fs_info, page, cur, iosize);
		btrfs_page_clear_dirty(fs_info, page, cur, iosize);


		ret = submit_extent_page(op | write_flags, wbc,
		ret = submit_extent_page(op | write_flags, wbc,
					 &epd->bio_ctrl, disk_bytenr,
					 bio_ctrl, disk_bytenr,
					 page, iosize,
					 page, iosize,
					 cur - page_offset(page),
					 cur - page_offset(page),
					 0, false);
					 0, false);
@@ -2237,7 +2235,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
 * Return <0 for error.
 * Return <0 for error.
 */
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      struct extent_page_data *epd)
			      struct btrfs_bio_ctrl *bio_ctrl)
{
{
	struct folio *folio = page_folio(page);
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct inode *inode = page->mapping->host;
@@ -2274,7 +2272,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
		goto done;
		goto done;
	}
	}


	if (!epd->extent_locked) {
	if (!bio_ctrl->extent_locked) {
		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
		ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
		if (ret == 1)
		if (ret == 1)
			return 0;
			return 0;
@@ -2282,7 +2280,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			goto done;
			goto done;
	}
	}


	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
	ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, bio_ctrl, i_size,
				    &nr);
				    &nr);
	if (ret == 1)
	if (ret == 1)
		return 0;
		return 0;
@@ -2326,9 +2324,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
	 */
	 */
	if (PageError(page))
	if (PageError(page))
		end_extent_writepage(page, ret, page_start, page_end);
		end_extent_writepage(page, ret, page_start, page_end);
	if (epd->extent_locked) {
	if (bio_ctrl->extent_locked) {
		/*
		/*
		 * If epd->extent_locked, it's from extent_write_locked_range(),
		 * If bio_ctrl->extent_locked, it's from extent_write_locked_range(),
		 * the page can either be locked by lock_page() or
		 * the page can either be locked by lock_page() or
		 * process_one_page().
		 * process_one_page().
		 * Let btrfs_page_unlock_writer() handle both cases.
		 * Let btrfs_page_unlock_writer() handle both cases.
@@ -2367,7 +2365,7 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
 * Return <0 if something went wrong, no page is locked.
 * Return <0 if something went wrong, no page is locked.
 */
 */
static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
			  struct extent_page_data *epd)
			  struct btrfs_bio_ctrl *bio_ctrl)
{
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int i, num_pages;
	int i, num_pages;
@@ -2375,17 +2373,17 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
	int ret = 0;
	int ret = 0;


	if (!btrfs_try_tree_write_lock(eb)) {
	if (!btrfs_try_tree_write_lock(eb)) {
		submit_write_bio(epd, 0);
		submit_write_bio(bio_ctrl, 0);
		flush = 1;
		flush = 1;
		btrfs_tree_lock(eb);
		btrfs_tree_lock(eb);
	}
	}


	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
	if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
		btrfs_tree_unlock(eb);
		btrfs_tree_unlock(eb);
		if (!epd->sync_io)
		if (!bio_ctrl->sync_io)
			return 0;
			return 0;
		if (!flush) {
		if (!flush) {
			submit_write_bio(epd, 0);
			submit_write_bio(bio_ctrl, 0);
			flush = 1;
			flush = 1;
		}
		}
		while (1) {
		while (1) {
@@ -2432,7 +2430,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb


		if (!trylock_page(p)) {
		if (!trylock_page(p)) {
			if (!flush) {
			if (!flush) {
				submit_write_bio(epd, 0);
				submit_write_bio(bio_ctrl, 0);
				flush = 1;
				flush = 1;
			}
			}
			lock_page(p);
			lock_page(p);
@@ -2672,7 +2670,7 @@ static void prepare_eb_write(struct extent_buffer *eb)
 */
 */
static int write_one_subpage_eb(struct extent_buffer *eb,
static int write_one_subpage_eb(struct extent_buffer *eb,
				struct writeback_control *wbc,
				struct writeback_control *wbc,
				struct extent_page_data *epd)
				struct btrfs_bio_ctrl *bio_ctrl)
{
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct page *page = eb->pages[0];
	struct page *page = eb->pages[0];
@@ -2692,10 +2690,10 @@ static int write_one_subpage_eb(struct extent_buffer *eb,
	if (no_dirty_ebs)
	if (no_dirty_ebs)
		clear_page_dirty_for_io(page);
		clear_page_dirty_for_io(page);


	epd->bio_ctrl.end_io_func = end_bio_subpage_eb_writepage;
	bio_ctrl->end_io_func = end_bio_subpage_eb_writepage;


	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
	ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
			&epd->bio_ctrl, eb->start, page, eb->len,
			bio_ctrl, eb->start, page, eb->len,
			eb->start - page_offset(page), 0, false);
			eb->start - page_offset(page), 0, false);
	if (ret) {
	if (ret) {
		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
		btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
@@ -2718,7 +2716,7 @@ static int write_one_subpage_eb(struct extent_buffer *eb,


static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
			struct writeback_control *wbc,
			struct writeback_control *wbc,
			struct extent_page_data *epd)
			struct btrfs_bio_ctrl *bio_ctrl)
{
{
	u64 disk_bytenr = eb->start;
	u64 disk_bytenr = eb->start;
	int i, num_pages;
	int i, num_pages;
@@ -2727,7 +2725,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,


	prepare_eb_write(eb);
	prepare_eb_write(eb);


	epd->bio_ctrl.end_io_func = end_bio_extent_buffer_writepage;
	bio_ctrl->end_io_func = end_bio_extent_buffer_writepage;


	num_pages = num_extent_pages(eb);
	num_pages = num_extent_pages(eb);
	for (i = 0; i < num_pages; i++) {
	for (i = 0; i < num_pages; i++) {
@@ -2736,7 +2734,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
		clear_page_dirty_for_io(p);
		clear_page_dirty_for_io(p);
		set_page_writeback(p);
		set_page_writeback(p);
		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
		ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
					 &epd->bio_ctrl, disk_bytenr, p,
					 bio_ctrl, disk_bytenr, p,
					 PAGE_SIZE, 0, 0, false);
					 PAGE_SIZE, 0, 0, false);
		if (ret) {
		if (ret) {
			set_btree_ioerr(p, eb);
			set_btree_ioerr(p, eb);
@@ -2779,7 +2777,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 */
 */
static int submit_eb_subpage(struct page *page,
static int submit_eb_subpage(struct page *page,
			     struct writeback_control *wbc,
			     struct writeback_control *wbc,
			     struct extent_page_data *epd)
			     struct btrfs_bio_ctrl *bio_ctrl)
{
{
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
	int submitted = 0;
	int submitted = 0;
@@ -2832,7 +2830,7 @@ static int submit_eb_subpage(struct page *page,
		if (!eb)
		if (!eb)
			continue;
			continue;


		ret = lock_extent_buffer_for_io(eb, epd);
		ret = lock_extent_buffer_for_io(eb, bio_ctrl);
		if (ret == 0) {
		if (ret == 0) {
			free_extent_buffer(eb);
			free_extent_buffer(eb);
			continue;
			continue;
@@ -2841,7 +2839,7 @@ static int submit_eb_subpage(struct page *page,
			free_extent_buffer(eb);
			free_extent_buffer(eb);
			goto cleanup;
			goto cleanup;
		}
		}
		ret = write_one_subpage_eb(eb, wbc, epd);
		ret = write_one_subpage_eb(eb, wbc, bio_ctrl);
		free_extent_buffer(eb);
		free_extent_buffer(eb);
		if (ret < 0)
		if (ret < 0)
			goto cleanup;
			goto cleanup;
@@ -2851,7 +2849,7 @@ static int submit_eb_subpage(struct page *page,


cleanup:
cleanup:
	/* We hit error, end bio for the submitted extent buffers */
	/* We hit error, end bio for the submitted extent buffers */
	submit_write_bio(epd, ret);
	submit_write_bio(bio_ctrl, ret);
	return ret;
	return ret;
}
}


@@ -2876,7 +2874,7 @@ static int submit_eb_subpage(struct page *page,
 * Return <0 for fatal error.
 * Return <0 for fatal error.
 */
 */
static int submit_eb_page(struct page *page, struct writeback_control *wbc,
static int submit_eb_page(struct page *page, struct writeback_control *wbc,
			  struct extent_page_data *epd,
			  struct btrfs_bio_ctrl *bio_ctrl,
			  struct extent_buffer **eb_context)
			  struct extent_buffer **eb_context)
{
{
	struct address_space *mapping = page->mapping;
	struct address_space *mapping = page->mapping;
@@ -2888,7 +2886,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
		return 0;
		return 0;


	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
		return submit_eb_subpage(page, wbc, epd);
		return submit_eb_subpage(page, wbc, bio_ctrl);


	spin_lock(&mapping->private_lock);
	spin_lock(&mapping->private_lock);
	if (!PagePrivate(page)) {
	if (!PagePrivate(page)) {
@@ -2931,7 +2929,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,


	*eb_context = eb;
	*eb_context = eb;


	ret = lock_extent_buffer_for_io(eb, epd);
	ret = lock_extent_buffer_for_io(eb, bio_ctrl);
	if (ret <= 0) {
	if (ret <= 0) {
		btrfs_revert_meta_write_pointer(cache, eb);
		btrfs_revert_meta_write_pointer(cache, eb);
		if (cache)
		if (cache)
@@ -2946,7 +2944,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
		btrfs_schedule_zone_finish_bg(cache, eb);
		btrfs_schedule_zone_finish_bg(cache, eb);
		btrfs_put_block_group(cache);
		btrfs_put_block_group(cache);
	}
	}
	ret = write_one_eb(eb, wbc, epd);
	ret = write_one_eb(eb, wbc, bio_ctrl);
	free_extent_buffer(eb);
	free_extent_buffer(eb);
	if (ret < 0)
	if (ret < 0)
		return ret;
		return ret;
@@ -2957,10 +2955,9 @@ int btree_write_cache_pages(struct address_space *mapping,
				   struct writeback_control *wbc)
				   struct writeback_control *wbc)
{
{
	struct extent_buffer *eb_context = NULL;
	struct extent_buffer *eb_context = NULL;
	struct extent_page_data epd = {
	struct btrfs_bio_ctrl bio_ctrl = {
		.bio_ctrl = { 0 },
		.extent_locked = 0,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
	};
	};
	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
	struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
	int ret = 0;
	int ret = 0;
@@ -3003,7 +3000,7 @@ int btree_write_cache_pages(struct address_space *mapping,
		for (i = 0; i < nr_pages; i++) {
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct page *page = pvec.pages[i];


			ret = submit_eb_page(page, wbc, &epd, &eb_context);
			ret = submit_eb_page(page, wbc, &bio_ctrl, &eb_context);
			if (ret == 0)
			if (ret == 0)
				continue;
				continue;
			if (ret < 0) {
			if (ret < 0) {
@@ -3064,7 +3061,7 @@ int btree_write_cache_pages(struct address_space *mapping,
		ret = 0;
		ret = 0;
	if (!ret && BTRFS_FS_ERROR(fs_info))
	if (!ret && BTRFS_FS_ERROR(fs_info))
		ret = -EROFS;
		ret = -EROFS;
	submit_write_bio(&epd, ret);
	submit_write_bio(&bio_ctrl, ret);


	btrfs_zoned_meta_io_unlock(fs_info);
	btrfs_zoned_meta_io_unlock(fs_info);
	return ret;
	return ret;
@@ -3075,7 +3072,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 *
 *
 * @mapping:   address space structure to write
 * @mapping:   address space structure to write
 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
 * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
 * @epd:     holds context for the write, namely the bio
 * @bio_ctrl:  holds context for the write, namely the bio
 *
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
@@ -3087,7 +3084,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 */
 */
static int extent_write_cache_pages(struct address_space *mapping,
static int extent_write_cache_pages(struct address_space *mapping,
			     struct writeback_control *wbc,
			     struct writeback_control *wbc,
			     struct extent_page_data *epd)
			     struct btrfs_bio_ctrl *bio_ctrl)
{
{
	struct inode *inode = mapping->host;
	struct inode *inode = mapping->host;
	int ret = 0;
	int ret = 0;
@@ -3168,7 +3165,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
			 * tmpfs file mapping
			 * tmpfs file mapping
			 */
			 */
			if (!trylock_page(page)) {
			if (!trylock_page(page)) {
				submit_write_bio(epd, 0);
				submit_write_bio(bio_ctrl, 0);
				lock_page(page);
				lock_page(page);
			}
			}


@@ -3179,7 +3176,7 @@ static int extent_write_cache_pages(struct address_space *mapping,


			if (wbc->sync_mode != WB_SYNC_NONE) {
			if (wbc->sync_mode != WB_SYNC_NONE) {
				if (PageWriteback(page))
				if (PageWriteback(page))
					submit_write_bio(epd, 0);
					submit_write_bio(bio_ctrl, 0);
				wait_on_page_writeback(page);
				wait_on_page_writeback(page);
			}
			}


@@ -3189,7 +3186,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
				continue;
				continue;
			}
			}


			ret = __extent_writepage(page, wbc, epd);
			ret = __extent_writepage(page, wbc, bio_ctrl);
			if (ret < 0) {
			if (ret < 0) {
				done = 1;
				done = 1;
				break;
				break;
@@ -3219,7 +3216,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
		 * page in our current bio, and thus deadlock, so flush the
		 * page in our current bio, and thus deadlock, so flush the
		 * write bio here.
		 * write bio here.
		 */
		 */
		submit_write_bio(epd, 0);
		submit_write_bio(bio_ctrl, 0);
		goto retry;
		goto retry;
	}
	}


@@ -3245,8 +3242,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
	u64 cur = start;
	u64 cur = start;
	unsigned long nr_pages;
	unsigned long nr_pages;
	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
	const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
	struct extent_page_data epd = {
	struct btrfs_bio_ctrl bio_ctrl = {
		.bio_ctrl = { 0 },
		.extent_locked = 1,
		.extent_locked = 1,
		.sync_io = 1,
		.sync_io = 1,
	};
	};
@@ -3277,7 +3273,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
		ASSERT(PageLocked(page));
		ASSERT(PageLocked(page));
		ASSERT(PageDirty(page));
		ASSERT(PageDirty(page));
		clear_page_dirty_for_io(page);
		clear_page_dirty_for_io(page);
		ret = __extent_writepage(page, &wbc_writepages, &epd);
		ret = __extent_writepage(page, &wbc_writepages, &bio_ctrl);
		ASSERT(ret <= 0);
		ASSERT(ret <= 0);
		if (ret < 0) {
		if (ret < 0) {
			found_error = true;
			found_error = true;
@@ -3287,7 +3283,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
		cur = cur_end + 1;
		cur = cur_end + 1;
	}
	}


	submit_write_bio(&epd, found_error ? ret : 0);
	submit_write_bio(&bio_ctrl, found_error ? ret : 0);


	wbc_detach_inode(&wbc_writepages);
	wbc_detach_inode(&wbc_writepages);
	if (found_error)
	if (found_error)
@@ -3300,10 +3296,9 @@ int extent_writepages(struct address_space *mapping,
{
{
	struct inode *inode = mapping->host;
	struct inode *inode = mapping->host;
	int ret = 0;
	int ret = 0;
	struct extent_page_data epd = {
	struct btrfs_bio_ctrl bio_ctrl = {
		.bio_ctrl = { 0 },
		.extent_locked = 0,
		.extent_locked = 0,
		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
	};
	};


	/*
	/*
@@ -3311,8 +3306,8 @@ int extent_writepages(struct address_space *mapping,
	 * protect the write pointer updates.
	 * protect the write pointer updates.
	 */
	 */
	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
	btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
	ret = extent_write_cache_pages(mapping, wbc, &epd);
	ret = extent_write_cache_pages(mapping, wbc, &bio_ctrl);
	submit_write_bio(&epd, ret);
	submit_write_bio(&bio_ctrl, ret);
	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
	btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
	return ret;
	return ret;
}
}