Commit 5c7ecada authored by Linus Torvalds
Pull f2fs update from Jaegeuk Kim:
 "In this round, we've mainly modified to support non-power-of-two zone
  size, which is not required for f2fs by design. In order to avoid arch
  dependency, we refactored the messy rb_entry structure shared across
  different extent_cache. In addition to the improvement, we've also
  fixed several subtle bugs and error cases.

  Enhancements:
   - support non-power-of-two zone size for zoned device
   - remove sharing the rb_entry structure in extent cache
   - refactor f2fs_gc to call checkpoint in urgent condition
   - support iopoll

  Bug fixes:
   - fix potential corruption when moving a directory
   - fix to avoid use-after-free for cached IPU bio
   - fix the folio private usage
   - avoid kernel warnings or panics in the cp_error case
   - fix to recover quota data correctly
   - fix some bugs in atomic operations
   - fix system crash due to lack of free space in LFS
   - fix null pointer panic in tracepoint in __replace_atomic_write_block
   - fix iostat lock protection
   - fix scheduling while atomic in decompression path
   - preserve direct write semantics when buffering is forced
   - fix to call f2fs_wait_on_page_writeback() in f2fs_write_raw_pages()"

* tag 'f2fs-for-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (52 commits)
  f2fs: remove unnecessary comment in __may_age_extent_tree
  f2fs: allocate node blocks for atomic write block replacement
  f2fs: use cow inode data when updating atomic write
  f2fs: remove power-of-two limitation of zoned device
  f2fs: allocate trace path buffer from names_cache
  f2fs: add has_enough_free_secs()
  f2fs: relax sanity check if checkpoint is corrupted
  f2fs: refactor f2fs_gc to call checkpoint in urgent condition
  f2fs: remove folio_detach_private() in .invalidate_folio and .release_folio
  f2fs: remove bulk remove_proc_entry() and unnecessary kobject_del()
  f2fs: support iopoll method
  f2fs: remove batched_trim_sections node description
  f2fs: fix to check return value of inc_valid_block_count()
  f2fs: fix to check return value of f2fs_do_truncate_blocks()
  f2fs: fix passing relative address when discard zones
  f2fs: fix potential corruption when moving a directory
  f2fs: add radix_tree_preload_end in error case
  f2fs: fix to recover quota data correctly
  f2fs: fix to check readonly condition correctly
  docs: f2fs: Correct instruction to disable checkpoint
  ...
parents fbfaf03e 8375be2b
Loading
Loading
Loading
Loading
+17 −6
Original line number Original line Diff line number Diff line
@@ -190,12 +190,6 @@ Description: Controls the memory footprint used by free nids and cached
		nat entries. By default, 1 is set, which indicates
		nat entries. By default, 1 is set, which indicates
		10 MB / 1 GB RAM.
		10 MB / 1 GB RAM.


What:		/sys/fs/f2fs/<disk>/batched_trim_sections
Date:		February 2015
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:	Controls the trimming rate in batch mode.
		<deprecated>

What:		/sys/fs/f2fs/<disk>/cp_interval
What:		/sys/fs/f2fs/<disk>/cp_interval
Date:		October 2015
Date:		October 2015
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
@@ -729,3 +723,20 @@ What: /sys/fs/f2fs/<disk>/last_age_weight
Date:		January 2023
Date:		January 2023
Contact:	"Ping Xiong" <xiongping1@xiaomi.com>
Contact:	"Ping Xiong" <xiongping1@xiaomi.com>
Description:	When DATA SEPARATION is on, it controls the weight of last data block age.
Description:	When DATA SEPARATION is on, it controls the weight of last data block age.

What:		/sys/fs/f2fs/<disk>/compress_watermark
Date:		February 2023
Contact:	"Yangtao Li" <frank.li@vivo.com>
Description:	When compress cache is on, it controls free memory watermark
		in order to limit caching compress page. If free memory is lower
		than watermark, then deny caching compress page. The value should be in
		range of (0, 100], by default it was initialized as 20(%).

What:		/sys/fs/f2fs/<disk>/compress_percent
Date:		February 2023
Contact:	"Yangtao Li" <frank.li@vivo.com>
Description:	When compress cache is on, it controls cached page
		percent(compress pages / free_ram) in order to limit caching compress page.
		If cached page percent exceed threshold, then deny caching compress page.
		The value should be in range of (0, 100], by default it was initialized
		as 20(%).
+1 −1
Original line number Original line Diff line number Diff line
@@ -264,7 +264,7 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
			 disabled, any unmounting or unexpected shutdowns will cause
			 disabled, any unmounting or unexpected shutdowns will cause
			 the filesystem contents to appear as they did when the
			 the filesystem contents to appear as they did when the
			 filesystem was mounted with that option.
			 filesystem was mounted with that option.
			 While mounting with checkpoint=disabled, the filesystem must
			 While mounting with checkpoint=disable, the filesystem must
			 run garbage collection to ensure that all available space can
			 run garbage collection to ensure that all available space can
			 be used. If this takes too much time, the mount may return
			 be used. If this takes too much time, the mount may return
			 EAGAIN. You may optionally add a value to indicate how much
			 EAGAIN. You may optionally add a value to indicate how much
+25 −27
Original line number Original line Diff line number Diff line
@@ -152,6 +152,11 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
	se = get_seg_entry(sbi, segno);
	se = get_seg_entry(sbi, segno);


	exist = f2fs_test_bit(offset, se->cur_valid_map);
	exist = f2fs_test_bit(offset, se->cur_valid_map);

	/* skip data, if we already have an error in checkpoint. */
	if (unlikely(f2fs_cp_error(sbi)))
		return exist;

	if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) {
	if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) {
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
			 blkaddr, exist);
			 blkaddr, exist);
@@ -202,6 +207,11 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
	case DATA_GENERIC_ENHANCE_UPDATE:
	case DATA_GENERIC_ENHANCE_UPDATE:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {
				blkaddr < MAIN_BLKADDR(sbi))) {

			/* Skip to emit an error message. */
			if (unlikely(f2fs_cp_error(sbi)))
				return false;

			f2fs_warn(sbi, "access invalid blkaddr:%u",
			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -325,8 +335,15 @@ static int __f2fs_write_meta_page(struct page *page,


	trace_f2fs_writepage(page, META);
	trace_f2fs_writepage(page, META);


	if (unlikely(f2fs_cp_error(sbi)))
	if (unlikely(f2fs_cp_error(sbi))) {
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			ClearPageUptodate(page);
			dec_page_count(sbi, F2FS_DIRTY_META);
			unlock_page(page);
			return 0;
		}
		goto redirty_out;
		goto redirty_out;
	}
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -508,6 +525,7 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
	if (!e) {
	if (!e) {
		if (!new) {
		if (!new) {
			spin_unlock(&im->ino_lock);
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
			goto retry;
		}
		}
		e = new;
		e = new;
@@ -706,32 +724,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
{
	block_t start_blk, orphan_blocks, i, j;
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif


	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;
		return 0;


	if (bdev_read_only(sbi->sb->s_bdev)) {
	if (f2fs_hw_is_readonly(sbi)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
		return 0;
	}
	}


	if (s_flags & SB_RDONLY) {
	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif


	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -765,13 +769,6 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
out:
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);
	set_sbi_flag(sbi, SBI_IS_RECOVERED);


#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
	return err;
}
}


@@ -982,7 +979,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)


	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
		cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg));


	for (i = 1; i < cp_blks; i++) {
	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		void *sit_bitmap_ptr;
@@ -1133,7 +1130,7 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
	goto retry;
	goto retry;
}
}


int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
static int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct inode *inode;
@@ -1306,7 +1303,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
		if (!get_pages(sbi, type))
		if (!get_pages(sbi, type))
			break;
			break;


		if (unlikely(f2fs_cp_error(sbi)))
		if (unlikely(f2fs_cp_error(sbi) &&
			!is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
			break;
			break;


		if (type == F2FS_DIRTY_META)
		if (type == F2FS_DIRTY_META)
+22 −25
Original line number Original line Diff line number Diff line
@@ -264,35 +264,21 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
	cc->private = NULL;
	cc->private = NULL;
}
}


#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4_compress_pages(struct compress_ctx *cc)
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
{
	int len = -EINVAL;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
	unsigned char level = F2FS_I(cc->inode)->i_compress_level;
	int len;


	if (level)
	if (!level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
	else
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
#endif
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
	if (len < 0)
						cc->clen, cc->private);
		return len;
	if (!len)
	if (!len)
		return -EAGAIN;
		return -EAGAIN;


@@ -670,7 +656,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)


	cc->cbuf->clen = cpu_to_le32(cc->clen);
	cc->cbuf->clen = cpu_to_le32(cc->clen);


	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
	if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);
	cc->cbuf->chksum = cpu_to_le32(chksum);
@@ -755,13 +741,18 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)


	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		ret = -EFSCORRUPTED;

		/* Avoid f2fs_commit_super in irq context */
		if (in_task)
			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
		else
			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
		goto out_release;
		goto out_release;
	}
	}


	ret = cops->decompress_pages(dic);
	ret = cops->decompress_pages(dic);


	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
	if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);


@@ -1456,6 +1447,12 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
		if (!PageDirty(cc->rpages[i]))
		if (!PageDirty(cc->rpages[i]))
			goto continue_unlock;
			goto continue_unlock;


		if (PageWriteback(cc->rpages[i])) {
			if (wbc->sync_mode == WB_SYNC_NONE)
				goto continue_unlock;
			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
		}

		if (!clear_page_dirty_for_io(cc->rpages[i]))
		if (!clear_page_dirty_for_io(cc->rpages[i]))
			goto continue_unlock;
			goto continue_unlock;


+30 −41
Original line number Original line Diff line number Diff line
@@ -93,17 +93,17 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
/* postprocessing steps for read bios */
enum bio_post_read_step {
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
	STEP_DECRYPT	= BIT(0),
#else
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
	STEP_DECOMPRESS	= BIT(1),
#else
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#endif
#ifdef CONFIG_FS_VERITY
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
	STEP_VERITY	= BIT(2),
#else
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
#endif
@@ -420,7 +420,7 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)


static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
{
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
	unsigned int fua_flag, meta_flag, io_flag;
	unsigned int fua_flag, meta_flag, io_flag;
	blk_opf_t op_flags = 0;
	blk_opf_t op_flags = 0;


@@ -442,9 +442,9 @@ static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	 */
	if ((1 << fio->temp) & meta_flag)
	if (BIT(fio->temp) & meta_flag)
		op_flags |= REQ_META;
		op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
	if (BIT(fio->temp) & fua_flag)
		op_flags |= REQ_FUA;
		op_flags |= REQ_FUA;
	return op_flags;
	return op_flags;
}
}
@@ -874,6 +874,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
	bool found = false;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;
	struct bio *target = bio ? *bio : NULL;


	f2fs_bug_on(sbi, !target && !page);

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct list_head *head = &io->bio_list;
@@ -2235,6 +2237,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
	if (ret)
	if (ret)
		goto out;
		goto out;


	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out_put_dnode;
	}
	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);


skip_reading_dnode:
skip_reading_dnode:
@@ -2798,7 +2804,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
		 * don't drop any dirty dentry pages for keeping lastest
		 * don't drop any dirty dentry pages for keeping lastest
		 * directory structure.
		 * directory structure.
		 */
		 */
		if (S_ISDIR(inode->i_mode))
		if (S_ISDIR(inode->i_mode) &&
				!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
			goto redirty_out;
			goto redirty_out;
		goto out;
		goto out;
	}
	}
@@ -2898,6 +2905,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,


	if (unlikely(f2fs_cp_error(sbi))) {
	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		f2fs_submit_merged_write(sbi, DATA);
		if (bio && *bio)
			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
		submitted = NULL;
		submitted = NULL;
	}
	}
@@ -3123,12 +3131,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
			}
			}


			if (folio_test_writeback(folio)) {
			if (folio_test_writeback(folio)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
				if (wbc->sync_mode == WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(
							&folio->page,
							DATA, true, true);
				else
					goto continue_unlock;
					goto continue_unlock;
				f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
			}
			}


			if (!folio_clear_dirty_for_io(folio))
			if (!folio_clear_dirty_for_io(folio))
@@ -3486,7 +3491,7 @@ static int __reserve_data_block(struct inode *inode, pgoff_t index,


static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned int len,
			struct page *page, loff_t pos, unsigned int len,
			block_t *blk_addr, bool *node_changed)
			block_t *blk_addr, bool *node_changed, bool *use_cow)
{
{
	struct inode *inode = page->mapping->host;
	struct inode *inode = page->mapping->host;
	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
@@ -3500,10 +3505,12 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,


	/* Look for the block in COW inode first */
	/* Look for the block in COW inode first */
	err = __find_data_block(cow_inode, index, blk_addr);
	err = __find_data_block(cow_inode, index, blk_addr);
	if (err)
	if (err) {
		return err;
		return err;
	else if (*blk_addr != NULL_ADDR)
	} else if (*blk_addr != NULL_ADDR) {
		*use_cow = true;
		return 0;
		return 0;
	}


	if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
	if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
		goto reserve_block;
		goto reserve_block;
@@ -3533,6 +3540,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
	struct page *page = NULL;
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	bool need_balance = false;
	bool use_cow = false;
	block_t blkaddr = NULL_ADDR;
	block_t blkaddr = NULL_ADDR;
	int err = 0;
	int err = 0;


@@ -3592,7 +3600,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,


	if (f2fs_is_atomic_file(inode))
	if (f2fs_is_atomic_file(inode))
		err = prepare_atomic_write_begin(sbi, page, pos, len,
		err = prepare_atomic_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
					&blkaddr, &need_balance, &use_cow);
	else
	else
		err = prepare_write_begin(sbi, page, pos, len,
		err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
					&blkaddr, &need_balance);
@@ -3632,7 +3640,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			goto fail;
			goto fail;
		}
		}
		err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
		err = f2fs_submit_page_read(use_cow ?
				F2FS_I(inode)->cow_inode : inode, page,
				blkaddr, 0, true);
		if (err)
		if (err)
			goto fail;
			goto fail;


@@ -3725,37 +3735,16 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
			f2fs_remove_dirty_inode(inode);
			f2fs_remove_dirty_inode(inode);
		}
		}
	}
	}

	clear_page_private_all(&folio->page);
	clear_page_private_reference(&folio->page);
	clear_page_private_gcing(&folio->page);

	if (test_opt(sbi, COMPRESS_CACHE) &&
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		clear_page_private_data(&folio->page);

	folio_detach_private(folio);
}
}


bool f2fs_release_folio(struct folio *folio, gfp_t wait)
bool f2fs_release_folio(struct folio *folio, gfp_t wait)
{
{
	struct f2fs_sb_info *sbi;

	/* If this is dirty folio, keep private data */
	/* If this is dirty folio, keep private data */
	if (folio_test_dirty(folio))
	if (folio_test_dirty(folio))
		return false;
		return false;


	sbi = F2FS_M_SB(folio->mapping);
	clear_page_private_all(&folio->page);
	if (test_opt(sbi, COMPRESS_CACHE)) {
		struct inode *inode = folio->mapping->host;

		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
			clear_page_private_data(&folio->page);
	}

	clear_page_private_reference(&folio->page);
	clear_page_private_gcing(&folio->page);

	folio_detach_private(folio);
	return true;
	return true;
}
}


Loading