Commit 1daf117f authored by Linus Torvalds
Pull f2fs updates from Jaegeuk Kim:
 "In this cycle, we mainly fixed some corner cases that manipulate a
  per-file compression flag inappropriately. And, we found f2fs counted
  valid blocks in a section incorrectly when zone capacity is set, and
  thus, fixed it with additional sysfs entry to check it easily.

  Lastly, this series includes several patches for the new atomic write
  support, such as a couple of bug fixes and the re-addition of the
  atomic_write_abort support that was removed by mistake in the previous
  release.

  Enhancements:
   - add sysfs entries to understand atomic write operations and zone
     capacity
   - introduce memory mode to get a hint for low-memory devices
   - adjust the waiting time of foreground GC
   - decompress clusters under softirq to avoid non-deterministic
     latency
   - do not skip updating inode when retrying to flush node page
   - enforce single zone capacity

  Bug fixes:
   - set the compression/no-compression flags correctly
   - revive F2FS_IOC_ABORT_VOLATILE_WRITE
   - check inline_data during compressed inode conversion
   - understand zone capacity when calculating valid block count

  As usual, the series includes several minor clean-ups and sanity
  checks"

* tag 'f2fs-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (29 commits)
  f2fs: use onstack pages instead of pvec
  f2fs: intorduce f2fs_all_cluster_page_ready
  f2fs: clean up f2fs_abort_atomic_write()
  f2fs: handle decompress only post processing in softirq
  f2fs: do not allow to decompress files have FI_COMPRESS_RELEASED
  f2fs: do not set compression bit if kernel doesn't support
  f2fs: remove device type check for direct IO
  f2fs: fix null-ptr-deref in f2fs_get_dnode_of_data
  f2fs: revive F2FS_IOC_ABORT_VOLATILE_WRITE
  f2fs: fix to do sanity check on segment type in build_sit_entries()
  f2fs: obsolete unused MAX_DISCARD_BLOCKS
  f2fs: fix to avoid use f2fs_bug_on() in f2fs_new_node_page()
  f2fs: fix to remove F2FS_COMPR_FL and tag F2FS_NOCOMP_FL at the same time
  f2fs: introduce sysfs atomic write statistics
  f2fs: don't bother wait_ms by foreground gc
  f2fs: invalidate meta pages only for post_read required inode
  f2fs: allow compression of files without blocks
  f2fs: fix to check inline_data during compressed inode conversion
  f2fs: Delete f2fs_copy_page() and replace with memcpy_page()
  f2fs: fix to invalidate META_MAPPING before DIO write
  ...
parents 2bd5d41e 01fc4b9a
Documentation/ABI/testing/sysfs-fs-f2fs +30 −0
@@ -580,3 +580,33 @@ Date: January 2022
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:	Controls max # of node block writes to be used for roll forward
		recovery. This can limit the roll forward recovery time.

What:		/sys/fs/f2fs/<disk>/unusable_blocks_per_sec
Date:		June 2022
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:	Shows the number of unusable blocks in a section, as defined by
		the zone capacity reported by the underlying zoned device.

What:		/sys/fs/f2fs/<disk>/current_atomic_write
Date:		July 2022
Contact:	"Daeho Jeong" <daehojeong@google.com>
Description:	Shows the current total count of atomic write blocks that are
		not committed yet. This is a read-only entry.

What:		/sys/fs/f2fs/<disk>/peak_atomic_write
Date:		July 2022
Contact:	"Daeho Jeong" <daehojeong@google.com>
Description:	Shows the peak value of the current atomic write block count
		since boot. Writing "0" here resets the counter.

What:		/sys/fs/f2fs/<disk>/committed_atomic_block
Date:		July 2022
Contact:	"Daeho Jeong" <daehojeong@google.com>
Description:	Shows the accumulated count of committed atomic write blocks
		since boot. Writing "0" here resets the counter.

What:		/sys/fs/f2fs/<disk>/revoked_atomic_block
Date:		July 2022
Contact:	"Daeho Jeong" <daehojeong@google.com>
Description:	Shows the accumulated count of revoked atomic write blocks
		since boot. Writing "0" here resets the counter.
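
These four counters can be polled from userspace. Below is a minimal sketch
(editorial, not part of the patch; the disk name "sdb" is a placeholder for
the actual f2fs device):

#include <stdio.h>

int main(void)
{
	/* "sdb" is a hypothetical device name; substitute your f2fs disk */
	static const char *names[] = {
		"current_atomic_write", "peak_atomic_write",
		"committed_atomic_block", "revoked_atomic_block",
	};
	char path[128];
	unsigned long long val;

	for (int i = 0; i < 4; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "/sys/fs/f2fs/sdb/%s", names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* entry absent on older kernels */
		if (fscanf(f, "%llu", &val) == 1)
			printf("%s = %llu\n", names[i], val);
		fclose(f);
	}
	return 0;
}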
Documentation/filesystems/f2fs.rst +5 −0
@@ -336,6 +336,11 @@ discard_unit=%s Control discard unit, the argument can be "block", "segment"
			 default, it is helpful for large sized SMR or ZNS devices to
			 reduce memory cost by getting rid of fs metadata supports small
			 discard.
memory=%s		 Control the memory mode. This supports "normal" and "low" modes.
			 "low" mode is introduced to support low-memory devices: in this
			 mode, f2fs tries to save memory, sometimes at the cost of
			 performance. "normal" mode is the default and behaves as before.
======================== ============================================================
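
As a usage sketch (editorial, not from the patch), the new mode can be
selected at mount time via mount(2); the device and mount point below are
placeholders:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "memory=low" trades some performance for a smaller memory footprint */
	if (mount("/dev/sdb", "/mnt/f2fs", "f2fs", 0, "memory=low")) {
		perror("mount");
		return 1;
	}
	return 0;
}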

Debugfs Entries
fs/f2fs/compress.c +153 −76
@@ -729,14 +729,19 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
	return ret;
}

void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc);

void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	bool bypass_callback = false;
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);
@@ -746,41 +751,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	ret = f2fs_prepare_decomp_mem(dic, false);
	if (ret) {
		bypass_callback = true;
		goto out_release;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -788,7 +762,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
		goto out_release;
	}

	ret = cops->decompress_pages(dic);
@@ -809,17 +783,13 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_release:
	f2fs_release_decomp_mem(dic, bypass_callback, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
	f2fs_decompress_end_io(dic, ret, in_task);
}

/*
@@ -829,7 +799,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
		block_t blkaddr, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
@@ -839,12 +809,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
	else if (blkaddr && in_task)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
		f2fs_decompress_cluster(dic, in_task);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -871,19 +841,26 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
	return is_page_in_cluster(cc, index);
}

bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
				int index, int nr_pages)
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
				int index, int nr_pages, bool uptodate)
{
	unsigned long pgidx;
	int i;
	unsigned long pgidx = pages[index]->index;
	int i = uptodate ? 0 : 1;

	if (nr_pages - index < cc->cluster_size)
	/*
	 * When uptodate is set to true, check whether all pages in the
	 * cluster are uptodate.
	 */
	if (uptodate && (pgidx % cc->cluster_size))
		return false;

	pgidx = pvec->pages[index]->index;
	if (nr_pages - index < cc->cluster_size)
		return false;

	for (i = 1; i < cc->cluster_size; i++) {
		if (pvec->pages[index + i]->index != pgidx + i)
	for (; i < cc->cluster_size; i++) {
		if (pages[index + i]->index != pgidx + i)
			return false;
		if (uptodate && !PageUptodate(pages[index + i]))
			return false;
	}

@@ -1552,16 +1529,85 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
	return err;
}

static void f2fs_free_dic(struct decompress_io_ctx *dic);
static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
		bool pre_alloc)
{
	return pre_alloc ^ f2fs_low_mem_mode(sbi);
}
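
/*
 * Note (editorial, not in the patch): the XOR encodes "allocate at this
 * phase".  In the default mode (f2fs_low_mem_mode() == false), decompression
 * buffers are prepared up front, when f2fs_prepare_decomp_mem() is called
 * with pre_alloc == true from f2fs_alloc_dic().  In low-memory mode they are
 * prepared lazily, when it is called with pre_alloc == false from
 * f2fs_decompress_cluster(), so the memory is held only while a cluster is
 * actually being decompressed.
 */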

static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
	int i;

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return 0;

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages)
		return -ENOMEM;

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			return -ENOMEM;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf)
		return -ENOMEM;

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf)
		return -ENOMEM;

	if (cops->init_decompress_ctx) {
		int ret = cops->init_decompress_ctx(dic);

		if (ret)
			return ret;
	}

	return 0;
}

static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback, bool pre_alloc)
{
	const struct f2fs_compress_ops *cops =
		f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];

	if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
		return;

	if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);

	if (dic->cbuf)
		vm_unmap_ram(dic->cbuf, dic->nr_cpages);

	if (dic->rbuf)
		vm_unmap_ram(dic->rbuf, dic->cluster_size);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	int i, ret;

	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO,
					false, F2FS_I_SB(cc->inode));
	dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
	if (!dic)
		return ERR_PTR(-ENOMEM);

@@ -1587,32 +1633,43 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
	if (!dic->cpages) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
		if (!page) {
			ret = -ENOMEM;
			goto out_free;
		}

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	ret = f2fs_prepare_decomp_mem(dic, true);
	if (ret)
		goto out_free;

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
	f2fs_free_dic(dic, true);
	return ERR_PTR(ret);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic)
static void f2fs_free_dic(struct decompress_io_ctx *dic,
		bool bypass_destroy_callback)
{
	int i;

	f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
@@ -1637,17 +1694,33 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
static void f2fs_late_free_dic(struct work_struct *work)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, free_work);

	f2fs_free_dic(dic, false);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
{
	if (refcount_dec_and_test(&dic->refcnt)) {
		if (in_task) {
			f2fs_free_dic(dic, false);
		} else {
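			/*
			 * Note (editorial, not in the patch): freeing the dic
			 * involves vm_unmap_ram() and possibly the compressor's
			 * destroy callback, which must not run in softirq
			 * context, so the final put from a softirq defers the
			 * free to the post-read workqueue.
			 */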
			INIT_WORK(&dic->free_work, f2fs_late_free_dic);
			queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
					&dic->free_work);
		}
	}
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	int i;

@@ -1668,7 +1741,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
	f2fs_put_dic(dic, in_task);
}

static void f2fs_verify_cluster(struct work_struct *work)
@@ -1685,14 +1758,15 @@ static void f2fs_verify_cluster(struct work_struct *work)
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
	__f2fs_decompress_end_io(dic, false, true);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
				bool in_task)
{
	if (!failed && dic->need_verity) {
		/*
@@ -1704,7 +1778,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
		__f2fs_decompress_end_io(dic, failed, in_task);
	}
}

@@ -1713,12 +1787,12 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
void f2fs_put_page_dic(struct page *page, bool in_task)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
	f2fs_put_dic(dic, in_task);
}

/*
@@ -1903,6 +1977,9 @@ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	if (!f2fs_sb_has_compression(sbi))
		return 0;

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
fs/f2fs/data.c +50 −32
@@ -119,7 +119,7 @@ struct bio_post_read_ctx {
	block_t fs_blkaddr;
};

static void f2fs_finish_read_bio(struct bio *bio)
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
@@ -133,8 +133,9 @@ static void f2fs_finish_read_bio(struct bio *bio)

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
				f2fs_end_read_compressed_page(page, true, 0,
							in_task);
			f2fs_put_page_dic(page, in_task);
			continue;
		}

@@ -191,7 +192,7 @@ static void f2fs_verify_bio(struct work_struct *work)
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
	f2fs_finish_read_bio(bio, true);
}

/*
@@ -203,7 +204,7 @@ static void f2fs_verify_bio(struct work_struct *work)
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

@@ -211,7 +212,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
		f2fs_finish_read_bio(bio, in_task);
	}
}

@@ -224,7 +225,8 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
@@ -237,7 +239,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
						blkaddr, in_task);
		else
			all_compressed = false;

@@ -262,15 +264,16 @@ static void f2fs_post_read_work(struct work_struct *work)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(ctx->bio);
	f2fs_verify_and_finish_bio(ctx->bio, true);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;
@@ -281,18 +284,31 @@ static void f2fs_read_end_io(struct bio *bio)
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If decompression is the only post-read step, handle it here
		 * (possibly in softirq context) instead of deferring it to the
		 * post-read workqueue.
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
@@ -1682,8 +1698,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
		 */
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		invalidate_mapping_pages(META_MAPPING(sbi),
						map->m_pblk, map->m_pblk);

		if (map->m_multidev_dio) {
			block_t blk_addr = map->m_pblk;
@@ -2223,7 +2237,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,

		if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
			if (atomic_dec_and_test(&dic->remaining_pages))
				f2fs_decompress_cluster(dic);
				f2fs_decompress_cluster(dic, true);
			continue;
		}

@@ -2241,7 +2255,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
					page->index, for_write);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				f2fs_decompress_end_io(dic, ret);
				f2fs_decompress_end_io(dic, ret, true);
				f2fs_put_dnode(&dn);
				*bio_ret = NULL;
				return ret;
@@ -2731,6 +2745,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
		.submitted = false,
		.compr_blocks = compr_blocks,
		.need_lock = LOCK_RETRY,
		.post_read = f2fs_post_read_required(inode),
		.io_type = io_type,
		.io_wbc = wbc,
		.bio = bio,
@@ -2902,7 +2917,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
{
	int ret = 0;
	int done = 0, retry = 0;
	struct pagevec pvec;
	struct page *pages[F2FS_ONSTACK_PAGES];
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct bio *bio = NULL;
	sector_t last_block;
@@ -2933,8 +2948,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
	int submitted = 0;
	int i;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -2960,13 +2973,13 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && !retry && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		nr_pages = find_get_pages_range_tag(mapping, &index, end,
				tag, F2FS_ONSTACK_PAGES, pages);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct page *page = pages[i];
			bool need_readd;
readd:
			need_readd = false;
@@ -2997,6 +3010,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
				if (!f2fs_cluster_is_empty(&cc))
					goto lock_page;

				if (f2fs_all_cluster_page_ready(&cc,
					pages, i, nr_pages, true))
					goto lock_page;

				ret2 = f2fs_prepare_compress_overwrite(
							inode, &pagep,
							page->index, &fsdata);
@@ -3007,8 +3024,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
				} else if (ret2 &&
					(!f2fs_compress_write_end(inode,
						fsdata, page->index, 1) ||
					 !f2fs_all_cluster_page_loaded(&cc,
						&pvec, i, nr_pages))) {
					 !f2fs_all_cluster_page_ready(&cc,
						pages, i, nr_pages, false))) {
					retry = 1;
					break;
				}
@@ -3098,7 +3115,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
			if (need_readd)
				goto readd;
		}
		pagevec_release(&pvec);
		release_pages(pages, nr_pages);
		cond_resched();
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -3408,12 +3425,11 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
	pgoff_t index = page->index;
	int err = 0;
	block_t ori_blk_addr;
	block_t ori_blk_addr = NULL_ADDR;

	/* If pos is beyond the end of file, reserve a new block in COW inode */
	if ((pos & PAGE_MASK) >= i_size_read(inode))
		return __reserve_data_block(cow_inode, index, blk_addr,
					node_changed);
		goto reserve_block;

	/* Look for the block in COW inode first */
	err = __find_data_block(cow_inode, index, blk_addr);
@@ -3427,10 +3443,12 @@ static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
	if (err)
		return err;

reserve_block:
	/* Finally, we should reserve a new block in COW inode for the update */
	err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
	if (err)
		return err;
	inc_atomic_write_cnt(inode);

	if (ori_blk_addr != NULL_ADDR)
		*blk_addr = ori_blk_addr;
fs/f2fs/debug.c +1 −1
@@ -39,7 +39,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)

	bimodal = 0;
	total_vblocks = 0;
	blks_per_sec = BLKS_PER_SEC(sbi);
	blks_per_sec = CAP_BLKS_PER_SEC(sbi);
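	/*
	 * Note (editorial, not in the patch): per this series,
	 * CAP_BLKS_PER_SEC() is BLKS_PER_SEC() minus the per-section unusable
	 * blocks reported by the zone capacity (cf. the new
	 * unusable_blocks_per_sec sysfs entry), so these stats are computed
	 * against the writable capacity of a section.
	 */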
	hblks_per_sec = blks_per_sec / 2;
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		vblocks = get_valid_blocks(sbi, segno, true);