Commit 3db1de0e authored by Daeho Jeong's avatar Daeho Jeong Committed by Jaegeuk Kim
Browse files

f2fs: change the current atomic write way



The current atomic write mechanism has three major issues, as below.
 - It keeps the updates in non-reclaimable memory space, and they are
   even hard to migrate, which is bad for contiguous memory
   allocation.
 - disk spaces used for atomic files cannot be garbage collected, so
   this makes it difficult for the filesystem to be defragmented.
 - If atomic write operations hit the threshold of either memory usage
   or garbage collection failure count, all the atomic write operations
   will fail immediately.

To resolve these issues, keep an internal COW (copy-on-write) inode to
which the pending updates are flushed from memory when needed, e.g. in
a situation like high memory pressure. These COW inodes are tagged as
orphan inodes so they can be reclaimed in case of a sudden power-cut
or system failure during atomic writes.

Signed-off-by: default avatarDaeho Jeong <daehojeong@google.com>
Signed-off-by: default avatarJaegeuk Kim <jaegeuk@kernel.org>
parent 6213f5d4
Loading
Loading
Loading
Loading
+115 −65
Original line number Diff line number Diff line
@@ -69,8 +69,7 @@ static bool __is_cp_guaranteed(struct page *page)

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
			page_private_gcing(page))
		return true;
	return false;
@@ -2563,7 +2562,12 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
	bool ipu_force = false;
	int err = 0;

	/* Use COW inode to make dnode_of_data for atomic write */
	if (f2fs_is_atomic_file(inode))
		set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
	else
		set_new_dnode(&dn, inode, NULL, NULL, 0);

	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;
@@ -2600,6 +2604,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
		err = -EFSCORRUPTED;
		goto out_writepage;
	}

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
@@ -3313,6 +3318,100 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
	return err;
}

/*
 * Look up the on-disk block address for @index in @inode without
 * allocating anything (LOOKUP_NODE only).
 *
 * On success, *blk_addr holds the block address, or NULL_ADDR if the
 * page is a hole. A failed dnode lookup is treated as a hole (err is
 * reset to 0), so the return value is 0 in that case as well.
 */
static int __find_data_block(struct inode *inode, pgoff_t index,
				block_t *blk_addr)
{
	struct dnode_of_data dn;
	struct page *ipage;
	struct extent_info ei = {0, };
	int err = 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/* ipage is passed as both inode page and node page; it is released
	 * by f2fs_put_dnode() below. */
	set_new_dnode(&dn, inode, ipage, ipage, 0);

	/* Fast path: resolve the address from the cached extent, if any. */
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
	} else {
		/* hole case */
		err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			dn.data_blkaddr = NULL_ADDR;
			err = 0;
		}
	}
	*blk_addr = dn.data_blkaddr;
	f2fs_put_dnode(&dn);
	return err;
}

/*
 * Reserve (allocate if necessary) a block for @index in @inode via
 * f2fs_get_block(), under the F2FS_GET_BLOCK_PRE_AIO map lock.
 *
 * On return, *blk_addr holds the resulting block address and
 * *node_changed reports whether the node tree was modified (the caller
 * presumably uses this to decide on fs balancing — see dn.node_changed).
 */
static int __reserve_data_block(struct inode *inode, pgoff_t index,
				block_t *blk_addr, bool *node_changed)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage;
	int err = 0;

	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}
	set_new_dnode(&dn, inode, ipage, ipage, 0);

	err = f2fs_get_block(&dn, index);

	/* Outputs are copied even on error; blk_addr/node_changed reflect
	 * whatever state dn was left in. */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
	f2fs_put_dnode(&dn);

unlock_out:
	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	return err;
}

/*
 * Atomic-write variant of prepare_write_begin(): resolve the block
 * address to use for @page, directing new allocations to the COW inode.
 *
 * Cases, in order:
 *  1. Write beyond current i_size  -> reserve a fresh block in the COW
 *     inode.
 *  2. Block already exists in the COW inode -> use it as-is.
 *  3. Otherwise, reserve a new COW block; if the original inode already
 *     had a block at this index, report the original address in
 *     *blk_addr instead of the new COW one.
 *
 * NOTE(review): case 3 presumably makes f2fs_write_begin read the
 * existing data into the page while the eventual writeback goes to the
 * COW inode (see f2fs_do_write_data_page) — confirm against callers.
 */
static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned int len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	struct inode *cow_inode = F2FS_I(inode)->cow_inode;
	pgoff_t index = page->index;
	int err = 0;
	block_t ori_blk_addr;

	/* If pos is beyond the end of file, reserve a new block in COW inode */
	if ((pos & PAGE_MASK) >= i_size_read(inode))
		return __reserve_data_block(cow_inode, index, blk_addr,
					node_changed);

	/* Look for the block in COW inode first */
	err = __find_data_block(cow_inode, index, blk_addr);
	if (err)
		return err;
	else if (*blk_addr != NULL_ADDR)
		return 0;

	/* Look for the block in the original inode */
	err = __find_data_block(inode, index, &ori_blk_addr);
	if (err)
		return err;

	/* Finally, we should reserve a new block in COW inode for the update */
	err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
	if (err)
		return err;

	/* Prefer the original address so existing data can be read in. */
	if (ori_blk_addr != NULL_ADDR)
		*blk_addr = ori_blk_addr;
	return 0;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
@@ -3321,7 +3420,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

@@ -3332,14 +3431,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		goto fail;
	}

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
@@ -3387,6 +3478,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,

	*pagep = page;

	if (f2fs_is_atomic_file(inode))
		err = prepare_atomic_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	else
		err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
@@ -3443,8 +3538,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(inode, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

@@ -3488,8 +3581,12 @@ static int f2fs_write_end(struct file *file,
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode))
	    !f2fs_verity_in_progress(inode)) {
		f2fs_i_size_write(inode, pos + copied);
		if (f2fs_is_atomic_file(inode))
			f2fs_i_size_write(F2FS_I(inode)->cow_inode,
					pos + copied);
	}
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
@@ -3522,9 +3619,6 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		clear_page_private_data(&folio->page);

	if (page_private_atomic(&folio->page))
		return f2fs_drop_inmem_page(inode, &folio->page);

	folio_detach_private(folio);
}

@@ -3534,10 +3628,6 @@ int f2fs_release_page(struct page *page, gfp_t wait)
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (page_private_atomic(page))
		return 0;

	if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
		struct inode *inode = page->mapping->host;

@@ -3563,18 +3653,6 @@ static bool f2fs_dirty_data_folio(struct address_space *mapping,
		folio_mark_uptodate(folio);
	BUG_ON(folio_test_swapcache(folio));

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!page_private_atomic(&folio->page)) {
			f2fs_register_inmem_page(inode, &folio->page);
			return true;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return false;
	}

	if (!folio_test_dirty(folio)) {
		filemap_dirty_folio(mapping, folio);
		f2fs_update_dirty_folio(inode, folio);
@@ -3654,42 +3732,14 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = page_private_atomic(page);
	int rc, extra_count = 0;

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock hold */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;

		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	/* guarantee to start from no stale private field */
	set_page_private(newpage, 0);
+2 −10
Original line number Diff line number Diff line
@@ -91,7 +91,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
	si->nquota_files = sbi->nquota_files;
	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
	si->aw_cnt = sbi->atomic_files;
	si->vw_cnt = atomic_read(&sbi->vw_cnt);
	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
@@ -167,8 +166,6 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
	si->io_skip_bggc = sbi->io_skip_bggc;
	si->other_skip_bggc = sbi->other_skip_bggc;
	si->skipped_atomic_files[BG_GC] = sbi->skipped_atomic_files[BG_GC];
	si->skipped_atomic_files[FG_GC] = sbi->skipped_atomic_files[FG_GC];
	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
		/ 2;
@@ -296,7 +293,6 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
				sizeof(struct nat_entry);
	si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
				sizeof(struct nat_entry_set);
	si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
	for (i = 0; i < MAX_INO_ENTRY; i++)
		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
@@ -491,10 +487,6 @@ static int stat_show(struct seq_file *s, void *v)
				si->bg_data_blks);
		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
				si->bg_node_blks);
		seq_printf(s, "Skipped : atomic write %llu (%llu)\n",
				si->skipped_atomic_files[BG_GC] +
				si->skipped_atomic_files[FG_GC],
				si->skipped_atomic_files[BG_GC]);
		seq_printf(s, "BG skip : IO: %u, Other: %u\n",
				si->io_skip_bggc, si->other_skip_bggc);
		seq_puts(s, "\nExtent Cache:\n");
@@ -519,9 +511,9 @@ static int stat_show(struct seq_file *s, void *v)
			   si->flush_list_empty,
			   si->nr_discarding, si->nr_discarded,
			   si->nr_discard_cmd, si->undiscard_blks);
		seq_printf(s, "  - inmem: %4d, atomic IO: %4d (Max. %4d), "
		seq_printf(s, "  - atomic IO: %4d (Max. %4d), "
			"volatile IO: %4d (Max. %4d)\n",
			   si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
			   si->aw_cnt, si->max_aw_cnt,
			   si->vw_cnt, si->max_vw_cnt);
		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
		seq_printf(s, "  - nodes: %4d in %4d\n",
+7 −26
Original line number Diff line number Diff line
@@ -716,7 +716,6 @@ enum {

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};

@@ -738,7 +737,6 @@ enum {
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate the state of atomical committing */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
@@ -752,7 +750,6 @@ enum {
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
@@ -794,11 +791,9 @@ struct f2fs_inode_info {
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
@@ -1092,7 +1087,6 @@ enum count_type {
	F2FS_DIRTY_QDATA,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	F2FS_INMEM_PAGES,
	F2FS_DIRTY_IMETA,
	F2FS_WB_CP_DATA,
	F2FS_WB_DATA,
@@ -1122,11 +1116,7 @@ enum page_type {
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	IPU,		/* the below types are used by tracepoints only. */
	OPU,
};

@@ -1718,7 +1708,6 @@ struct f2fs_sb_info {

	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
@@ -3202,11 +3191,6 @@ static inline bool f2fs_is_atomic_file(struct inode *inode)
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
}

static inline bool f2fs_is_volatile_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
@@ -3444,6 +3428,8 @@ void f2fs_handle_failed_inode(struct inode *inode);
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
							bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
		     struct inode **new_inode);

/*
 * dir.c
@@ -3579,11 +3565,8 @@ void f2fs_destroy_node_manager_caches(void);
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
void f2fs_register_inmem_page(struct inode *inode, struct page *page);
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
void f2fs_drop_inmem_pages(struct inode *inode);
void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
int f2fs_commit_inmem_pages(struct inode *inode);
int f2fs_commit_atomic_write(struct inode *inode);
void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
@@ -3815,7 +3798,6 @@ struct f2fs_stat_info {
	int ext_tree, zombie_tree, ext_node;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	int inmem_pages;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
@@ -3845,7 +3827,6 @@ struct f2fs_stat_info {
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	unsigned long long skipped_atomic_files[2];
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
+27 −22
Original line number Diff line number Diff line
@@ -1813,9 +1813,8 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remained atomic pages should discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
		f2fs_abort_atomic_write(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
@@ -1837,8 +1836,8 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
			F2FS_I(inode)->atomic_write_task == current)
		f2fs_abort_atomic_write(inode, true);
	return 0;
}

@@ -2001,6 +2000,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
	struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inode *pinode;
	int ret;

	if (!inode_owner_or_capable(mnt_userns, inode))
@@ -2023,11 +2023,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
	if (f2fs_is_atomic_file(inode))
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
@@ -2048,19 +2045,33 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
		goto out;
	}

	/* Create a COW inode for atomic write */
	pinode = f2fs_iget(inode->i_sb, fi->i_pino);
	if (IS_ERR(pinode)) {
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		ret = PTR_ERR(pinode);
		goto out;
	}

	ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
	iput(pinode);
	if (ret) {
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}
	f2fs_i_size_write(fi->cow_inode, i_size_read(inode));

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	set_inode_flag(fi->cow_inode, FI_ATOMIC_FILE);
	clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	F2FS_I(inode)->atomic_write_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
@@ -2091,21 +2102,17 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		ret = f2fs_commit_atomic_write(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
			f2fs_abort_atomic_write(inode, false);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
@@ -2193,15 +2200,13 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
		f2fs_abort_atomic_write(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
+1 −26
Original line number Diff line number Diff line
@@ -1245,13 +1245,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;
@@ -1393,12 +1386,6 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;
@@ -1765,8 +1752,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
@@ -1780,7 +1765,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
@@ -1831,10 +1815,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

@@ -1860,13 +1842,6 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (first_skipped < last_skipped &&
			(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
		f2fs_drop_inmem_pages_all(sbi, true);
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		ret = f2fs_write_checkpoint(sbi, &cpc);
stop:
Loading