Commit 5911d2d1 authored by Chao Yu's avatar Chao Yu Committed by Jaegeuk Kim
Browse files

f2fs: introduce gc_merge mount option



In this patch, we add two new mount options: "gc_merge" and
"nogc_merge". When background_gc is on, the "gc_merge" option can be
set to let the background GC thread handle foreground GC requests;
this eliminates the sluggishness caused by a slow foreground GC
operation when GC is triggered from a process with limited I/O
and CPU resources.

Original idea is from Xiang.

Signed-off-by: default avatarGao Xiang <xiang@kernel.org>
Signed-off-by: default avatarChao Yu <yuchao0@huawei.com>
Signed-off-by: default avatarJaegeuk Kim <jaegeuk@kernel.org>
parent 823d13e1
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -110,6 +110,12 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
			 on synchronous garbage collection running in background.
			 Default value for this option is on. So garbage
			 collection is on by default.
gc_merge		 When background_gc is on, this option can be enabled to
			 let the background GC thread handle foreground GC requests;
			 this eliminates the sluggishness caused by a slow foreground
			 GC operation when GC is triggered from a process with limited
			 I/O and CPU resources.
nogc_merge		 Disable the GC merge feature.
disable_roll_forward	 Disable the roll-forward recovery routine
norecovery		 Disable the roll-forward recovery routine, mounted read-
			 only (i.e., -o ro,disable_roll_forward)
+1 −0
Original line number Diff line number Diff line
@@ -97,6 +97,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define	F2FS_MOUNT_GC_MERGE		0x20000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
+22 −4
Original line number Diff line number Diff line
@@ -31,19 +31,24 @@ static int gc_thread_func(void *data)
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode;
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;
@@ -90,7 +95,10 @@ static int gc_thread_func(void *data)
			goto do_gc;
		}

		if (!down_write_trylock(&sbi->gc_lock)) {
		if (foreground) {
			down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}
@@ -107,14 +115,22 @@ static int gc_thread_func(void *data)
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

@@ -148,6 +164,7 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
@@ -165,6 +182,7 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
+6 −0
Original line number Diff line number Diff line
@@ -42,6 +42,12 @@ struct f2fs_gc_kthread {

	/* for changing gc mode */
	unsigned int gc_wake;

	/* for GC_MERGE mount option */
	wait_queue_head_t fggc_wq;		/*
						 * caller of f2fs_balance_fs()
						 * will wait on this wait queue.
						 */
};

struct gc_inode_list {
+13 −2
Original line number Diff line number Diff line
@@ -503,10 +503,21 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
					sbi->gc_thread->f2fs_gc_task) {
			DEFINE_WAIT(wait);

			prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
						TASK_UNINTERRUPTIBLE);
			wake_up(&sbi->gc_thread->gc_wait_queue_head);
			io_schedule();
			finish_wait(&sbi->gc_thread->fggc_wq, &wait);
		} else {
			down_write(&sbi->gc_lock);
			f2fs_gc(sbi, false, false, false, NULL_SEGNO);
		}
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
Loading