Commit 92901222 authored by Linus Torvalds
Browse files
Pull f2fs updates from Jaegeuk Kim:
 "In this cycle, we don't have a highlighted feature enhancement, but
  mostly have fixed issues mainly in two parts: 1) zoned block device,
  and 2) compression support.

  For zoned block device, we've tried to improve the power-off recovery
  flow as much as possible. For compression, we found some corner cases
  caused by wrong compression policy and logics. Other than them, there
  were some reverts and stat corrections.

  Bug fixes:
   - use finish zone command when closing a zone
   - check zone type before sending async reset zone command
   - fix to assign compress_level for lz4 correctly
   - fix error path of f2fs_submit_page_read()
   - don't {,de}compress non-full cluster
   - bring back sending small discard commands during checkpoint
   - flush inode if atomic file is aborted
   - correct to account gc/cp stats

  And, there are minor bug fixes, avoiding false lockdep warning, and
  clean-ups"

* tag 'f2fs-for-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (25 commits)
  f2fs: use finish zone command when closing a zone
  f2fs: compress: fix to assign compress_level for lz4 correctly
  f2fs: fix error path of f2fs_submit_page_read()
  f2fs: clean up error handling in sanity_check_{compress_,}inode()
  f2fs: avoid false alarm of circular locking
  Revert "f2fs: do not issue small discard commands during checkpoint"
  f2fs: doc: fix description of max_small_discards
  f2fs: should update REQ_TIME for direct write
  f2fs: fix to account cp stats correctly
  f2fs: fix to account gc stats correctly
  f2fs: remove unneeded check condition in __f2fs_setxattr()
  f2fs: fix to update i_ctime in __f2fs_setxattr()
  Revert "f2fs: fix to do sanity check on extent cache correctly"
  f2fs: increase usage of folio_next_index() helper
  f2fs: Only lfs mode is allowed with zoned block device feature
  f2fs: check zone type before sending async reset zone command
  f2fs: compress: don't {,de}compress non-full cluster
  f2fs: allow f2fs_ioc_{,de}compress_file to be interrupted
  f2fs: don't reopen the main block device in f2fs_scan_devices
  f2fs: fix to avoid mmap vs set_compress_option case
  ...
parents b89b0293 3b716612
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -102,9 +102,9 @@ What: /sys/fs/f2fs/<disk>/max_small_discards
Date:		November 2013
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:	Controls the issue rate of discard commands that consist of small
		blocks less than 2MB. The candidates to be discarded are cached until
		checkpoint is triggered, and issued during the checkpoint.
		By default, it is disabled with 0.
		blocks less than 2MB. The candidates to be discarded are cached during
		checkpoint, and issued by issue_discard thread after checkpoint.
		It is enabled by default.

What:		/sys/fs/f2fs/<disk>/max_ordered_discard
Date:		October 2022
+1 −1
Original line number Diff line number Diff line
@@ -1701,9 +1701,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	}

	f2fs_restore_inmem_curseg(sbi);
	stat_inc_cp_count(sbi);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
+1 −13
Original line number Diff line number Diff line
@@ -649,13 +649,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
	for (i = 0; i < cc->nr_cpages; i++)
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
@@ -1574,8 +1569,6 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			return -ENOMEM;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
@@ -1656,11 +1649,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page) {
			ret = -ENOMEM;
			goto out_free;
		}

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
+6 −8
Original line number Diff line number Diff line
@@ -1167,6 +1167,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		iostat_update_and_unbind_ctx(bio);
		if (bio->bi_private)
			mempool_free(bio->bi_private, bio_post_read_ctx_pool);
		bio_put(bio);
		return -EFAULT;
	}
@@ -1389,18 +1392,14 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:

	page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
	if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
@@ -3236,8 +3235,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
					}
					goto next;
				}
				done_index = folio->index +
					folio_nr_pages(folio);
				done_index = folio_next_index(folio);
				done = 1;
				break;
			}
+26 −7
Original line number Diff line number Diff line
@@ -215,6 +215,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
		si->valid_blks[type] += blks;
	}

	for (i = 0; i < MAX_CALL_TYPE; i++)
		si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);

	for (i = 0; i < 2; i++) {
		si->segment_count[i] = sbi->segment_count[i];
		si->block_count[i] = sbi->block_count[i];
@@ -497,7 +500,9 @@ static int stat_show(struct seq_file *s, void *v)
		seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
			   si->prefree_count, si->free_segs, si->free_secs);
		seq_printf(s, "CP calls: %d (BG: %d)\n",
				si->cp_count, si->bg_cp_count);
			   si->cp_call_count[TOTAL_CALL],
			   si->cp_call_count[BACKGROUND]);
		seq_printf(s, "CP count: %d\n", si->cp_count);
		seq_printf(s, "  - cp blocks : %u\n", si->meta_count[META_CP]);
		seq_printf(s, "  - sit blocks : %u\n",
				si->meta_count[META_SIT]);
@@ -511,12 +516,24 @@ static int stat_show(struct seq_file *s, void *v)
		seq_printf(s, "  - Total : %4d\n", si->nr_total_ckpt);
		seq_printf(s, "  - Cur time : %4d(ms)\n", si->cur_ckpt_time);
		seq_printf(s, "  - Peak time : %4d(ms)\n", si->peak_ckpt_time);
		seq_printf(s, "GC calls: %d (BG: %d)\n",
			   si->call_count, si->bg_gc);
		seq_printf(s, "  - data segments : %d (%d)\n",
				si->data_segs, si->bg_data_segs);
		seq_printf(s, "  - node segments : %d (%d)\n",
				si->node_segs, si->bg_node_segs);
		seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
			   si->gc_call_count[BACKGROUND] +
			   si->gc_call_count[FOREGROUND],
			   si->gc_call_count[BACKGROUND]);
		if (__is_large_section(sbi)) {
			seq_printf(s, "  - data sections : %d (BG: %d)\n",
					si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
					si->gc_secs[DATA][BG_GC]);
			seq_printf(s, "  - node sections : %d (BG: %d)\n",
					si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
					si->gc_secs[NODE][BG_GC]);
		}
		seq_printf(s, "  - data segments : %d (BG: %d)\n",
				si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
				si->gc_segs[DATA][BG_GC]);
		seq_printf(s, "  - node segments : %d (BG: %d)\n",
				si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
				si->gc_segs[NODE][BG_GC]);
		seq_puts(s, "  - Reclaimed segs :\n");
		seq_printf(s, "    - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
		seq_printf(s, "    - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
@@ -687,6 +704,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
	atomic_set(&sbi->inplace_count, 0);
	for (i = META_CP; i < META_MAX; i++)
		atomic_set(&sbi->meta_count[i], 0);
	for (i = 0; i < MAX_CALL_TYPE; i++)
		atomic_set(&sbi->cp_call_count[i], 0);

	atomic_set(&sbi->max_aw_cnt, 0);

Loading