Commit a2649315 authored by Fengnan Chang, committed by Jaegeuk Kim
Browse files

f2fs: compress: avoid duplicate counting of valid blocks when read compressed file



Since a cluster is the basic unit of compression, a cluster is either
compressed as a whole or not at all. We therefore only need to check for
valid blocks on the first page of each cluster; the remaining pages of
the cluster can skip the check.

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 65ddf656
Loading
Loading
Loading
Loading
+17 −5
Original line number Diff line number Diff line
@@ -2299,6 +2299,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
		.nr_rpages = 0,
		.nr_cpages = 0,
	};
	pgoff_t nc_cluster_idx = NULL_CLUSTER;
#endif
	unsigned nr_pages = rac ? readahead_count(rac) : 1;
	unsigned max_nr_pages = nr_pages;
@@ -2331,12 +2332,23 @@ static int f2fs_mpage_readpages(struct inode *inode,
				if (ret)
					goto set_error_page;
			}
			if (cc.cluster_idx == NULL_CLUSTER) {
				if (nc_cluster_idx ==
					page->index >> cc.log_cluster_size) {
					goto read_single_page;
				}

				ret = f2fs_is_compressed_cluster(inode, page->index);
				if (ret < 0)
					goto set_error_page;
			else if (!ret)
				else if (!ret) {
					nc_cluster_idx =
						page->index >> cc.log_cluster_size;
					goto read_single_page;
				}

				nc_cluster_idx = NULL_CLUSTER;
			}
			ret = f2fs_init_compress_ctx(&cc);
			if (ret)
				goto set_error_page;