Commit 1ae9470c authored by Jingbo Xu, committed by Gao Xiang
Browse files

erofs: clean up .read_folio() and .readahead() in fscache mode



The implementation of these two functions in fscache mode is almost the
same. Extract the same part as a generic helper to remove the code
duplication.

Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Link: https://lore.kernel.org/r/20220922062414.20437-1-jefflexu@linux.alibaba.com


Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent 2ef16441
Loading
Loading
Loading
Loading
+83 −130
Original line number Diff line number Diff line
@@ -240,113 +240,111 @@ static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
	return ret;
}

/*
 * Read into page cache in the range described by (@pos, @len).
 *
 * On return, the caller is responsible for page unlocking if the output @unlock
 * is true, or the callee will take this responsibility through netfs_io_request
 * interface.
 *
 * The return value is the number of bytes successfully handled, or negative
 * error code on failure. The only exception is that, the length of the range
 * instead of the error code is returned on failure after netfs_io_request is
 * allocated, so that .readahead() could advance rac accordingly.
 */
static int erofs_fscache_data_read(struct address_space *mapping,
				   loff_t pos, size_t len, bool *unlock)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct iov_iter iter;
	size_t count;
	int ret;

	/* Default: caller unlocks; cleared only once an rreq owns the folios. */
	*unlock = true;

	map.m_la = pos;
	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		return ret;

	if (map.m_flags & EROFS_MAP_META) {
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
		erofs_blk_t blknr;
		size_t offset, size;
		void *src;

		/* For tail packing layout, the offset may be non-zero. */
		offset = erofs_blkoff(map.m_pa);
		blknr = erofs_blknr(map.m_pa);
		size = map.m_llen;

		src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
		if (IS_ERR(src))
			return PTR_ERR(src);

		/* Copy the inline data into the page cache and zero the tail. */
		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE);
		if (copy_to_iter(src + offset, size, &iter) != size) {
			/* don't leak the metabuf reference on a short copy */
			erofs_put_metabuf(&buf);
			return -EFAULT;
		}
		iov_iter_zero(PAGE_SIZE - size, &iter);
		erofs_put_metabuf(&buf);
		return PAGE_SIZE;
	}

	count = min_t(size_t, map.m_llen - (pos - map.m_la), len);
	DBG_BUGON(!count || count % PAGE_SIZE);

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		/* Hole: just zero-fill the page cache range. */
		iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count);
		iov_iter_zero(count, &iter);
		return count;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	rreq = erofs_fscache_alloc_request(mapping, pos, count);
	if (IS_ERR(rreq))
		return PTR_ERR(rreq);

	/* rreq completion will unlock the folios from now on. */
	*unlock = false;
	erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
			rreq, mdev.m_pa + (pos - map.m_la));
	return count;
}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, pstart);
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
	bool unlock;
	int ret;

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}
	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);
		len -= folio_size(folio);
	ret = erofs_fscache_data_read(folio_mapping(folio), folio_pos(folio),
				      folio_size(folio), &unlock);
	if (unlock) {
		if (ret > 0)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	}
	return ret < 0 ? ret : 0;
}

/*
 * .readahead() for fscache mode: repeatedly feed the remaining range to
 * erofs_fscache_data_read() and advance @rac over the folios each round
 * handled, unlocking them here unless a netfs_io_request took ownership.
 */
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct folio *folio;
	size_t len, done = 0;
	loff_t start, pos;
	bool unlock;
	int ret, size;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		pos = start + done;
		ret = erofs_fscache_data_read(rac->mapping, pos,
					      len - done, &unlock);
		if (ret <= 0)
			return;

		/*
		 * Drop the ref of the folios handled in this round. When
		 * @unlock is true the data is already in place, so mark
		 * them uptodate and unlock here; otherwise rreq completion
		 * in rreq_unlock_folios() will unlock them.
		 */
		size = ret;
		while (size) {
			folio = readahead_folio(rac);
			size -= folio_size(folio);
			if (unlock) {
				folio_mark_uptodate(folio);
				folio_unlock(folio);
			}
		}
	} while ((done += ret) < len);
}

static const struct address_space_operations erofs_fscache_meta_aops = {