Commit dfef313e authored by Linus Torvalds
Pull erofs updates from Gao Xiang:
 "This cycle addresses a reported permission issue with overlay due to a
  duplicated permission check for "trusted." xattrs. Also, a REQ_RAHEAD
  flag is now added to all readahead requests in order to trace
  readahead I/Os. The others are random cleanups.

  All commits have been tested and have been in linux-next as well.

  Summary:

   - fix an issue which can cause an overlay permission problem due to a
     duplicated permission check for "trusted." xattrs;

   - add REQ_RAHEAD flag to readahead requests for blktrace;

   - several random cleanups"

* tag 'erofs-for-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: remove unnecessary enum entries
  erofs: add REQ_RAHEAD flag to readahead requests
  erofs: fold in should_decompress_synchronously()
  erofs: avoid unnecessary variable `err'
  erofs: remove unneeded parameter
  erofs: avoid duplicated permission check for "trusted." xattrs
parents 11e3235b 915f4c93
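
For background on the REQ_RAHEAD change: the flag is simply OR'd into a read bio's bi_opf, which lets blktrace and other tracing tools tell readahead I/O apart from ordinary reads. A minimal illustrative sketch follows (not code from this series; the helper name and its ra parameter are invented for illustration):

	#include <linux/bio.h>

	/* Illustrative only: submit a read bio, tagging it as readahead
	 * when it originated from the readahead path so that tracing
	 * tools (e.g. blktrace) can distinguish it. */
	static void submit_read_bio_sketch(struct bio *bio, bool ra)
	{
		bio->bi_opf = REQ_OP_READ;
		if (ra)
			bio->bi_opf |= REQ_RAHEAD;
		submit_bio(bio);
	}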
+1 −1
@@ -224,7 +224,7 @@ static inline struct bio *erofs_read_raw_page(struct bio *bio,
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
-		bio->bi_opf = REQ_OP_READ;
+		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
+0 −2
@@ -211,9 +211,7 @@ static void erofs_default_options(struct erofs_fs_context *ctx)

enum {
	Opt_user_xattr,
-	Opt_nouser_xattr,
	Opt_acl,
-	Opt_noacl,
	Opt_cache_strategy,
	Opt_err
};
+0 −2
@@ -473,8 +473,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
+21 −27
@@ -135,6 +135,7 @@ struct z_erofs_decompress_frontend {
	struct z_erofs_collector clt;
	struct erofs_map_blocks map;

+	bool readahead;
	/* used for applying cache strategy on the fly */
	bool backmost;
	erofs_off_t headoffset;
@@ -153,8 +154,7 @@ static DEFINE_MUTEX(z_pagemap_global_lock);

static void preload_compressed_pages(struct z_erofs_collector *clt,
				     struct address_space *mc,
-				     enum z_erofs_cache_alloctype type,
-				     struct list_head *pagepool)
+				     enum z_erofs_cache_alloctype type)
{
	const struct z_erofs_pcluster *pcl = clt->pcl;
	const unsigned int clusterpages = BIT(pcl->clusterbits);
@@ -562,8 +562,7 @@ static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
}

static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-				struct page *page,
-				struct list_head *pagepool)
+				struct page *page)
{
	struct inode *const inode = fe->inode;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -620,8 +619,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
	else
		cache_strategy = DONTALLOC;

-	preload_compressed_pages(clt, MNGD_MAPPING(sbi),
-				 cache_strategy, pagepool);
+	preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy);

hitted:
	/*
@@ -653,7 +651,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
-			erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+				alloc_page(GFP_NOFS | __GFP_NOFAIL);

		newpage->mapping = Z_EROFS_MAPPING_STAGING;
		err = z_erofs_attach_page(clt, newpage,
@@ -1151,7 +1149,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
}

static void z_erofs_submit_queue(struct super_block *sb,
-				 z_erofs_next_pcluster_t owned_head,
+				 struct z_erofs_decompress_frontend *f,
				 struct list_head *pagepool,
				 struct z_erofs_decompressqueue *fgq,
				 bool *force_fg)
@@ -1160,6 +1158,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	void *bi_private;
+	z_erofs_next_pcluster_t owned_head = f->clt.owned_head;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t last_index;
	unsigned int nr_bios = 0;
@@ -1193,7 +1192,6 @@ static void z_erofs_submit_queue(struct super_block *sb,

		do {
			struct page *page;
-			int err;

			page = pickup_page_for_submission(pcl, i++, pagepool,
							  MNGD_MAPPING(sbi),
@@ -1216,11 +1214,12 @@ static void z_erofs_submit_queue(struct super_block *sb,
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				bio->bi_opf = REQ_OP_READ;
+				if (f->readahead)
+					bio->bi_opf |= REQ_RAHEAD;
				++nr_bios;
			}

-			err = bio_add_page(bio, page, PAGE_SIZE, 0);
-			if (err < PAGE_SIZE)
+			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				goto submit_bio_retry;

			last_index = cur;
@@ -1248,14 +1247,14 @@ static void z_erofs_submit_queue(struct super_block *sb,
}

static void z_erofs_runqueue(struct super_block *sb,
-			     struct z_erofs_collector *clt,
+			     struct z_erofs_decompress_frontend *f,
			     struct list_head *pagepool, bool force_fg)
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

-	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+	if (f->clt.owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
-	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
+	z_erofs_submit_queue(sb, f, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
@@ -1282,11 +1281,11 @@ static int z_erofs_readpage(struct file *file, struct page *page)

	f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;

-	err = z_erofs_do_read_page(&f, page, &pagepool);
+	err = z_erofs_do_read_page(&f, page);
	(void)z_erofs_collector_end(&f.clt);

	/* if some compressed cluster ready, need submit them anyway */
-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, true);

	if (err)
		erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1299,25 +1298,20 @@ static int z_erofs_readpage(struct file *file, struct page *page)
	return err;
}

-static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
-					    unsigned int nr)
-{
-	return nr <= sbi->ctx.max_sync_decompress_pages;
-}
-
static void z_erofs_readahead(struct readahead_control *rac)
{
	struct inode *const inode = rac->mapping->host;
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

-	bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
+	unsigned int nr_pages = readahead_count(rac);
+	bool sync = (nr_pages <= sbi->ctx.max_sync_decompress_pages);
	struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
	struct page *page, *head = NULL;
	LIST_HEAD(pagepool);

-	trace_erofs_readpages(inode, readahead_index(rac),
-			readahead_count(rac), false);
+	trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);

+	f.readahead = true;
	f.headoffset = readahead_pos(rac);

	while ((page = readahead_page(rac))) {
@@ -1341,7 +1335,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
		/* traversal in reverse order */
		head = (void *)page_private(page);

-		err = z_erofs_do_read_page(&f, page, &pagepool);
+		err = z_erofs_do_read_page(&f, page);
		if (err)
			erofs_err(inode->i_sb,
				  "readahead error at page %lu @ nid %llu",
@@ -1351,7 +1345,7 @@ static void z_erofs_readahead(struct readahead_control *rac)

	(void)z_erofs_collector_end(&f.clt);

-	z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
+	z_erofs_runqueue(inode->i_sb, &f, &pagepool, sync);

	if (f.map.mpage)
		put_page(f.map.mpage);