Commit e31b283a authored by Linus Torvalds
Browse files

Merge tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull jffs2, ubi and ubifs updates from Richard Weinberger:
 "JFFS2:
   - Fix memory corruption in error path
   - Spelling and coding style fixes

  UBI:
   - Switch to BLK_MQ_F_BLOCKING in ubiblock
   - Wire up parent device (for sysfs)
   - Multiple UAF bugfixes
   - Fix for an infinite loop in WL error path

  UBIFS:
   - Fix for multiple memory leaks in error paths
   - Fixes for wrong space accounting
   - Minor cleanups
   - Spelling and coding style fixes"

* tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs: (36 commits)
  ubi: block: Fix a possible use-after-free bug in ubiblock_create()
  ubifs: make kobj_type structures constant
  mtd: ubi: block: wire-up device parent
  mtd: ubi: wire-up parent MTD device
  ubi: use correct names in function kernel-doc comments
  ubi: block: set BLK_MQ_F_BLOCKING
  jffs2: Fix list_del corruption if compressors initialized failed
  jffs2: Use function instead of macro when initialize compressors
  jffs2: fix spelling mistake "neccecary"->"necessary"
  ubifs: Fix kernel-doc
  ubifs: Fix some kernel-doc comments
  UBI: Fastmap: Fix kernel-doc
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubi: fastmap: Fix missed fm_anchor PEB in wear-leveling after disabling fastmap
  ubifs: ubifs_releasepage: Remove ubifs_assert(0) to valid this process
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix permission display of the debugfs files
  ...
parents 3808330b 8fcf2d01
Loading
Loading
Loading
Loading
+36 −73
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
};

struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

@@ -82,8 +80,6 @@ struct ubiblock {
	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
static blk_status_t ubiblock_read(struct request *req)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
	struct ubiblock *dev = req->q->queuedata;
	u64 pos = blk_rq_pos(req) << 9;
	int to_read = blk_rq_bytes(req);
	int bytes_left = to_read;
	/* Get LEB:offset address to read from */
	int offset = do_div(pos, dev->leb_size);
	int leb = pos;
	struct req_iterator iter;
	struct bio_vec bvec;
	int ret;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;
	blk_mq_start_request(req);

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;
	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	ubi_sgl_init(&pdu->usgl);
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	while (bytes_left) {
		/*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;
			break;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);
	return errno_to_blk_status(ret);
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
	.getgeo	= ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct req_iterator iter;
	struct bio_vec bvec;

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
			     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	switch (req_op(bd->rq)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
		return ubiblock_read(bd->rq);
	default:
		return BLK_STS_IOERR;
	}

}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

@@ -354,8 +328,11 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		pr_warn("UBI: block: volume size is not a multiple of 512, "
			"last %llu bytes are ignored!\n",
		if (vi->vol_type == UBI_DYNAMIC_VOLUME)
			pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
		else
			pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
	}

@@ -401,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;
@@ -439,32 +416,20 @@ int ubiblock_create(struct ubi_volume_info *vi)
	dev->rq = gd->queue;
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_remove_minor;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	ret = add_disk(dev->gd);
	ret = device_add_disk(vi->dev, dev->gd, NULL);
	if (ret)
		goto out_destroy_wq;
		goto out_remove_minor;

	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_destroy_wq:
	list_del(&dev->list);
	destroy_workqueue(dev->wq);
out_remove_minor:
	list_del(&dev->list);
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
	put_disk(dev->gd);
@@ -482,8 +447,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests to arrive */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
+28 −4
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4
#define MTD_PARAM_MAX_COUNT 5

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,12 +53,14 @@
 * @ubi_num: UBI number
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 * @enable_fm: enable fastmap when value is non-zero
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
	int enable_fm;
};

/* Numbers of elements set in the @mtd_dev_param array */
@@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi)
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				ubi->volumes[i] = NULL;
				goto out_volumes;
			}
		}
@@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
	    ubi->vid_hdr_alsize)) {
		ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
		return -EINVAL;
	}

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
	ubi->dev.release = dev_release;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;
	ubi->dev.parent = &mtd->dev;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
@@ -1248,7 +1258,7 @@ static int __init ubi_init(void)
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024,
					 false);
					 p->enable_fm == 0 ? true : false);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d\n",
@@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
		int err = kstrtoint(token, 10, &p->max_beb_per1024);

		if (err) {
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
			       token);
			return -EINVAL;
		}
@@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
		int err = kstrtoint(token, 10, &p->ubi_num);

		if (err) {
			pr_err("UBI error: bad value for ubi_num parameter: %s",
			pr_err("UBI error: bad value for ubi_num parameter: %s\n",
			       token);
			return -EINVAL;
		}
	} else
		p->ubi_num = UBI_DEV_NUM_AUTO;

	token = tokens[4];
	if (token) {
		int err = kstrtoint(token, 10, &p->enable_fm);

		if (err) {
			pr_err("UBI error: bad value for enable_fm parameter: %s\n",
				token);
			return -EINVAL;
		}
	} else
		p->enable_fm = 0;

	mtd_devs += 1;
	return 0;
}
@@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
		      "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
		      "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
+10 −9
Original line number Diff line number Diff line
@@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
{
	unsigned long ubi_num = ubi->ubi_num;
	struct ubi_debug_info *d = &ubi->dbg;
	umode_t mode = S_IRUSR | S_IWUSR;
	int n;

	if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)

	d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);

	d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
	d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
					     (void *)ubi_num, &dfs_fops);

	d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
	d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
					    (void *)ubi_num, &dfs_fops);

	d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
	d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
						 d->dfs_dir, (void *)ubi_num,
						 &dfs_fops);

	d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
	d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
						 d->dfs_dir, (void *)ubi_num,
						 &dfs_fops);

	d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
						      S_IWUSR, d->dfs_dir,
						      mode, d->dfs_dir,
						      (void *)ubi_num,
						      &dfs_fops);

	d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
							 S_IWUSR, d->dfs_dir,
							 mode, d->dfs_dir,
							 (void *)ubi_num,
							 &dfs_fops);

	d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
						       S_IWUSR, d->dfs_dir,
						       mode, d->dfs_dir,
						       (void *)ubi_num,
						       &dfs_fops);

	d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
						   S_IWUSR, d->dfs_dir,
						   mode, d->dfs_dir,
						   (void *)ubi_num, &dfs_fops);

	d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
						   S_IWUSR, d->dfs_dir,
						   mode, d->dfs_dir,
						   (void *)ubi_num, &dfs_fops);

	debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+1 −1
Original line number Diff line number Diff line
@@ -61,7 +61,7 @@ struct ubi_eba_table {
};

/**
 * next_sqnum - get next sequence number.
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns next sequence number to use, which is just the current
+7 −5
Original line number Diff line number Diff line
@@ -146,8 +146,10 @@ void ubi_refill_pools(struct ubi_device *ubi)
	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
		ubi->fm_anchor = NULL;
	}

	if (!ubi->fm_disabled)
		/*
		 * All available PEBs are in ubi->free, now is the time to get
		 * the best anchor PEBs.
Loading