Unverified Commit 2aaa19d9 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4510 ubi: fastmap: Optimize ubi wl algorithm to improve flash service life

Merge Pull Request from: @ci-robot 
 
PR sync from: Zhihao Cheng <chengzhihao1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/KQK22WSAGEOXCMTZ57FEX22CJSSNMNS6/ 
Christophe JAILLET (1):
  ubi: fastmap: Use the bitmap API to allocate bitmaps

Jilin Yuan (1):
  ubi: Fix repeated words in comments

Yang Li (1):
  ubi: Simplify bool conversion

Zhihao Cheng (13):
  ubi: fastmap: Check wl_pool for free peb before wear leveling
  ubi: fastmap: Fix missed ec updating after erasing old fastmap data
    block
  ubi: fastmap: erase_block: Get erase counter from wl_entry rather than
    flash
  ubi: fastmap: Allocate memory with GFP_NOFS in ubi_update_fastmap
  ubi: Replace erase_block() with sync_erase()
  ubi: fastmap: Use free pebs reserved for bad block handling
  ubi: fastmap: Wait until there are enough free PEBs before filling
    pools
  ubi: fastmap: Remove unneeded break condition while filling pools
  ubi: fastmap: may_reserve_for_fm: Don't reserve PEB if fm_anchor
    exists
  ubi: fastmap: Get wl PEB even ec beyonds the 'max' if free PEBs are
    run out
  ubi: fastmap: Fix lapsed wear leveling for first 64 PEBs
  ubi: fastmap: Add module parameter to control reserving filling pool
    PEBs
  ubi: fastmap: Add control in 'UBI_IOCATT' ioctl to reserve PEBs for
    filling pools


-- 
2.31.1
 
https://gitee.com/openeuler/kernel/issues/I9195H 
 
Link: https://gitee.com/openeuler/kernel/pulls/4510

 

Reviewed-by: default avatarzhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents c6bcd162 6bb4e95c
Loading
Loading
Loading
Loading
+22 −3
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 5
#define MTD_PARAM_MAX_COUNT 6

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768
@@ -53,6 +53,7 @@
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 * @enable_fm: enable fastmap when value is non-zero
 * @need_resv_pool: reserve pool->max_size pebs when value is non-zero
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
@@ -60,6 +61,7 @@ struct mtd_dev_param {
	int vid_hdr_offs;
	int max_beb_per1024;
	int enable_fm;
	int need_resv_pool;
};

/* Numbers of elements set in the @mtd_dev_param array */
@@ -827,6 +829,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 * @disable_fm: whether disable fastmap
 * @need_resv_pool: whether reserve pebs to fill fm_pool
 *
 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -842,7 +845,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
		       int vid_hdr_offset, int max_beb_per1024, bool disable_fm,
		       bool need_resv_pool)
{
	struct ubi_device *ubi;
	int i, err;
@@ -952,6 +956,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		UBI_FM_MIN_POOL_SIZE);

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
	ubi->fm_pool_rsv_cnt = need_resv_pool ? ubi->fm_pool.max_size : 0;
	ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);
@@ -1274,7 +1279,8 @@ static int __init ubi_init(void)
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024,
					 p->enable_fm == 0 ? true : false);
					 p->enable_fm == 0,
					 p->need_resv_pool != 0);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d\n",
@@ -1484,6 +1490,18 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
	} else
		p->enable_fm = 0;

	token = tokens[5];
	if (token) {
		int err = kstrtoint(token, 10, &p->need_resv_pool);

		if (err) {
			pr_err("UBI error: bad value for need_resv_pool parameter: %s\n",
				token);
			return -EINVAL;
		}
	} else
		p->need_resv_pool = 0;

	mtd_devs += 1;
	return 0;
}
@@ -1497,6 +1515,7 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
		      "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
		      "Optional \"need_resv_pool\" parameter determines whether to reserve pool->max_size pebs during attach. If the value is non-zero, peb reservation is enabled. Default value is 0.\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+3 −2
Original line number Diff line number Diff line
@@ -672,7 +672,7 @@ static int verify_rsvol_req(const struct ubi_device *ubi,
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * the request, opens the volume and calls corresponding volumes management
 * request, opens the volume and calls corresponding volumes management
 * function. Returns zero in case of success and a negative error code in case
 * of failure.
 */
@@ -1041,7 +1041,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
					 req.max_beb_per1024, !!req.disable_fm);
					 req.max_beb_per1024, !!req.disable_fm,
					 !!req.need_resv_pool);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
+1 −4
Original line number Diff line number Diff line
@@ -33,9 +33,6 @@
#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
 * @pnum: the physical eraseblock number attached to the LEB
@@ -378,7 +375,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and and a negative error code in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+138 −22
Original line number Diff line number Diff line
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
	if (!ubi->free.rb_node)
		goto out;

	if (anchor)
@@ -98,43 +98,104 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
}

/*
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * wait_free_pebs_for_pool - wait until there are enough free pebs
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling wear leveling pool
 *
 * This helper function checks whether there are enough free pebs (deducted
 * by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
 * there is at least one of free pebs is filled into fm_wl_pool.
 * For wear leveling pool, UBI should also reserve free pebs for bad pebs
 * handling, because there maybe no enough free pebs for user volumes after
 * producing new bad pebs.
 * Wait and execute do_work until there are enough free pebs, fill pool
 * as much as we can. This will reduce pool refilling times, which can
 * reduce the fastmap updating frequency.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
static void wait_free_pebs_for_pool(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	int free, expect_free, executed;
	/*
	 * There are at least following free pebs which reserved by UBI:
	 * 1. WL_RESERVED_PEBS[1]
	 * 2. EBA_RESERVED_PEBS[1]
	 * 3. fm pebs - 1: Twice fastmap size deducted by fastmap and fm_anchor
	 * 4. beb_rsvd_pebs: This value should be read under lock ubi->wl_lock
	 */
	int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
		       ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;

	do {
		spin_lock(&ubi->wl_lock);
		free = ubi->free_count;
		free += pool->size - pool->used + wl_pool->size - wl_pool->used;
		expect_free = reserved + ubi->beb_rsvd_pebs;
		spin_unlock(&ubi->wl_lock);

		/*
		 * Break out if there are no works or a work fails to execute,
		 * given the fact that erase_worker will schedule itself when
		 * -EBUSY is returned from mtd layer caused by system shutdown.
		 */
		if (do_work(ubi, &executed) || !executed)
			break;
	} while (free < expect_free);
}

/*
 * left_free_count - returns the number of free pebs to fill fm pools
 * @ubi: UBI device description object
 *
 * This helper function returns the number of free pebs (deducted
 * by fastmap pebs) to fill fm_pool and fm_wl_pool.
 */
static int left_free_count(struct ubi_device *ubi)
{
	int fm_used = 0;	// fastmap non anchor pebs.
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;
		return 0;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
	if (!ubi->ro_mode && !ubi->fm_disabled)
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - beb_rsvd_pebs > fm_used;
	return ubi->free_count - fm_used;
}

/*
 * can_fill_pools - whether free PEBs will be left after filling pools
 * @ubi: UBI device description object
 * @free: current number of free PEBs
 *
 * Return %1 if there are still free PEBs left after filling the pools,
 * otherwise %0 is returned.
 */
static int can_fill_pools(struct ubi_device *ubi, int free)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	int pool_need = pool->max_size - pool->size +
			wl_pool->max_size - wl_pool->size;

	if (free - pool_need < 1)
		return 0;

	return 1;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	if (!ubi->ro_mode && !ubi->fm_disabled)
		wait_free_pebs_for_pool(ubi);

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
@@ -159,7 +220,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
			if (left_free_count(ubi) <= 0)
				break;

			e = wl_get_wle(ubi);
@@ -172,10 +233,13 @@ void ubi_refill_pools(struct ubi_device *ubi)
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
			int left_free = left_free_count(ubi);

			if (left_free <= 0)
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
					  !can_fill_pools(ubi, left_free));
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;
@@ -210,7 +274,7 @@ static int produce_free_peb(struct ubi_device *ubi)

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		err = do_work(ubi, NULL);

		if (err)
			return err;
@@ -277,6 +341,58 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
	return ret;
}

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size)
		return NULL;

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEB from wl_pool, we check free PEBs from both 'wl_pool'
 * and 'ubi->free', because a free PEB in the 'ubi->free' tree may be moved
 * into 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
			ec = max(ec, e->ec);
		}
	}
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
@@ -429,7 +545,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					   struct ubi_wl_entry *e,
					   struct rb_root *root) {
	if (e && !ubi->fm_disabled && !ubi->fm &&
	if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);
+10 −64
Original line number Diff line number Diff line
@@ -20,8 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

@@ -34,7 +33,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
	bitmap_free(seen);
}

/**
@@ -106,7 +105,7 @@ static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!new)
		goto out;

@@ -1097,8 +1096,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
				GFP_KERNEL);
	vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

@@ -1107,7 +1105,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)

void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	kfree(vol->checkmap);
	bitmap_free(vol->checkmap);
}

/**
@@ -1380,53 +1378,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
@@ -1453,7 +1404,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	fm = kzalloc(sizeof(*fm), GFP_NOFS);
	if (!fm)
		goto out;

@@ -1529,11 +1480,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);
	ubi_refill_pools_and_lock(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
@@ -1542,7 +1489,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
		return 0;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
@@ -1567,7 +1514,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

@@ -1619,7 +1566,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

@@ -1631,7 +1578,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
Loading