Unverified Commit 8941cd8d authored by Patrice Chotard, committed by Mark Brown
Browse files

mtd: spinand: use the spi-mem poll status APIs



Make use of spi-mem poll status APIs to let advanced controllers
optimize wait operations.
This should also fix the high CPU usage on systems that don't have
a dedicated STATUS poll block logic.

Signed-off-by: Patrice Chotard <patrice.chotard@foss.st.com>
Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/r/20210518162754.15940-3-patrice.chotard@foss.st.com


Signed-off-by: Mark Brown <broonie@kernel.org>
parent c955a0cc
Loading
Loading
Loading
Loading
+32 −13
Original line number Diff line number Diff line
@@ -473,20 +473,26 @@ static int spinand_erase_op(struct spinand_device *spinand,
	return spi_mem_exec_op(spinand->spimem, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
static int spinand_wait(struct spinand_device *spinand,
			unsigned long initial_delay_us,
			unsigned long poll_delay_us,
			u8 *s)
{
	unsigned long timeo =  jiffies + msecs_to_jiffies(400);
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
						      spinand->scratchbuf);
	u8 status;
	int ret;

	do {
		ret = spinand_read_status(spinand, &status);
	ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
				  initial_delay_us,
				  poll_delay_us,
				  SPINAND_WAITRDY_TIMEOUT_MS);
	if (ret)
		return ret;

	status = *spinand->scratchbuf;
	if (!(status & STATUS_BUSY))
		goto out;
	} while (time_before(jiffies, timeo));

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
@@ -526,7 +532,10 @@ static int spinand_reset_op(struct spinand_device *spinand)
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
	return spinand_wait(spinand,
			    SPINAND_RESET_INITIAL_DELAY_US,
			    SPINAND_RESET_POLL_DELAY_US,
			    NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -549,7 +558,10 @@ static int spinand_read_page(struct spinand_device *spinand,
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret < 0)
		return ret;

@@ -585,7 +597,10 @@ static int spinand_write_page(struct spinand_device *spinand,
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	ret = spinand_wait(spinand,
			   SPINAND_WRITE_INITIAL_DELAY_US,
			   SPINAND_WRITE_POLL_DELAY_US,
			   &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		return -EIO;

@@ -768,7 +783,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	ret = spinand_wait(spinand,
			   SPINAND_ERASE_INITIAL_DELAY_US,
			   SPINAND_ERASE_POLL_DELAY_US,
			   &status);

	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

+22 −0
Original line number Diff line number Diff line
@@ -170,6 +170,28 @@ struct spinand_op;
struct spinand_device;

#define SPINAND_MAX_ID_LEN	4
/*
 * For erase, write and read operations, we have the following timings:
 * tBERS (erase) 1ms to 4ms
 * tPROG 300us to 400us
 * tREAD 25us to 100us
 * In order to minimize latency, the min value is divided by 4 for the
 * initial delay, and by 20 for the poll delay.
 * For reset, the delay is 5us/10us/500us if the device is respectively
 * reading/programming/erasing when the RESET occurs. Since we always
 * issue a RESET when the device is IDLE, 5us is selected for both initial
 * and poll delay.
 */
#define SPINAND_READ_INITIAL_DELAY_US	6
#define SPINAND_READ_POLL_DELAY_US	5
#define SPINAND_RESET_INITIAL_DELAY_US	5
#define SPINAND_RESET_POLL_DELAY_US	5
#define SPINAND_WRITE_INITIAL_DELAY_US	75
#define SPINAND_WRITE_POLL_DELAY_US	15
#define SPINAND_ERASE_INITIAL_DELAY_US	250
#define SPINAND_ERASE_POLL_DELAY_US	50

#define SPINAND_WAITRDY_TIMEOUT_MS	400

/**
 * struct spinand_id - SPI NAND id structure