Commit 97fce126, authored by Avri Altman, committed by Ulf Hansson
Browse files

mmc: block: Issue a cache flush only when it's enabled



In command queueing mode, the cache isn't flushed via the mmc_flush_cache()
function, but instead by issuing a CMDQ_TASK_MGMT (CMD48) with a
FLUSH_CACHE opcode. In this path, we need to check whether the cache has
been enabled before deciding to flush it, along the lines of what's
being done in mmc_flush_cache().

To fix this problem, let's add a new bus ops callback ->cache_enabled() and
implement it for the mmc bus type. In this way, the mmc block device driver
can call it to know whether cache flushing should be done.

Fixes: 1e8e55b6 (mmc: block: Add CQE support)
Cc: stable@vger.kernel.org
Reported-by: Brendan Peter <bpeter@lytx.com>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Tested-by: Brendan Peter <bpeter@lytx.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20210425060207.2591-2-avri.altman@wdc.com
Link: https://lore.kernel.org/r/20210425060207.2591-3-avri.altman@wdc.com


[Ulf: Squashed the two patches and made some minor updates]
Signed-off-by: default avatarUlf Hansson <ulf.hansson@linaro.org>
parent 2f156712
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -2237,6 +2237,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
	case MMC_ISSUE_ASYNC:
		switch (req_op(req)) {
		case REQ_OP_FLUSH:
			if (!mmc_cache_enabled(host)) {
				blk_mq_end_request(req, BLK_STS_OK);
				return MMC_REQ_FINISHED;
			}
			ret = mmc_blk_cqe_issue_flush(mq, req);
			break;
		case REQ_OP_READ:
+9 −0
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@ struct mmc_bus_ops {
	int (*shutdown)(struct mmc_host *);
	int (*hw_reset)(struct mmc_host *);
	int (*sw_reset)(struct mmc_host *);
	bool (*cache_enabled)(struct mmc_host *);
};

void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -163,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
		host->ops->post_req(host, mrq, err);
}

static inline bool mmc_cache_enabled(struct mmc_host *host)
{
	if (host->bus_ops->cache_enabled)
		return host->bus_ops->cache_enabled(host);

	return false;
}

#endif
+7 −0
Original line number Diff line number Diff line
@@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
	}
}

static bool _mmc_cache_enabled(struct mmc_host *host)
{
	return host->card->ext_csd.cache_size > 0 &&
	       host->card->ext_csd.cache_ctrl & 1;
}

static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
@@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
	.hw_reset = _mmc_hw_reset,
	.cache_enabled = _mmc_cache_enabled,
};

/*
+1 −3
Original line number Diff line number Diff line
@@ -968,9 +968,7 @@ int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
	if (mmc_cache_enabled(card->host)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);