Commit 13c044e9 authored by Avri Altman, committed by Martin K. Petersen

scsi: ufs: ufshpb: Add "cold" regions timer

In order not to hang on to "cold" regions, we inactivate a region that
has had no READ access for a predefined amount of time - READ_TO_MS. For
that purpose, monitor the active regions list, polling it every
POLLING_INTERVAL_MS. On timeout expiry, add the region to the
"to-be-inactivated" list, unless it is clean and has not yet exhausted
its READ_TO_EXPIRIES budget - another parameter.

None of this applies to pinned regions.
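
Distilled, the per-region decision made on each poll is roughly the
following. This is a minimal user-space sketch, not the driver's code:
struct region, region_expired() and the millisecond clock are
illustrative stand-ins for ufshpb_region, the handler's inner loop and
its ktime_t deadline.

#include <stdbool.h>
#include <stdio.h>

#define READ_TO_MS       1000	/* region is "cold" after 1s without READs */
#define READ_TO_EXPIRIES 100	/* re-arm budget before forced inactivation */

/* Illustrative stand-in for the fields this patch adds to ufshpb_region. */
struct region {
	bool dirty;			/* what is_rgn_dirty() reports */
	unsigned int read_timeout_expiries;
	long long read_timeout_ms;	/* ktime_t deadline in the driver */
};

/*
 * Called for every active region on each POLLING_INTERVAL_MS tick.
 * Returns true if the region should be moved to the expired list.
 */
static bool region_expired(struct region *rgn, long long now_ms)
{
	if (now_ms <= rgn->read_timeout_ms)
		return false;		/* timer still running */

	rgn->read_timeout_expiries--;
	if (rgn->dirty || rgn->read_timeout_expiries == 0)
		return true;		/* inactivate */

	rgn->read_timeout_ms = now_ms + READ_TO_MS;	/* clean: re-arm */
	return false;
}

int main(void)
{
	struct region r = { .dirty = false,
			    .read_timeout_expiries = READ_TO_EXPIRIES,
			    .read_timeout_ms = 0 };

	/* Clean region past its deadline: timer is simply re-armed. */
	printf("expired: %d\n", region_expired(&r, 1500));	/* 0 */
	/* A dirty region past its deadline is expired immediately. */
	r.dirty = true;
	printf("expired: %d\n", region_expired(&r, 3000));	/* 1 */
	return 0;
}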

Link: https://lore.kernel.org/r/20210712095039.8093-9-avri.altman@wdc.com
Reviewed-by: Daejun Park <daejun7.park@samsung.com>
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 67001ff1
drivers/scsi/ufs/ufshpb.c: +71 −3
@@ -18,6 +18,9 @@

 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
 #define EVICTION_THRESHOLD (ACTIVATION_THRESHOLD << 5) /* 256 IOs */
+#define READ_TO_MS 1000
+#define READ_TO_EXPIRIES 100
+#define POLLING_INTERVAL_MS 200

 /* memory management */
 static struct kmem_cache *ufshpb_mctx_cache;
@@ -1032,12 +1035,63 @@ static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
 	return 0;
 }

+static void ufshpb_read_to_handler(struct work_struct *work)
+{
+	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
+					     ufshpb_read_to_work.work);
+	struct victim_select_info *lru_info = &hpb->lru_info;
+	struct ufshpb_region *rgn, *next_rgn;
+	unsigned long flags;
+	LIST_HEAD(expired_list);
+
+	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
+		return;
+
+	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+
+	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
+				 list_lru_rgn) {
+		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
+
+		if (timedout) {
+			rgn->read_timeout_expiries--;
+			if (is_rgn_dirty(rgn) ||
+			    rgn->read_timeout_expiries == 0)
+				list_add(&rgn->list_expired_rgn, &expired_list);
+			else
+				rgn->read_timeout = ktime_add_ms(ktime_get(),
+							 READ_TO_MS);
+		}
+	}
+
+	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+
+	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
+				 list_expired_rgn) {
+		list_del_init(&rgn->list_expired_rgn);
+		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
+		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
+		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
+	}
+
+	ufshpb_kick_map_work(hpb);
+
+	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
+
+	schedule_delayed_work(&hpb->ufshpb_read_to_work,
+			      msecs_to_jiffies(POLLING_INTERVAL_MS));
+}
+
 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
 				struct ufshpb_region *rgn)
 {
 	rgn->rgn_state = HPB_RGN_ACTIVE;
 	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
 	atomic_inc(&lru_info->active_cnt);
+	if (rgn->hpb->is_hcm) {
+		rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS);
+		rgn->read_timeout_expiries = READ_TO_EXPIRIES;
+	}
 }

 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
@@ -1819,6 +1873,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)

 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
+		INIT_LIST_HEAD(&rgn->list_expired_rgn);

 		if (rgn_idx == hpb->rgns_per_lu - 1) {
 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
@@ -1840,6 +1895,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 		}

 		rgn->rgn_flags = 0;
+		rgn->hpb = hpb;
 	}

 	return 0;
@@ -2063,9 +2119,12 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 	INIT_LIST_HEAD(&hpb->list_hpb_lu);

 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
-	if (hpb->is_hcm)
+	if (hpb->is_hcm) {
 		INIT_WORK(&hpb->ufshpb_normalization_work,
 			  ufshpb_normalization_work_handler);
+		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
+				  ufshpb_read_to_handler);
+	}

 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
 			  sizeof(struct ufshpb_req), 0, 0, NULL);
@@ -2099,6 +2158,10 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 	ufshpb_stat_init(hpb);
 	ufshpb_param_init(hpb);

+	if (hpb->is_hcm)
+		schedule_delayed_work(&hpb->ufshpb_read_to_work,
+				      msecs_to_jiffies(POLLING_INTERVAL_MS));
+
 	return 0;

 release_pre_req_mempool:
@@ -2165,9 +2228,10 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)

 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
 {
-	if (hpb->is_hcm)
+	if (hpb->is_hcm) {
+		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
 		cancel_work_sync(&hpb->ufshpb_normalization_work);
-
+	}
 	cancel_work_sync(&hpb->map_work);
 }

@@ -2275,6 +2339,10 @@ void ufshpb_resume(struct ufs_hba *hba)
 			continue;
 		ufshpb_set_state(hpb, HPB_PRESENT);
 		ufshpb_kick_map_work(hpb);
+		if (hpb->is_hcm)
+			schedule_delayed_work(&hpb->ufshpb_read_to_work,
+				msecs_to_jiffies(POLLING_INTERVAL_MS));
+
 	}
 }

drivers/scsi/ufs/ufshpb.h: +8 −0
@@ -115,6 +115,7 @@ struct ufshpb_subregion {
 };

 struct ufshpb_region {
+	struct ufshpb_lu *hpb;
 	struct ufshpb_subregion *srgn_tbl;
 	enum HPB_RGN_STATE rgn_state;
 	int rgn_idx;
@@ -132,6 +133,10 @@ struct ufshpb_region {
 	/* region reads - for host mode */
 	spinlock_t rgn_lock;
 	unsigned int reads;
+	/* region "cold" timer - for host mode */
+	ktime_t read_timeout;
+	unsigned int read_timeout_expiries;
+	struct list_head list_expired_rgn;
 };

 #define for_each_sub_region(rgn, i, srgn)				\
@@ -223,6 +228,9 @@ struct ufshpb_lu {
 	/* for selecting victim */
 	struct victim_select_info lru_info;
 	struct work_struct ufshpb_normalization_work;
+	struct delayed_work ufshpb_read_to_work;
+	unsigned long work_data_bits;
+#define TIMEOUT_WORK_RUNNING 0

 	/* pinned region information */
 	u32 lu_pinned_start;
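
A note on the TIMEOUT_WORK_RUNNING bit defined above: the handler opens
with test_and_set_bit() and ends with clear_bit() on work_data_bits, so a
run that finds the bit already set simply backs off. Below is a
user-space analogue of that guard using C11 atomics; the function names
and the atomics harness are illustrative, only the bit index is from the
patch.

#include <stdatomic.h>
#include <stdbool.h>

#define TIMEOUT_WORK_RUNNING 0	/* bit index, as in the patch */

static atomic_ulong work_data_bits;

/* Analogue of test_and_set_bit(): sets the bit, returns its old value. */
static bool test_and_set_running(void)
{
	unsigned long mask = 1UL << TIMEOUT_WORK_RUNNING;

	return atomic_fetch_or(&work_data_bits, mask) & mask;
}

/* Analogue of clear_bit(). */
static void clear_running(void)
{
	atomic_fetch_and(&work_data_bits, ~(1UL << TIMEOUT_WORK_RUNNING));
}

static void read_to_handler(void)
{
	if (test_and_set_running())
		return;		/* a previous run is still in flight */

	/* ... walk the LRU list and expire cold regions ... */

	clear_running();
}

int main(void)
{
	read_to_handler();	/* bit was clear: the scan runs */
	read_to_handler();	/* bit cleared at the end: runs again */
	return 0;
}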