Commit 78f815c1 authored by Chia-I Wu, committed by Rob Clark
Browse files

drm/msm: return the average load over the polling period



simple_ondemand interacts poorly with clamp_to_idle.  It only looks at
the load since the last get_dev_status call, while it should really look
at the load over polling_ms.  When clamp_to_idle is true, it almost always
picks the lowest frequency on active because the gpu is idle between
msm_devfreq_idle/msm_devfreq_active.

This logic could potentially be moved into devfreq core.

Fixes: 7c0ffcd4 ("drm/msm/gpu: Respect PM QoS constraints")
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Cc: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20220416003314.59211-3-olvaffe@gmail.com


Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 15c41198
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
@@ -118,6 +119,8 @@ struct msm_gpu_devfreq {
	/** idle_time: Time of last transition to idle: */
	ktime_t idle_time;

	struct devfreq_dev_status average_status;

	/**
	 * idle_work:
	 *
+59 −1
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@

#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/math64.h>
#include <linux/units.h>

/*
@@ -75,12 +76,69 @@ static void get_raw_dev_status(struct msm_gpu *gpu,
	status->busy_time = busy_time;
}

/*
 * Fold a freshly sampled raw dev status into df->average_status so that
 * the averaged status always covers roughly the last polling period.
 *
 * simple_ondemand interacts poorly with gpu->clamp_to_idle: enforcing the
 * constraint on idle calls get_dev_status (resetting the raw stats), and
 * removing it on active calls get_dev_status again with busy_time ~0.
 * Reporting the average load over the past polling_ms smooths this out.
 */
static void update_average_dev_status(struct msm_gpu *gpu,
		const struct devfreq_dev_status *raw)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;
	struct devfreq_dev_status *hist = &df->average_status;
	const u32 period_ms = df->devfreq->profile->polling_ms;
	const u32 history_cap_ms = period_ms * 11 / 10;
	u64 freq_sum;

	/*
	 * A raw sample spanning a full polling period makes the history
	 * redundant; with no history at all there is nothing to blend.
	 * Either way the raw sample simply becomes the new average.
	 */
	if (!hist->total_time ||
	    div_u64(raw->total_time, USEC_PER_MSEC) >= period_ms) {
		*hist = *raw;
		return;
	}

	/*
	 * The history is a single devfreq_dev_status rather than a list,
	 * so freq and load are assumed uniform across hist->total_time.
	 * Dropping the oldest data is therefore just scaling busy_time
	 * and total_time down by the same factor.
	 */
	if (div_u64(hist->total_time + raw->total_time, USEC_PER_MSEC) >=
			history_cap_ms) {
		const u32 keep_time = period_ms * USEC_PER_MSEC -
			raw->total_time;

		hist->busy_time = div_u64(
				mul_u32_u32(hist->busy_time, keep_time),
				hist->total_time);
		hist->total_time = keep_time;
	}

	/* Time-weighted average frequency across history + raw sample. */
	freq_sum = mul_u32_u32(hist->current_frequency, hist->total_time);
	freq_sum += mul_u32_u32(raw->current_frequency, raw->total_time);
	do_div(freq_sum, hist->total_time + raw->total_time);

	hist->current_frequency = freq_sum;
	hist->busy_time += raw->busy_time;
	hist->total_time += raw->total_time;
}

/*
 * devfreq get_dev_status callback.
 *
 * Samples the raw counters exactly once, folds them into the running
 * average, and reports the averaged status so the governor sees the load
 * over the polling period (see update_average_dev_status()).
 *
 * Note: the raw counters must only be read once per call — sampling
 * resets them, so a second read would observe ~zero elapsed time and
 * corrupt the average.  (The flattened diff contained a leftover
 * pre-patch "get_raw_dev_status(gpu, status)" call doing exactly that;
 * it is removed here.)
 *
 * Returns 0 (always succeeds).
 */
static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	struct devfreq_dev_status raw;

	get_raw_dev_status(gpu, &raw);
	update_average_dev_status(gpu, &raw);
	*status = gpu->devfreq.average_status;

	return 0;
}