Commit 009c963e authored by Georgi Djakov's avatar Georgi Djakov
Browse files

Merge branch 'icc-rpm' into icc-next

This patch set adds bucket support to the icc-rpm driver by implementing
a mechanism similar to the one already present in the icc-rpmh driver.

It uses the interconnect path tag to indicate which buckets a bandwidth
vote targets; there are three kinds of buckets: AWC, WAKE and SLEEP.
Finally, the WAKE and SLEEP bucket values are used to set the
corresponding clocks (active and sleep clocks). So far, we keep the AWC
bucket but don't really use it.

Link: https://lore.kernel.org/r/20220712015929.2789881-1-leo.yan@linaro.org


Signed-off-by: default avatarGeorgi Djakov <djakov@kernel.org>
parents 33f033dc e3305daa
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -45,7 +45,11 @@ properties:
      - qcom,sdm660-snoc

  '#interconnect-cells':
    const: 1
    description: |
      Value: <1> is one cell in an interconnect specifier for the
      interconnect node id, <2> requires the interconnect node id and an
      extra path tag.
    enum: [ 1, 2 ]

  clocks:
    minItems: 2
+3 −0
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o

interconnect_qcom-y			:= icc-common.o
icc-bcm-voter-objs			:= bcm-voter.o
qnoc-msm8916-objs			:= msm8916.o
qnoc-msm8939-objs			:= msm8939.o
+34 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Linaro Ltd.
 */

#include <linux/of.h>
#include <linux/slab.h>

#include "icc-common.h"

/*
 * qcom_icc_xlate_extended - translate an OF specifier into an icc_node_data
 * @spec: OF phandle arguments (cell 0 = node id, optional cell 1 = path tag)
 * @data: provider data handed through to of_icc_xlate_onecell()
 *
 * Resolves the interconnect node via the standard one-cell translation and
 * wraps it in a freshly allocated icc_node_data so that an optional path tag
 * from the second specifier cell can be carried alongside the node.
 *
 * Return: the new icc_node_data on success, or an ERR_PTR() on translation
 * or allocation failure. The caller owns the returned structure.
 */
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
{
	struct icc_node *node;
	struct icc_node_data *ndata;

	node = of_icc_xlate_onecell(spec, data);
	if (IS_ERR(node))
		return ERR_CAST(node);

	ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
	if (!ndata)
		return ERR_PTR(-ENOMEM);

	ndata->node = node;

	/* A second specifier cell, when present, carries the path tag. */
	if (spec->args_count == 2)
		ndata->tag = spec->args[1];
	else if (spec->args_count > 2)
		pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);

	return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
+13 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2022 Linaro Ltd.
 */

#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
#define __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__

#include <linux/interconnect-provider.h>

/*
 * Extended DT xlate callback shared by the Qualcomm interconnect providers:
 * resolves the node from the first specifier cell and, when a second cell is
 * present, records it as the path tag. Returns an allocated icc_node_data
 * (caller-owned) or an ERR_PTR() on failure.
 */
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);

#endif
+142 −26
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
@@ -233,48 +234,162 @@ static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
	return ret;
}

/*
 * __qcom_icc_set - apply a bandwidth value for one endpoint node
 * @n: icc node being updated
 * @qn: the node's Qualcomm provider-specific data
 * @sum_bw: aggregated bandwidth to program, in bps
 *
 * RPM-owned nodes are voted through the RPM processor; AP-owned nodes with a
 * valid QoS mode are programmed directly. AP-owned nodes without a QoS mode
 * need no action here.
 *
 * Return: 0 on success or a negative errno from the underlying setter.
 */
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
			  u64 sum_bw)
{
	/* Not AP-owned: forward the vote to the RPM processor. */
	if (!qn->qos.ap_owned)
		return qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);

	/* AP-owned with a configured QoS mode: program it directly. */
	if (qn->qos.qos_mode != -1)
		return qcom_icc_qos_set(n, sum_bw);

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - reset per-bucket sums before re-aggregation
 * @node: icc node to operate on
 *
 * Called by the core before each aggregation pass so stale per-bucket
 * average and peak values do not leak into the new totals.
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn = node->data;
	size_t bucket;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		qn->sum_avg[bucket] = 0;
		qn->max_peak[bucket] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - fold one request into the buckets selected by tag
 * @node: node to aggregate
 * @tag: bitmask of buckets this request applies to (0 means "always")
 * @avg_bw: new average bandwidth to add
 * @peak_bw: new peak bandwidth to max against
 * @agg_avg: running aggregate average bandwidth
 * @agg_peak: running aggregate peak bandwidth
 *
 * Return: always 0.
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	struct qcom_icc_node *qn = node->data;
	/* An untagged request votes into the "always on" buckets. */
	u32 bucket_mask = tag ? tag : QCOM_ICC_TAG_ALWAYS;
	size_t i;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (!(bucket_mask & BIT(i)))
			continue;
		qn->sum_avg[i] += avg_bw;
		qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
	}

	/* Also maintain the framework-visible overall aggregates. */
	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);

	return 0;
}

/**
 * qcom_icc_bus_aggregate - sum per-bucket bandwidth across the whole bus
 * @provider: generic interconnect provider
 * @agg_avg: output array of aggregated average bandwidth, one per bucket
 * @agg_peak: output array of aggregated peak bandwidth, one per bucket
 * @max_agg_avg: output for the largest per-bucket aggregated average
 *
 * Walks every node on the provider and accumulates each node's per-bucket
 * average (summed) and peak (maxed) bandwidth requests.
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
				   u64 *agg_avg, u64 *agg_peak,
				   u64 *max_agg_avg)
{
	struct icc_node *node;
	int i;

	/* Start every bucket (and the overall max) from zero. */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
		agg_avg[i] = agg_peak[i] = 0;
	*max_agg_avg = 0;

	/* Accumulate each node's per-bucket requests into the bus totals. */
	list_for_each_entry(node, &provider->nodes, node_list) {
		struct qcom_icc_node *qn = node->data;

		for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
			agg_avg[i] += qn->sum_avg[i];
			agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
		}
	}

	/* Report the largest aggregated average found in any bucket. */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
		*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}

static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	struct icc_provider *provider;
	struct icc_node *n;
	u64 sum_bw;
	u64 max_peak_bw;
	u64 rate;
	u32 agg_avg = 0;
	u32 agg_peak = 0;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
	u64 max_agg_avg;
	int ret, i;
	int bucket;

	qn = src->data;
	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	list_for_each_entry(n, &provider->nodes, node_list)
		provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
				    &agg_avg, &agg_peak);
	qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);

	sum_bw = icc_units_to_bps(agg_avg);
	max_peak_bw = icc_units_to_bps(agg_peak);
	sum_bw = icc_units_to_bps(max_agg_avg);

	if (!qn->qos.ap_owned) {
		/* send bandwidth request message to the RPM processor */
		ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
	ret = __qcom_icc_set(src, src_qn, sum_bw);
	if (ret)
		return ret;
	} else if (qn->qos.qos_mode != -1) {
		/* set bandwidth directly from the AP */
		ret = qcom_icc_qos_set(src, sum_bw);
	if (dst_qn) {
		ret = __qcom_icc_set(dst, dst_qn, sum_bw);
		if (ret)
			return ret;
	}

	rate = max(sum_bw, max_peak_bw);
	for (i = 0; i < qp->num_clks; i++) {
		/*
		 * Use WAKE bucket for active clock, otherwise, use SLEEP bucket
		 * for other clocks.  If a platform doesn't set interconnect
		 * path tags, by default use sleep bucket for all clocks.
		 *
		 * Note, AMC bucket is not supported yet.
		 */
		if (!strcmp(qp->bus_clks[i].id, "bus_a"))
			bucket = QCOM_ICC_BUCKET_WAKE;
		else
			bucket = QCOM_ICC_BUCKET_SLEEP;

	do_div(rate, qn->buswidth);
		rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
		do_div(rate, src_qn->buswidth);
		rate = min_t(u64, rate, LONG_MAX);

	for (i = 0; i < qp->num_clks; i++) {
		if (qp->bus_clk_rate[i] == rate)
			continue;

@@ -394,8 +509,9 @@ int qnoc_probe(struct platform_device *pdev)
	INIT_LIST_HEAD(&provider->nodes);
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->aggregate = icc_std_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	ret = icc_provider_add(provider);
Loading