Commit d058fd9e authored by Rahul Tanwar's avatar Rahul Tanwar Committed by Stephen Boyd
Browse files

clk: intel: Add CGU clock driver for a new SoC



Clock Generation Unit (CGU) is a new clock controller IP of a forthcoming
Intel network processor SoC named Lightning Mountain (LGM). It provides
programming interfaces to control & configure all CPU & peripheral clocks.
Add common clock framework based clock controller driver for CGU.

Signed-off-by: default avatarRahul Tanwar <rahul.tanwar@linux.intel.com>
Link: https://lkml.kernel.org/r/42a4f71847714df482bacffdcd84341a4052800b.1587102634.git.rahul.tanwar@linux.intel.com


[sboyd@kernel.org: Kill init function to alloc and cleanup newline]
Signed-off-by: default avatarStephen Boyd <sboyd@kernel.org>
parent e2266f4c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -360,6 +360,7 @@ source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
source "drivers/clk/x86/Kconfig"
source "drivers/clk/zynqmp/Kconfig"

endmenu
+8 −0
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0-only
config CLK_LGM_CGU
	bool "Clock driver for Lightning Mountain(LGM) platform"
	depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
	select OF_EARLY_FLATTREE
	help
	  Clock Generation Unit(CGU) driver for Intel Lightning Mountain(LGM)
	  network processor SoC.
+1 −0
Original line number Diff line number Diff line
@@ -3,3 +3,4 @@ obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE)	+= clk-st.o
clk-x86-lpss-objs		:= clk-lpt.o
obj-$(CONFIG_X86_INTEL_LPSS)	+= clk-x86-lpss.o
obj-$(CONFIG_CLK_LGM_CGU)	+= clk-cgu.o clk-cgu-pll.o clk-lgm.o
+156 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define to_lgm_clk_pll(_hw)	container_of(_hw, struct lgm_clk_pll, hw)
#define PLL_REF_DIV(x)		((x) + 0x08)

/*
 * Calculate formula:
 * rate = (prate * mult + (prate * frac) / frac_div) / div
 */
static unsigned long
lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
		  unsigned int div, unsigned int frac, unsigned int frac_div)
{
	u64 crate, frate, rate64;

	rate64 = prate;
	crate = rate64 * mult;
	frate = rate64 * frac;
	do_div(frate, frac_div);
	crate += frate;
	do_div(crate, div);

	return crate;
}

/*
 * Read back the current PLL configuration and compute the output rate.
 *
 * The 12-bit multiplier and 6-bit divider live in the PLL_REF_DIV
 * register; a 24-bit fraction lives in the PLL's base register.
 * LJPLL-type PLLs carry an additional fixed post-divider of 4.
 */
static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned int div, mult, frac;
	unsigned long flags;

	/* Sample all three fields under the lock for a consistent snapshot */
	spin_lock_irqsave(&pll->lock, flags);
	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
	spin_unlock_irqrestore(&pll->lock, flags);

	if (pll->type == TYPE_LJPLL)
		div *= 4;

	/* frac is scaled by 2^24 (BIT(24)) in the rate formula */
	return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
}

static int lgm_pll_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	unsigned int enabled;

	/* Bit 0 of the PLL register holds the enable state. */
	spin_lock_irqsave(&pll->lock, flags);
	enabled = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
	spin_unlock_irqrestore(&pll->lock, flags);

	return enabled;
}

static int lgm_pll_enable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	u32 stat;
	int err;

	spin_lock_irqsave(&pll->lock, flags);
	/* Set the enable bit, then poll (up to 100us, atomic context) until
	 * bit 0 of the register reads back as set.
	 */
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
	err = readl_poll_timeout_atomic(pll->membase + pll->reg,
					stat, stat & 0x1, 1, 100);
	spin_unlock_irqrestore(&pll->lock, flags);

	return err;
}

/* Gate the PLL by clearing bit 0 of its control register. */
static void lgm_pll_disable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
	spin_unlock_irqrestore(&pll->lock, flags);
}

/* PLL rates are read-only here: no set_rate/round_rate is provided. */
static const struct clk_ops lgm_pll_ops = {
	.recalc_rate = lgm_pll_recalc_rate,
	.is_enabled = lgm_pll_is_enabled,
	.enable = lgm_pll_enable,
	.disable = lgm_pll_disable,
};

/*
 * Allocate and register a single PLL clock described by @list.
 * Returns the new clk_hw on success or an ERR_PTR on failure.
 */
static struct clk_hw *
lgm_clk_register_pll(struct lgm_clk_provider *ctx,
		     const struct lgm_pll_clk_data *list)
{
	struct clk_init_data init = {};
	struct lgm_clk_pll *pll;
	struct clk_hw *hw;
	int ret;

	pll = devm_kzalloc(ctx->dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/* clk_hw_register() copies init, so a stack copy is fine here. */
	init.ops = &lgm_pll_ops;
	init.name = list->name;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	pll->membase = ctx->membase;
	pll->lock = ctx->lock;
	pll->reg = list->reg;
	pll->flags = list->flags;
	pll->type = list->type;
	pll->hw.init = &init;

	hw = &pll->hw;
	ret = clk_hw_register(ctx->dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
			  const struct lgm_pll_clk_data *list,
			  unsigned int nr_clk)
{
	struct clk_hw *hw;
	int i;

	for (i = 0; i < nr_clk; i++, list++) {
		hw = lgm_clk_register_pll(ctx, list);
		if (IS_ERR(hw)) {
			dev_err(ctx->dev, "failed to register pll: %s\n",
				list->name);
			return PTR_ERR(hw);
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
+636 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL 64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

/*
 * Register a fixed-rate clock described by @list.  When
 * CLOCK_FLAG_VAL_INIT is set, an associated divider field is programmed
 * first.  Note the fixed rate is carried in list->mux_flags (field reuse
 * in the branch descriptor).
 */
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
					     const struct lgm_clk_branch *list)
{
	unsigned long flags;

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	/*
	 * NOTE(review): dev is NULL here while the other register helpers
	 * pass ctx->dev -- confirm whether devm management was intended.
	 */
	return clk_hw_register_fixed_rate(NULL, list->name,
					  list->parent_data[0].name,
					  list->flags, list->mux_flags);
}

static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 sel;

	spin_lock_irqsave(&mux->lock, flags);
	/* Software-only muxes keep their selection in mux->reg itself. */
	if (mux->flags & MUX_CLK_SW)
		sel = mux->reg;
	else
		sel = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
				      mux->width);
	spin_unlock_irqrestore(&mux->lock, flags);

	return clk_mux_val_to_index(hw, NULL, mux->flags, sel);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
	unsigned long flags;
	u32 sel;

	sel = clk_mux_index_to_val(NULL, mux->flags, index);
	spin_lock_irqsave(&mux->lock, flags);
	/* Software-only muxes store the selection in mux->reg itself. */
	if (mux->flags & MUX_CLK_SW)
		mux->reg = sel;
	else
		lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
				mux->width, sel);
	spin_unlock_irqrestore(&mux->lock, flags);

	return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

/* Mux clocks: parent selection only, no gating or rate scaling. */
static const struct clk_ops lgm_clk_mux_ops = {
	.get_parent = lgm_clk_mux_get_parent,
	.set_parent = lgm_clk_mux_set_parent,
	.determine_rate = lgm_clk_mux_determine_rate,
};

/*
 * Allocate and register one mux clock described by @list.  A mux marked
 * MUX_CLK_SW keeps its selection purely in software (in mux->reg).
 * When CLOCK_FLAG_VAL_INIT is set, a default selection is programmed
 * after registration.
 */
static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
		     const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->mux_flags;
	u8 shift = list->mux_shift;
	u8 width = list->mux_width;
	u32 offset = list->mux_off;
	struct clk_init_data init = {};
	struct lgm_clk_mux *mux;
	struct clk_hw *hw;
	int ret;

	mux = devm_kzalloc(ctx->dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_mux_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	mux->membase = ctx->membase;
	mux->lock = ctx->lock;
	mux->reg = offset;
	mux->shift = shift;
	mux->width = width;
	mux->flags = cflags;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(ctx->dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&mux->lock, flags);
		lgm_set_clk_val(mux->membase, offset, shift, width,
				list->mux_val);
		spin_unlock_irqrestore(&mux->lock, flags);
	}

	return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(&div->lock, flags);
	val = lgm_get_clk_val(div->membase, div->reg, div->shift, div->width);
	spin_unlock_irqrestore(&div->lock, flags);

	/* Translate the raw field value via the generic divider helper */
	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

/* Delegate rounding to the generic divider helper. */
static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, div->table, div->width,
				  div->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long prate)
{
	struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
	unsigned long flags;
	int val;

	/* Map the requested rate to a raw field value (or an error code) */
	val = divider_get_val(rate, prate, div->table, div->width, div->flags);
	if (val < 0)
		return val;

	spin_lock_irqsave(&div->lock, flags);
	lgm_set_clk_val(div->membase, div->reg, div->shift, div->width, val);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

/* Write the divider's own gate bits: 1 ungates the output, 0 gates it. */
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
	struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
	unsigned long flags;

	spin_lock_irqsave(&divider->lock, flags);
	lgm_set_clk_val(divider->membase, divider->reg, divider->shift_gate,
			divider->width_gate, enable);
	spin_unlock_irqrestore(&divider->lock, flags);

	return 0;
}

/* clk_ops.enable callback: ungate the divider output. */
static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}

/* clk_ops.disable callback: gate the divider output. */
static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}

/* Divider clocks combine a divide field with their own gate bits. */
static const struct clk_ops lgm_clk_divider_ops = {
	.recalc_rate = lgm_clk_divider_recalc_rate,
	.round_rate = lgm_clk_divider_round_rate,
	.set_rate = lgm_clk_divider_set_rate,
	.enable = lgm_clk_divider_enable,
	.disable = lgm_clk_divider_disable,
};

/*
 * Allocate and register one divider clock described by @list.  The
 * divider also carries its own gate bits (shift_gate/width_gate).  When
 * CLOCK_FLAG_VAL_INIT is set, a default divide value is programmed
 * after registration.
 */
static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
			 const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->div_flags;
	u8 shift = list->div_shift;
	u8 width = list->div_width;
	u32 offset = list->div_off;
	struct clk_init_data init = {};
	struct lgm_clk_divider *div;
	struct clk_hw *hw;
	int ret;

	div = devm_kzalloc(ctx->dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_divider_ops;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = 1;

	div->membase = ctx->membase;
	div->lock = ctx->lock;
	div->reg = offset;
	div->shift = shift;
	div->width = width;
	div->shift_gate = list->div_shift_gate;
	div->width_gate = list->div_width_gate;
	div->flags = cflags;
	div->table = list->div_table;
	div->hw.init = &init;

	hw = &div->hw;
	ret = clk_hw_register(ctx->dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&div->lock, flags);
		lgm_set_clk_val(div->membase, offset, shift, width,
				list->div_val);
		spin_unlock_irqrestore(&div->lock, flags);
	}

	return hw;
}

/*
 * Register a fixed-factor (mult/div) clock.  When CLOCK_FLAG_VAL_INIT
 * is set, an associated divider field is programmed to list->div_val
 * after successful registration.
 */
static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list)
{
	unsigned long flags;
	struct clk_hw *hw;

	hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
					  list->parent_data[0].name, list->flags,
					  list->mult, list->div);
	if (IS_ERR(hw))
		return ERR_CAST(hw);

	if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&ctx->lock, flags);
		lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
				list->div_width, list->div_val);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return hw;
}

static int lgm_clk_gate_enable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int en_reg;
	unsigned long flags;

	/* Gates use a write-1 "enable" register separate from status. */
	spin_lock_irqsave(&gate->lock, flags);
	en_reg = GATE_HW_REG_EN(gate->reg);
	lgm_set_clk_val(gate->membase, en_reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int dis_reg;
	unsigned long flags;

	/* Writing 1 to the "disable" register gates the clock. */
	spin_lock_irqsave(&gate->lock, flags);
	dis_reg = GATE_HW_REG_DIS(gate->reg);
	lgm_set_clk_val(gate->membase, dis_reg, gate->shift, 1, 1);
	spin_unlock_irqrestore(&gate->lock, flags);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
	unsigned int stat_reg, enabled;
	unsigned long flags;

	/* The gate state is read back from the dedicated status register. */
	spin_lock_irqsave(&gate->lock, flags);
	stat_reg = GATE_HW_REG_STAT(gate->reg);
	enabled = lgm_get_clk_val(gate->membase, stat_reg, gate->shift, 1);
	spin_unlock_irqrestore(&gate->lock, flags);

	return enabled;
}

/* Gate clocks: pure on/off control via set/clear/status registers. */
static const struct clk_ops lgm_clk_gate_ops = {
	.enable = lgm_clk_gate_enable,
	.disable = lgm_clk_gate_disable,
	.is_enabled = lgm_clk_gate_is_enabled,
};

/*
 * Allocate and register one gate clock described by @list.  Gates use
 * separate EN/DIS/STAT registers.  When CLOCK_FLAG_VAL_INIT is set, the
 * gate bit is pre-programmed to list->gate_val after registration.
 */
static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
		      const struct lgm_clk_branch *list)
{
	unsigned long flags, cflags = list->gate_flags;
	const char *pname = list->parent_data[0].name;
	u8 shift = list->gate_shift;
	u32 offset = list->gate_off;
	struct clk_init_data init = {};
	struct lgm_clk_gate *gate;
	struct clk_hw *hw;
	int ret;

	gate = devm_kzalloc(ctx->dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = list->name;
	init.ops = &lgm_clk_gate_ops;
	init.flags = list->flags;
	/* A gate may be a root clock with no parent at all. */
	init.parent_names = pname ? &pname : NULL;
	init.num_parents = pname ? 1 : 0;

	gate->membase = ctx->membase;
	gate->lock = ctx->lock;
	gate->reg = offset;
	gate->shift = shift;
	gate->flags = cflags;
	gate->hw.init = &init;

	hw = &gate->hw;
	ret = clk_hw_register(ctx->dev, hw);
	if (ret)
		return ERR_PTR(ret);

	if (cflags & CLOCK_FLAG_VAL_INIT) {
		spin_lock_irqsave(&gate->lock, flags);
		lgm_set_clk_val(gate->membase, offset, shift, 1,
				list->gate_val);
		spin_unlock_irqrestore(&gate->lock, flags);
	}

	return hw;
}

/*
 * Register a table of @nr_clk branch clocks (fixed rate, mux, divider,
 * fixed factor and gate clocks) and record each clk_hw in
 * ctx->clk_data.hws under its binding id.
 *
 * Returns 0 on success or a negative error code on the first failure.
 */
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
			      const struct lgm_clk_branch *list,
			      unsigned int nr_clk)
{
	struct clk_hw *hw;
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		switch (list->type) {
		case CLK_TYPE_FIXED:
			hw = lgm_clk_register_fixed(ctx, list);
			break;
		case CLK_TYPE_MUX:
			hw = lgm_clk_register_mux(ctx, list);
			break;
		case CLK_TYPE_DIVIDER:
			hw = lgm_clk_register_divider(ctx, list);
			break;
		case CLK_TYPE_FIXED_FACTOR:
			hw = lgm_clk_register_fixed_factor(ctx, list);
			break;
		case CLK_TYPE_GATE:
			hw = lgm_clk_register_gate(ctx, list);
			break;
		default:
			dev_err(ctx->dev, "invalid clk type\n");
			return -EINVAL;
		}

		if (IS_ERR(hw)) {
			dev_err(ctx->dev,
				"register clk: %s, type: %u failed!\n",
				list->name, list->type);
			/*
			 * Propagate the real error instead of masking it
			 * as -EIO (matches lgm_clk_register_plls()).
			 */
			return PTR_ERR(hw);
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}

static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned int div0, div1, exdiv;
	unsigned long flags;
	u64 rate;

	/* Snapshot both divider fields and the extra-divider flag */
	spin_lock_irqsave(&ddiv->lock, flags);
	div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift0, ddiv->width0) + 1;
	div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
			       ddiv->shift1, ddiv->width1) + 1;
	exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
				ddiv->shift2, ddiv->width2);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	rate = (u64)parent_rate;
	do_div(rate, div0);
	do_div(rate, div1);

	/* Extra divider enabled: apply the fixed mult/div scaling */
	if (exdiv) {
		do_div(rate, ddiv->div);
		rate *= ddiv->mult;
	}

	return rate;
}

/* Ungate the ddiv output via its gate bits. */
static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);
	return 0;
}

/* Gate the ddiv output via its gate bits. */
static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&ddiv->lock, flags);
	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
			ddiv->width_gate, 0);
	spin_unlock_irqrestore(&ddiv->lock, flags);
}

/*
 * Factor @div into *ddiv1 * *ddiv2 == div with each factor at most
 * MAX_DDIV_REG; @div is clamped to MAX_DIVIDER_VAL first.  div <= 1
 * yields the identity pair (1, 1).
 * Returns 0 on success, -EINVAL when no such factorization exists.
 */
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
	u32 idx;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div <= 1)
		return 0;

	/* Find the smallest second factor keeping both fields in range */
	for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
		if (div % idx == 0 && div / idx <= MAX_DDIV_REG) {
			*ddiv1 = div / idx;
			*ddiv2 = idx;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Program the cascaded divider pair for @rate given parent @prate.
 *
 * If the hardware 2.5 pre-divider (bit at shift2) is enabled, the
 * requested divider is scaled by 2/5 before being factorized into the
 * two divider fields.
 *
 * Returns 0 on success, -EINVAL when the divider is zero or cannot be
 * decomposed into two in-range factors.
 */
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
		      unsigned long prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;

	div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

	spin_lock_irqsave(&ddiv->lock, flags);
	/*
	 * Scale by 2/5 in a single rounding step (div * 2 / 5) so the
	 * result agrees with lgm_clk_ddiv_round_rate(); the previous
	 * div / 5 * 2 order lost precision to an early rounding
	 * (e.g. div == 7: old 1 * 2 == 2 vs. correct round(14/5) == 3).
	 */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1))
		div = DIV_ROUND_CLOSEST_ULL((u64)div * 2, 5);

	if (!div) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return -EINVAL;
	}

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
			ddiv1 - 1);

	lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
			ddiv2 - 1);
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return 0;
}

/*
 * Determine the achievable rate closest to @rate given parent *prate.
 *
 * The raw divider is *prate / rate (rounded).  If the hardware 2.5
 * pre-divider bit (shift2) is set, that divider is scaled by 2/5 before
 * being factorized into the two cascaded fields, and the resulting rate
 * is scaled back at the end.  If div has no valid factorization, div + 1
 * is tried as a fallback.
 *
 * Returns the rounded rate, *prate when the divider computes to 0, or
 * -EINVAL when neither div nor div + 1 can be factorized.
 */
static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
	u32 div, ddiv1, ddiv2;
	unsigned long flags;
	u64 rate64 = rate;

	div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

	/* if predivide bit is enabled, modify div by factor of 2.5 */
	spin_lock_irqsave(&ddiv->lock, flags);
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		div = div * 2;
		div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
	}

	/* div is u32, so "<= 0" is effectively a zero check */
	if (div <= 0) {
		spin_unlock_irqrestore(&ddiv->lock, flags);
		return *prate;
	}

	if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
		if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
			spin_unlock_irqrestore(&ddiv->lock, flags);
			return -EINVAL;
		}
	}

	rate64 = *prate;
	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	/* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
	/* (the bit is deliberately re-read under the same lock hold) */
	if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
		rate64 = rate64 * 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}
	spin_unlock_irqrestore(&ddiv->lock, flags);

	return rate64;
}

/* Cascaded-divider (ddiv) clocks: two divide fields plus gate bits. */
static const struct clk_ops lgm_clk_ddiv_ops = {
	.recalc_rate = lgm_clk_ddiv_recalc_rate,
	.enable	= lgm_clk_ddiv_enable,
	.disable = lgm_clk_ddiv_disable,
	.set_rate = lgm_clk_ddiv_set_rate,
	.round_rate = lgm_clk_ddiv_round_rate,
};

/*
 * Register a table of @nr_clk cascaded-divider (ddiv) clocks and record
 * each clk_hw in ctx->clk_data.hws under its binding id.
 *
 * Returns 0 on success or a negative error code on the first failure.
 */
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
			  const struct lgm_clk_ddiv_data *list,
			  unsigned int nr_clk)
{
	struct device *dev = ctx->dev;
	struct lgm_clk_ddiv *ddiv;
	struct clk_hw *hw;
	unsigned int idx;
	int ret;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		/*
		 * Fresh, zero-initialized init data each iteration
		 * (replaces the old memset; also drops a redundant
		 * "ddiv = NULL" that was immediately overwritten).
		 * clk_hw_register() copies it, so stack scope is fine.
		 */
		struct clk_init_data init = {};

		ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
		if (!ddiv)
			return -ENOMEM;

		init.name = list->name;
		init.ops = &lgm_clk_ddiv_ops;
		init.flags = list->flags;
		init.parent_data = list->parent_data;
		init.num_parents = 1;

		ddiv->membase = ctx->membase;
		ddiv->lock = ctx->lock;
		ddiv->reg = list->reg;
		ddiv->shift0 = list->shift0;
		ddiv->width0 = list->width0;
		ddiv->shift1 = list->shift1;
		ddiv->width1 = list->width1;
		ddiv->shift_gate = list->shift_gate;
		ddiv->width_gate = list->width_gate;
		ddiv->shift2 = list->ex_shift;
		ddiv->width2 = list->ex_width;
		ddiv->flags = list->div_flags;
		/* Optional extra divider, expressed as mult/div = 2/5 */
		ddiv->mult = 2;
		ddiv->div = 5;
		ddiv->hw.init = &init;

		hw = &ddiv->hw;
		ret = clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "register clk: %s failed!\n", list->name);
			return ret;
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
Loading