// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 */
|
|
|
|
|
|
|
|
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "../clk-fractional-divider.h"
#include "clk.h"
|
|
|
|
|
/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
|
2014-08-27 00:54:56 +02:00
|
|
|
static struct clk *rockchip_clk_register_branch(const char *name,
|
2016-04-19 21:29:27 +02:00
|
|
|
const char *const *parent_names, u8 num_parents,
|
|
|
|
void __iomem *base,
|
2014-07-03 01:58:39 +02:00
|
|
|
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
|
2022-09-07 21:31:56 +05:30
|
|
|
u32 *mux_table,
|
2019-04-03 17:42:26 +08:00
|
|
|
int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
|
2014-07-03 01:58:39 +02:00
|
|
|
struct clk_div_table *div_table, int gate_offset,
|
|
|
|
u8 gate_shift, u8 gate_flags, unsigned long flags,
|
|
|
|
spinlock_t *lock)
|
|
|
|
{
|
2020-09-14 10:22:20 +08:00
|
|
|
struct clk_hw *hw;
|
2014-07-03 01:58:39 +02:00
|
|
|
struct clk_mux *mux = NULL;
|
|
|
|
struct clk_gate *gate = NULL;
|
|
|
|
struct clk_divider *div = NULL;
|
|
|
|
const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
|
|
|
|
*gate_ops = NULL;
|
2018-02-28 14:56:48 +08:00
|
|
|
int ret;
|
2014-07-03 01:58:39 +02:00
|
|
|
|
|
|
|
if (num_parents > 1) {
|
|
|
|
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
|
|
|
|
if (!mux)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
mux->reg = base + muxdiv_offset;
|
|
|
|
mux->shift = mux_shift;
|
|
|
|
mux->mask = BIT(mux_width) - 1;
|
|
|
|
mux->flags = mux_flags;
|
2022-09-07 21:31:56 +05:30
|
|
|
mux->table = mux_table;
|
2014-07-03 01:58:39 +02:00
|
|
|
mux->lock = lock;
|
|
|
|
mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
|
|
|
|
: &clk_mux_ops;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gate_offset >= 0) {
|
|
|
|
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
|
2018-02-28 14:56:48 +08:00
|
|
|
if (!gate) {
|
|
|
|
ret = -ENOMEM;
|
2016-02-02 11:37:50 +08:00
|
|
|
goto err_gate;
|
2018-02-28 14:56:48 +08:00
|
|
|
}
|
2014-07-03 01:58:39 +02:00
|
|
|
|
|
|
|
gate->flags = gate_flags;
|
|
|
|
gate->reg = base + gate_offset;
|
|
|
|
gate->bit_idx = gate_shift;
|
|
|
|
gate->lock = lock;
|
|
|
|
gate_ops = &clk_gate_ops;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (div_width > 0) {
|
|
|
|
div = kzalloc(sizeof(*div), GFP_KERNEL);
|
2018-02-28 14:56:48 +08:00
|
|
|
if (!div) {
|
|
|
|
ret = -ENOMEM;
|
2016-02-02 11:37:50 +08:00
|
|
|
goto err_div;
|
2018-02-28 14:56:48 +08:00
|
|
|
}
|
2014-07-03 01:58:39 +02:00
|
|
|
|
|
|
|
div->flags = div_flags;
|
2019-04-03 17:42:26 +08:00
|
|
|
if (div_offset)
|
|
|
|
div->reg = base + div_offset;
|
|
|
|
else
|
|
|
|
div->reg = base + muxdiv_offset;
|
2014-07-03 01:58:39 +02:00
|
|
|
div->shift = div_shift;
|
|
|
|
div->width = div_width;
|
|
|
|
div->lock = lock;
|
|
|
|
div->table = div_table;
|
2016-01-21 21:53:09 +01:00
|
|
|
div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
|
|
|
|
? &clk_divider_ro_ops
|
|
|
|
: &clk_divider_ops;
|
2014-07-03 01:58:39 +02:00
|
|
|
}
|
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
|
|
|
|
mux ? &mux->hw : NULL, mux_ops,
|
|
|
|
div ? &div->hw : NULL, div_ops,
|
|
|
|
gate ? &gate->hw : NULL, gate_ops,
|
|
|
|
flags);
|
|
|
|
if (IS_ERR(hw)) {
|
|
|
|
kfree(div);
|
|
|
|
kfree(gate);
|
|
|
|
return ERR_CAST(hw);
|
2018-02-28 14:56:48 +08:00
|
|
|
}
|
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
return hw->clk;
|
2016-02-02 11:37:50 +08:00
|
|
|
err_div:
|
|
|
|
kfree(gate);
|
|
|
|
err_gate:
|
|
|
|
kfree(mux);
|
2018-02-28 14:56:48 +08:00
|
|
|
return ERR_PTR(ret);
|
2014-07-03 01:58:39 +02:00
|
|
|
}
|
|
|
|
|
2015-12-22 22:27:59 +01:00
|
|
|
struct rockchip_clk_frac {
|
|
|
|
struct notifier_block clk_nb;
|
|
|
|
struct clk_fractional_divider div;
|
|
|
|
struct clk_gate gate;
|
|
|
|
|
|
|
|
struct clk_mux mux;
|
|
|
|
const struct clk_ops *mux_ops;
|
|
|
|
int mux_frac_idx;
|
|
|
|
|
|
|
|
bool rate_change_remuxed;
|
|
|
|
int rate_change_idx;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define to_rockchip_clk_frac_nb(nb) \
|
|
|
|
container_of(nb, struct rockchip_clk_frac, clk_nb)
|
|
|
|
|
|
|
|
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
|
|
|
|
unsigned long event, void *data)
|
|
|
|
{
|
|
|
|
struct clk_notifier_data *ndata = data;
|
|
|
|
struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
|
|
|
|
struct clk_mux *frac_mux = &frac->mux;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
|
|
|
|
__func__, event, ndata->old_rate, ndata->new_rate);
|
|
|
|
if (event == PRE_RATE_CHANGE) {
|
2016-04-19 21:29:27 +02:00
|
|
|
frac->rate_change_idx =
|
|
|
|
frac->mux_ops->get_parent(&frac_mux->hw);
|
2015-12-22 22:27:59 +01:00
|
|
|
if (frac->rate_change_idx != frac->mux_frac_idx) {
|
2016-04-19 21:29:27 +02:00
|
|
|
frac->mux_ops->set_parent(&frac_mux->hw,
|
|
|
|
frac->mux_frac_idx);
|
2015-12-22 22:27:59 +01:00
|
|
|
frac->rate_change_remuxed = 1;
|
|
|
|
}
|
|
|
|
} else if (event == POST_RATE_CHANGE) {
|
|
|
|
/*
|
|
|
|
* The POST_RATE_CHANGE notifier runs directly after the
|
|
|
|
* divider clock is set in clk_change_rate, so we'll have
|
|
|
|
* remuxed back to the original parent before clk_change_rate
|
|
|
|
* reaches the mux itself.
|
|
|
|
*/
|
|
|
|
if (frac->rate_change_remuxed) {
|
2016-04-19 21:29:27 +02:00
|
|
|
frac->mux_ops->set_parent(&frac_mux->hw,
|
|
|
|
frac->rate_change_idx);
|
2015-12-22 22:27:59 +01:00
|
|
|
frac->rate_change_remuxed = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return notifier_from_errno(ret);
|
|
|
|
}
|
|
|
|
|
2021-01-20 09:30:21 +00:00
|
|
|
/*
|
2017-08-01 18:22:24 +02:00
|
|
|
* fractional divider must set that denominator is 20 times larger than
|
|
|
|
* numerator to generate precise clock frequency.
|
|
|
|
*/
|
2017-08-23 15:35:41 -07:00
|
|
|
static void rockchip_fractional_approximation(struct clk_hw *hw,
|
2017-08-01 18:22:24 +02:00
|
|
|
unsigned long rate, unsigned long *parent_rate,
|
|
|
|
unsigned long *m, unsigned long *n)
|
|
|
|
{
|
2022-01-31 17:32:24 +01:00
|
|
|
struct clk_fractional_divider *fd = to_clk_fd(hw);
|
2017-08-01 18:22:24 +02:00
|
|
|
unsigned long p_rate, p_parent_rate;
|
|
|
|
struct clk_hw *p_parent;
|
|
|
|
|
|
|
|
p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
|
|
|
|
if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
|
|
|
|
p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
|
|
|
|
p_parent_rate = clk_hw_get_rate(p_parent);
|
|
|
|
*parent_rate = p_parent_rate;
|
|
|
|
}
|
|
|
|
|
2022-01-31 17:32:24 +01:00
|
|
|
fd->flags |= CLK_FRAC_DIVIDER_POWER_OF_TWO_PS;
|
|
|
|
|
2021-08-12 20:00:22 +03:00
|
|
|
clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
|
2017-08-01 18:22:24 +02:00
|
|
|
}
|
|
|
|
|
2016-03-09 10:37:04 +08:00
|
|
|
static struct clk *rockchip_clk_register_frac_branch(
|
|
|
|
struct rockchip_clk_provider *ctx, const char *name,
|
2015-05-28 10:45:51 +02:00
|
|
|
const char *const *parent_names, u8 num_parents,
|
|
|
|
void __iomem *base, int muxdiv_offset, u8 div_flags,
|
2014-08-27 00:54:21 +02:00
|
|
|
int gate_offset, u8 gate_shift, u8 gate_flags,
|
2015-12-22 22:27:59 +01:00
|
|
|
unsigned long flags, struct rockchip_clk_branch *child,
|
|
|
|
spinlock_t *lock)
|
2014-08-27 00:54:21 +02:00
|
|
|
{
|
2020-09-14 10:22:20 +08:00
|
|
|
struct clk_hw *hw;
|
2015-12-22 22:27:59 +01:00
|
|
|
struct rockchip_clk_frac *frac;
|
2014-08-27 00:54:21 +02:00
|
|
|
struct clk_gate *gate = NULL;
|
|
|
|
struct clk_fractional_divider *div = NULL;
|
|
|
|
const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
|
|
|
|
|
2015-12-22 22:27:59 +01:00
|
|
|
if (muxdiv_offset < 0)
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
if (child && child->branch_type != branch_mux) {
|
|
|
|
pr_err("%s: fractional child clock for %s can only be a mux\n",
|
|
|
|
__func__, name);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
2014-08-27 00:54:21 +02:00
|
|
|
|
2015-12-22 22:27:59 +01:00
|
|
|
frac = kzalloc(sizeof(*frac), GFP_KERNEL);
|
|
|
|
if (!frac)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
if (gate_offset >= 0) {
|
|
|
|
gate = &frac->gate;
|
2014-08-27 00:54:21 +02:00
|
|
|
gate->flags = gate_flags;
|
|
|
|
gate->reg = base + gate_offset;
|
|
|
|
gate->bit_idx = gate_shift;
|
|
|
|
gate->lock = lock;
|
|
|
|
gate_ops = &clk_gate_ops;
|
|
|
|
}
|
|
|
|
|
2015-12-22 22:27:59 +01:00
|
|
|
div = &frac->div;
|
2014-08-27 00:54:21 +02:00
|
|
|
div->flags = div_flags;
|
|
|
|
div->reg = base + muxdiv_offset;
|
|
|
|
div->mshift = 16;
|
2015-09-22 18:54:10 +03:00
|
|
|
div->mwidth = 16;
|
2014-08-27 00:54:21 +02:00
|
|
|
div->nshift = 0;
|
2015-09-22 18:54:10 +03:00
|
|
|
div->nwidth = 16;
|
2014-08-27 00:54:21 +02:00
|
|
|
div->lock = lock;
|
2017-08-01 18:22:24 +02:00
|
|
|
div->approximation = rockchip_fractional_approximation;
|
2014-08-27 00:54:21 +02:00
|
|
|
div_ops = &clk_fractional_divider_ops;
|
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
|
|
|
|
NULL, NULL,
|
|
|
|
&div->hw, div_ops,
|
|
|
|
gate ? &gate->hw : NULL, gate_ops,
|
|
|
|
flags | CLK_SET_RATE_UNGATE);
|
|
|
|
if (IS_ERR(hw)) {
|
2015-12-22 22:27:59 +01:00
|
|
|
kfree(frac);
|
2020-09-14 10:22:20 +08:00
|
|
|
return ERR_CAST(hw);
|
2015-12-22 22:27:59 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (child) {
|
|
|
|
struct clk_mux *frac_mux = &frac->mux;
|
|
|
|
struct clk_init_data init;
|
|
|
|
struct clk *mux_clk;
|
2018-05-21 19:57:50 +08:00
|
|
|
int ret;
|
2015-12-22 22:27:59 +01:00
|
|
|
|
2018-05-21 19:57:50 +08:00
|
|
|
frac->mux_frac_idx = match_string(child->parent_names,
|
|
|
|
child->num_parents, name);
|
2015-12-22 22:27:59 +01:00
|
|
|
frac->mux_ops = &clk_mux_ops;
|
|
|
|
frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
|
|
|
|
|
|
|
|
frac_mux->reg = base + child->muxdiv_offset;
|
|
|
|
frac_mux->shift = child->mux_shift;
|
|
|
|
frac_mux->mask = BIT(child->mux_width) - 1;
|
|
|
|
frac_mux->flags = child->mux_flags;
|
2022-09-07 21:31:56 +05:30
|
|
|
if (child->mux_table)
|
|
|
|
frac_mux->table = child->mux_table;
|
2015-12-22 22:27:59 +01:00
|
|
|
frac_mux->lock = lock;
|
|
|
|
frac_mux->hw.init = &init;
|
|
|
|
|
|
|
|
init.name = child->name;
|
|
|
|
init.flags = child->flags | CLK_SET_RATE_PARENT;
|
|
|
|
init.ops = frac->mux_ops;
|
|
|
|
init.parent_names = child->parent_names;
|
|
|
|
init.num_parents = child->num_parents;
|
|
|
|
|
|
|
|
mux_clk = clk_register(NULL, &frac_mux->hw);
|
2018-02-28 14:56:48 +08:00
|
|
|
if (IS_ERR(mux_clk)) {
|
|
|
|
kfree(frac);
|
2020-09-14 10:22:20 +08:00
|
|
|
return mux_clk;
|
2018-02-28 14:56:48 +08:00
|
|
|
}
|
2015-12-22 22:27:59 +01:00
|
|
|
|
2024-12-11 17:58:52 +01:00
|
|
|
rockchip_clk_set_lookup(ctx, mux_clk, child->id);
|
2015-12-22 22:27:59 +01:00
|
|
|
|
|
|
|
/* notifier on the fraction divider to catch rate changes */
|
|
|
|
if (frac->mux_frac_idx >= 0) {
|
2018-05-21 19:57:50 +08:00
|
|
|
pr_debug("%s: found fractional parent in mux at pos %d\n",
|
|
|
|
__func__, frac->mux_frac_idx);
|
2020-09-14 10:22:20 +08:00
|
|
|
ret = clk_notifier_register(hw->clk, &frac->clk_nb);
|
2015-12-22 22:27:59 +01:00
|
|
|
if (ret)
|
|
|
|
pr_err("%s: failed to register clock notifier for %s\n",
|
|
|
|
__func__, name);
|
|
|
|
} else {
|
|
|
|
pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
|
|
|
|
__func__, name, child->name);
|
|
|
|
}
|
|
|
|
}
|
2014-08-27 00:54:21 +02:00
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
return hw->clk;
|
2014-08-27 00:54:21 +02:00
|
|
|
}
|
|
|
|
|
2015-06-20 13:08:57 +02:00
|
|
|
static struct clk *rockchip_clk_register_factor_branch(const char *name,
|
|
|
|
const char *const *parent_names, u8 num_parents,
|
|
|
|
void __iomem *base, unsigned int mult, unsigned int div,
|
|
|
|
int gate_offset, u8 gate_shift, u8 gate_flags,
|
|
|
|
unsigned long flags, spinlock_t *lock)
|
|
|
|
{
|
2020-09-14 10:22:20 +08:00
|
|
|
struct clk_hw *hw;
|
2015-06-20 13:08:57 +02:00
|
|
|
struct clk_gate *gate = NULL;
|
|
|
|
struct clk_fixed_factor *fix = NULL;
|
|
|
|
|
|
|
|
/* without gate, register a simple factor clock */
|
|
|
|
if (gate_offset == 0) {
|
|
|
|
return clk_register_fixed_factor(NULL, name,
|
|
|
|
parent_names[0], flags, mult,
|
|
|
|
div);
|
|
|
|
}
|
|
|
|
|
|
|
|
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
|
|
|
|
if (!gate)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
gate->flags = gate_flags;
|
|
|
|
gate->reg = base + gate_offset;
|
|
|
|
gate->bit_idx = gate_shift;
|
|
|
|
gate->lock = lock;
|
|
|
|
|
|
|
|
fix = kzalloc(sizeof(*fix), GFP_KERNEL);
|
|
|
|
if (!fix) {
|
|
|
|
kfree(gate);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
fix->mult = mult;
|
|
|
|
fix->div = div;
|
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
|
|
|
|
NULL, NULL,
|
|
|
|
&fix->hw, &clk_fixed_factor_ops,
|
|
|
|
&gate->hw, &clk_gate_ops, flags);
|
|
|
|
if (IS_ERR(hw)) {
|
2015-06-20 13:08:57 +02:00
|
|
|
kfree(fix);
|
|
|
|
kfree(gate);
|
2020-09-14 10:22:20 +08:00
|
|
|
return ERR_CAST(hw);
|
2015-06-20 13:08:57 +02:00
|
|
|
}
|
|
|
|
|
2020-09-14 10:22:20 +08:00
|
|
|
return hw->clk;
|
2015-06-20 13:08:57 +02:00
|
|
|
}
|
|
|
|
|
2024-12-11 17:58:50 +01:00
|
|
|
/*
 * Allocate and initialise a clock provider context for a CRU.
 *
 * Every slot in the clk table starts out as an error pointer: -EPROBE_DEFER
 * when late clocks will be registered afterwards (so consumers retry), or
 * -ENOENT when the table is complete from the start.
 */
static struct rockchip_clk_provider *rockchip_clk_init_base(
		struct device_node *np, void __iomem *base,
		unsigned long nr_clks, bool has_late_clocks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	struct clk *default_clk_val;
	unsigned long i;	/* fix: match nr_clks' type, was signed int */

	default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = default_clk_val;

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	/* auxiliary GRF regmaps, keyed by grf type, filled by SoC init code */
	hash_init(ctx->aux_grf_table);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}
|
2024-12-11 17:58:50 +01:00
|
|
|
|
|
|
|
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
|
|
|
|
void __iomem *base,
|
|
|
|
unsigned long nr_clks)
|
|
|
|
{
|
|
|
|
return rockchip_clk_init_base(np, base, nr_clks, false);
|
|
|
|
}
|
2020-09-14 10:22:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_init);
|
2016-03-09 10:37:04 +08:00
|
|
|
|
2024-12-11 17:58:50 +01:00
|
|
|
struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
|
|
|
|
void __iomem *base,
|
|
|
|
unsigned long nr_clks)
|
|
|
|
{
|
|
|
|
return rockchip_clk_init_base(np, base, nr_clks, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_init_early);
|
|
|
|
|
|
|
|
void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ctx->clk_data.clk_num; ++i)
|
|
|
|
if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
|
|
|
|
ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_finalize);
|
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
void rockchip_clk_of_add_provider(struct device_node *np,
|
|
|
|
struct rockchip_clk_provider *ctx)
|
2016-03-09 10:37:04 +08:00
|
|
|
{
|
2016-03-13 00:25:53 +08:00
|
|
|
if (of_clk_add_provider(np, of_clk_src_onecell_get,
|
|
|
|
&ctx->clk_data))
|
|
|
|
pr_err("%s: could not register clk provider\n", __func__);
|
2014-07-03 01:58:39 +02:00
|
|
|
}
|
2020-09-14 10:22:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider);
|
2014-07-03 01:58:39 +02:00
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
|
2016-03-09 10:37:04 +08:00
|
|
|
struct rockchip_pll_clock *list,
|
2014-07-03 01:59:10 +02:00
|
|
|
unsigned int nr_pll, int grf_lock_offset)
|
|
|
|
{
|
|
|
|
struct clk *clk;
|
|
|
|
int idx;
|
|
|
|
|
|
|
|
for (idx = 0; idx < nr_pll; idx++, list++) {
|
2016-03-09 10:37:04 +08:00
|
|
|
clk = rockchip_clk_register_pll(ctx, list->type, list->name,
|
2014-07-03 01:59:10 +02:00
|
|
|
list->parent_names, list->num_parents,
|
2016-03-09 10:37:04 +08:00
|
|
|
list->con_offset, grf_lock_offset,
|
2014-07-03 01:59:10 +02:00
|
|
|
list->lock_shift, list->mode_offset,
|
2014-11-20 20:38:50 +01:00
|
|
|
list->mode_shift, list->rate_table,
|
2016-07-29 15:56:55 +08:00
|
|
|
list->flags, list->pll_flags);
|
2014-07-03 01:59:10 +02:00
|
|
|
if (IS_ERR(clk)) {
|
|
|
|
pr_err("%s: failed to register clock %s\n", __func__,
|
|
|
|
list->name);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2024-12-11 17:58:52 +01:00
|
|
|
rockchip_clk_set_lookup(ctx, clk, list->id);
|
2014-07-03 01:59:10 +02:00
|
|
|
}
|
|
|
|
}
|
2020-09-14 10:22:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
|
2014-07-03 01:59:10 +02:00
|
|
|
|
2024-01-26 19:18:22 +01:00
|
|
|
unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
|
|
|
|
unsigned int nr_clk)
|
|
|
|
{
|
|
|
|
unsigned long max = 0;
|
|
|
|
unsigned int idx;
|
|
|
|
|
|
|
|
for (idx = 0; idx < nr_clk; idx++, list++) {
|
|
|
|
if (list->id > max)
|
|
|
|
max = list->id;
|
|
|
|
if (list->child && list->child->id > max)
|
2024-09-12 13:32:05 +00:00
|
|
|
max = list->child->id;
|
2024-01-26 19:18:22 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return max;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
|
|
|
|
|
2024-12-11 17:58:53 +01:00
|
|
|
static struct platform_device *rockchip_clk_register_gate_link(
|
|
|
|
struct device *parent_dev,
|
|
|
|
struct rockchip_clk_provider *ctx,
|
|
|
|
struct rockchip_clk_branch *clkbr)
|
|
|
|
{
|
|
|
|
struct rockchip_gate_link_platdata gate_link_pdata = {
|
|
|
|
.ctx = ctx,
|
|
|
|
.clkbr = clkbr,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct platform_device_info pdevinfo = {
|
|
|
|
.parent = parent_dev,
|
|
|
|
.name = "rockchip-gate-link-clk",
|
|
|
|
.id = clkbr->id,
|
|
|
|
.fwnode = dev_fwnode(parent_dev),
|
|
|
|
.of_node_reused = true,
|
|
|
|
.data = &gate_link_pdata,
|
|
|
|
.size_data = sizeof(gate_link_pdata),
|
|
|
|
};
|
|
|
|
|
|
|
|
return platform_device_register_full(&pdevinfo);
|
|
|
|
}
|
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
|
|
|
|
struct rockchip_clk_branch *list,
|
|
|
|
unsigned int nr_clk)
|
2014-07-03 01:58:39 +02:00
|
|
|
{
|
clk: rockchip: introduce auxiliary GRFs
The MUXGRF clock branch type depends on having access to some sort of
GRF as a regmap to be registered. So far, we could easily get away with
only ever having one GRF stowed away in the context.
However, newer Rockchip SoCs, such as the RK3576, have several GRFs
which are relevant for clock purposes. It already depends on the pmu0
GRF for MUXGRF reasons, but could get away with not refactoring this
because it didn't need the sysgrf at all, so could overwrite the pointer
in the clock provider to the pmu0 grf regmap handle.
In preparation for needing to finally access more than one GRF per SoC,
let's untangle this. Introduce an auxiliary GRF hashmap, and a GRF type
enum. The hashmap is keyed by the enum, and clock branches now have a
struct member to store the value of that enum, which defaults to the
system GRF.
The SoC-specific _clk_init function can then insert pointers to GRF
regmaps into the hashmap based on the grf type.
During clock branch registration, we then pick the right GRF for each
branch from the hashmap if something other than the sys GRF is
requested.
The reason for doing it with this grf type indirection in the clock
branches is so that we don't need to define the MUXGRF branches in a
separate step, just to have a direct pointer to a regmap available
already.
Signed-off-by: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
Link: https://lore.kernel.org/r/20250502-rk3576-sai-v3-2-376cef19dd7c@collabora.com
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
2025-05-02 13:03:08 +02:00
|
|
|
struct regmap *grf = ctx->grf;
|
|
|
|
struct rockchip_aux_grf *agrf;
|
2024-03-25 20:33:36 +01:00
|
|
|
struct clk *clk;
|
2014-07-03 01:58:39 +02:00
|
|
|
unsigned int idx;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
for (idx = 0; idx < nr_clk; idx++, list++) {
|
|
|
|
flags = list->flags;
|
2024-03-25 20:33:36 +01:00
|
|
|
clk = NULL;
|
2014-07-03 01:58:39 +02:00
|
|
|
|
clk: rockchip: introduce auxiliary GRFs
The MUXGRF clock branch type depends on having access to some sort of
GRF as a regmap to be registered. So far, we could easily get away with
only ever having one GRF stowed away in the context.
However, newer Rockchip SoCs, such as the RK3576, have several GRFs
which are relevant for clock purposes. It already depends on the pmu0
GRF for MUXGRF reasons, but could get away with not refactoring this
because it didn't need the sysgrf at all, so could overwrite the pointer
in the clock provider to the pmu0 grf regmap handle.
In preparation for needing to finally access more than one GRF per SoC,
let's untangle this. Introduce an auxiliary GRF hashmap, and a GRF type
enum. The hashmap is keyed by the enum, and clock branches now have a
struct member to store the value of that enum, which defaults to the
system GRF.
The SoC-specific _clk_init function can then insert pointers to GRF
regmaps into the hashmap based on the grf type.
During clock branch registration, we then pick the right GRF for each
branch from the hashmap if something other than the sys GRF is
requested.
The reason for doing it with this grf type indirection in the clock
branches is so that we don't need to define the MUXGRF branches in a
separate step, just to have a direct pointer to a regmap available
already.
Signed-off-by: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
Link: https://lore.kernel.org/r/20250502-rk3576-sai-v3-2-376cef19dd7c@collabora.com
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
2025-05-02 13:03:08 +02:00
|
|
|
/* for GRF-dependent branches, choose the right grf first */
|
2025-05-08 20:27:51 +02:00
|
|
|
if ((list->branch_type == branch_grf_mux ||
|
2025-05-06 09:22:03 +00:00
|
|
|
list->branch_type == branch_grf_gate ||
|
|
|
|
list->branch_type == branch_grf_mmc) &&
|
|
|
|
list->grf_type != grf_type_sys) {
|
clk: rockchip: introduce auxiliary GRFs
The MUXGRF clock branch type depends on having access to some sort of
GRF as a regmap to be registered. So far, we could easily get away with
only ever having one GRF stowed away in the context.
However, newer Rockchip SoCs, such as the RK3576, have several GRFs
which are relevant for clock purposes. It already depends on the pmu0
GRF for MUXGRF reasons, but could get away with not refactoring this
because it didn't need the sysgrf at all, so could overwrite the pointer
in the clock provider to the pmu0 grf regmap handle.
In preparation for needing to finally access more than one GRF per SoC,
let's untangle this. Introduce an auxiliary GRF hashmap, and a GRF type
enum. The hashmap is keyed by the enum, and clock branches now have a
struct member to store the value of that enum, which defaults to the
system GRF.
The SoC-specific _clk_init function can then insert pointers to GRF
regmaps into the hashmap based on the grf type.
During clock branch registration, we then pick the right GRF for each
branch from the hashmap if something other than the sys GRF is
requested.
The reason for doing it with this grf type indirection in the clock
branches is so that we don't need to define the MUXGRF branches in a
separate step, just to have a direct pointer to a regmap available
already.
Signed-off-by: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
Link: https://lore.kernel.org/r/20250502-rk3576-sai-v3-2-376cef19dd7c@collabora.com
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
2025-05-02 13:03:08 +02:00
|
|
|
hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) {
|
|
|
|
if (agrf->type == list->grf_type) {
|
|
|
|
grf = agrf->grf;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-03 01:58:39 +02:00
|
|
|
/* catch simple muxes */
|
|
|
|
switch (list->branch_type) {
|
|
|
|
case branch_mux:
|
2022-09-07 21:31:56 +05:30
|
|
|
if (list->mux_table)
|
|
|
|
clk = clk_register_mux_table(NULL, list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
|
|
|
flags,
|
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
|
|
|
list->mux_shift, list->mux_width,
|
|
|
|
list->mux_flags, list->mux_table,
|
|
|
|
&ctx->lock);
|
|
|
|
else
|
|
|
|
clk = clk_register_mux(NULL, list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
|
|
|
flags,
|
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
|
|
|
list->mux_shift, list->mux_width,
|
|
|
|
list->mux_flags, &ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
break;
|
2025-05-08 20:27:51 +02:00
|
|
|
case branch_grf_mux:
|
2016-12-27 00:00:38 +01:00
|
|
|
clk = rockchip_clk_register_muxgrf(list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
clk: rockchip: introduce auxiliary GRFs
The MUXGRF clock branch type depends on having access to some sort of
GRF as a regmap to be registered. So far, we could easily get away with
only ever having one GRF stowed away in the context.
However, newer Rockchip SoCs, such as the RK3576, have several GRFs
which are relevant for clock purposes. It already depends on the pmu0
GRF for MUXGRF reasons, but could get away with not refactoring this
because it didn't need the sysgrf at all, so could overwrite the pointer
in the clock provider to the pmu0 grf regmap handle.
In preparation for needing to finally access more than one GRF per SoC,
let's untangle this. Introduce an auxiliary GRF hashmap, and a GRF type
enum. The hashmap is keyed by the enum, and clock branches now have a
struct member to store the value of that enum, which defaults to the
system GRF.
The SoC-specific _clk_init function can then insert pointers to GRF
regmaps into the hashmap based on the grf type.
During clock branch registration, we then pick the right GRF for each
branch from the hashmap if something other than the sys GRF is
requested.
The reason for doing it with this grf type indirection in the clock
branches is so that we don't need to define the MUXGRF branches in a
separate step, just to have a direct pointer to a regmap available
already.
Signed-off-by: Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
Link: https://lore.kernel.org/r/20250502-rk3576-sai-v3-2-376cef19dd7c@collabora.com
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
2025-05-02 13:03:08 +02:00
|
|
|
flags, grf, list->muxdiv_offset,
|
2016-12-27 00:00:38 +01:00
|
|
|
list->mux_shift, list->mux_width,
|
|
|
|
list->mux_flags);
|
|
|
|
break;
|
2014-07-03 01:58:39 +02:00
|
|
|
case branch_divider:
|
|
|
|
if (list->div_table)
|
|
|
|
clk = clk_register_divider_table(NULL,
|
|
|
|
list->name, list->parent_names[0],
|
2016-04-19 21:29:27 +02:00
|
|
|
flags,
|
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
2014-07-03 01:58:39 +02:00
|
|
|
list->div_shift, list->div_width,
|
|
|
|
list->div_flags, list->div_table,
|
2016-03-09 10:37:04 +08:00
|
|
|
&ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
else
|
|
|
|
clk = clk_register_divider(NULL, list->name,
|
|
|
|
list->parent_names[0], flags,
|
2016-03-09 10:37:04 +08:00
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
2014-07-03 01:58:39 +02:00
|
|
|
list->div_shift, list->div_width,
|
2016-03-09 10:37:04 +08:00
|
|
|
list->div_flags, &ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
break;
|
|
|
|
case branch_fraction_divider:
|
2016-03-09 10:37:04 +08:00
|
|
|
clk = rockchip_clk_register_frac_branch(ctx, list->name,
|
2014-08-27 00:54:21 +02:00
|
|
|
list->parent_names, list->num_parents,
|
2016-04-19 21:29:27 +02:00
|
|
|
ctx->reg_base, list->muxdiv_offset,
|
|
|
|
list->div_flags,
|
2014-08-27 00:54:21 +02:00
|
|
|
list->gate_offset, list->gate_shift,
|
2015-12-22 22:27:59 +01:00
|
|
|
list->gate_flags, flags, list->child,
|
2016-03-09 10:37:04 +08:00
|
|
|
&ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
break;
|
2018-06-15 10:16:50 +08:00
|
|
|
case branch_half_divider:
|
|
|
|
clk = rockchip_clk_register_halfdiv(list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
|
|
|
ctx->reg_base, list->muxdiv_offset,
|
|
|
|
list->mux_shift, list->mux_width,
|
|
|
|
list->mux_flags, list->div_shift,
|
|
|
|
list->div_width, list->div_flags,
|
|
|
|
list->gate_offset, list->gate_shift,
|
|
|
|
list->gate_flags, flags, &ctx->lock);
|
|
|
|
break;
|
2014-07-03 01:58:39 +02:00
|
|
|
case branch_gate:
|
|
|
|
flags |= CLK_SET_RATE_PARENT;
|
|
|
|
|
|
|
|
clk = clk_register_gate(NULL, list->name,
|
|
|
|
list->parent_names[0], flags,
|
2016-03-09 10:37:04 +08:00
|
|
|
ctx->reg_base + list->gate_offset,
|
|
|
|
list->gate_shift, list->gate_flags, &ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
break;
|
2025-05-02 13:03:09 +02:00
|
|
|
case branch_grf_gate:
|
|
|
|
flags |= CLK_SET_RATE_PARENT;
|
|
|
|
clk = rockchip_clk_register_gate_grf(list->name,
|
|
|
|
list->parent_names[0], flags, grf,
|
|
|
|
list->gate_offset, list->gate_shift,
|
|
|
|
list->gate_flags);
|
|
|
|
break;
|
2014-07-03 01:58:39 +02:00
|
|
|
case branch_composite:
|
|
|
|
clk = rockchip_clk_register_branch(list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
2016-04-19 21:29:27 +02:00
|
|
|
ctx->reg_base, list->muxdiv_offset,
|
|
|
|
list->mux_shift,
|
2014-07-03 01:58:39 +02:00
|
|
|
list->mux_width, list->mux_flags,
|
2022-09-07 21:31:56 +05:30
|
|
|
list->mux_table, list->div_offset,
|
|
|
|
list->div_shift, list->div_width,
|
2014-07-03 01:58:39 +02:00
|
|
|
list->div_flags, list->div_table,
|
|
|
|
list->gate_offset, list->gate_shift,
|
2016-03-09 10:37:04 +08:00
|
|
|
list->gate_flags, flags, &ctx->lock);
|
2014-07-03 01:58:39 +02:00
|
|
|
break;
|
2014-11-26 17:30:27 -08:00
|
|
|
case branch_mmc:
|
|
|
|
clk = rockchip_clk_register_mmc(
|
|
|
|
list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
2016-03-09 10:37:04 +08:00
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
2025-05-06 09:22:03 +00:00
|
|
|
NULL, 0,
|
|
|
|
list->div_shift
|
|
|
|
);
|
|
|
|
break;
|
|
|
|
case branch_grf_mmc:
|
|
|
|
clk = rockchip_clk_register_mmc(
|
|
|
|
list->name,
|
|
|
|
list->parent_names, list->num_parents,
|
2025-05-10 07:52:49 +00:00
|
|
|
NULL,
|
2025-05-06 09:22:03 +00:00
|
|
|
grf, list->muxdiv_offset,
|
2014-11-26 17:30:27 -08:00
|
|
|
list->div_shift
|
|
|
|
);
|
|
|
|
break;
|
2015-07-05 11:00:14 +02:00
|
|
|
case branch_inverter:
|
|
|
|
clk = rockchip_clk_register_inverter(
|
|
|
|
list->name, list->parent_names,
|
|
|
|
list->num_parents,
|
2016-03-09 10:37:04 +08:00
|
|
|
ctx->reg_base + list->muxdiv_offset,
|
|
|
|
list->div_shift, list->div_flags, &ctx->lock);
|
2015-07-05 11:00:14 +02:00
|
|
|
break;
|
2015-06-20 13:08:57 +02:00
|
|
|
case branch_factor:
|
|
|
|
clk = rockchip_clk_register_factor_branch(
|
|
|
|
list->name, list->parent_names,
|
2016-03-09 10:37:04 +08:00
|
|
|
list->num_parents, ctx->reg_base,
|
2015-06-20 13:08:57 +02:00
|
|
|
list->div_shift, list->div_width,
|
|
|
|
list->gate_offset, list->gate_shift,
|
2016-03-09 10:37:04 +08:00
|
|
|
list->gate_flags, flags, &ctx->lock);
|
2015-06-20 13:08:57 +02:00
|
|
|
break;
|
2016-08-22 11:36:17 +08:00
|
|
|
case branch_ddrclk:
|
|
|
|
clk = rockchip_clk_register_ddrclk(
|
|
|
|
list->name, list->flags,
|
|
|
|
list->parent_names, list->num_parents,
|
|
|
|
list->muxdiv_offset, list->mux_shift,
|
|
|
|
list->mux_width, list->div_shift,
|
|
|
|
list->div_width, list->div_flags,
|
|
|
|
ctx->reg_base, &ctx->lock);
|
|
|
|
break;
|
2024-12-11 17:58:53 +01:00
|
|
|
case branch_linked_gate:
|
|
|
|
/* must be registered late, fall-through for error message */
|
|
|
|
break;
|
2014-07-03 01:58:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* none of the cases above matched */
|
|
|
|
if (!clk) {
|
|
|
|
pr_err("%s: unknown clock type %d\n",
|
|
|
|
__func__, list->branch_type);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_ERR(clk)) {
|
|
|
|
pr_err("%s: failed to register clock %s: %ld\n",
|
|
|
|
__func__, list->name, PTR_ERR(clk));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2024-12-11 17:58:52 +01:00
|
|
|
rockchip_clk_set_lookup(ctx, clk, list->id);
|
2014-07-03 01:58:39 +02:00
|
|
|
}
|
|
|
|
}
|
2020-09-14 10:22:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
|
|
|
|
|
2024-12-11 17:58:53 +01:00
|
|
|
/**
 * rockchip_clk_register_late_branches - register branches that need a device
 * @dev: device owning the clock provider (used as parent for child devices)
 * @ctx: clock provider context
 * @list: array of clock branch descriptions to register
 * @nr_clk: number of entries in @list
 *
 * Some branch types (currently only branch_linked_gate) cannot be registered
 * during the early, device-less clock init and are instead instantiated here
 * as platform devices once a struct device is available.  Failures are
 * reported via dev_err() but do not abort registration of the remaining
 * entries.
 */
void rockchip_clk_register_late_branches(struct device *dev,
					 struct rockchip_clk_provider *ctx,
					 struct rockchip_clk_branch *list,
					 unsigned int nr_clk)
{
	unsigned int idx;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		struct platform_device *pdev = NULL;

		switch (list->branch_type) {
		case branch_linked_gate:
			pdev = rockchip_clk_register_gate_link(dev, ctx, list);
			break;
		default:
			/* all other branch types belong in the early path */
			dev_err(dev, "unknown clock type %d\n", list->branch_type);
			break;
		}

		/*
		 * NOTE(review): this only catches pdev == NULL; if
		 * rockchip_clk_register_gate_link() can return an ERR_PTR
		 * (e.g. from platform_device_register_full()), that case is
		 * not detected here — confirm against the helper.
		 */
		if (!pdev)
			dev_err(dev, "failed to register device for clock %s\n", list->name);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);
|
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
/**
 * rockchip_clk_register_armclk - register the CPU clock (armclk)
 * @ctx: clock provider context (supplies reg_base and the provider lock)
 * @lookup_id: clock id under which the result is stored in the provider's
 *             lookup table
 * @name: name of the new cpu clock
 * @parent_names: array of parent clock names
 * @num_parents: number of entries in @parent_names
 * @reg_data: register layout description for the cpu clock
 * @rates: rate table with per-frequency divider settings
 * @nrates: number of entries in @rates
 *
 * Thin wrapper around rockchip_clk_register_cpuclk(): on success the clock
 * is added to the provider lookup table; on failure an error is logged and
 * the function returns without storing anything.
 */
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
			unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
|
2014-09-04 22:10:43 +02:00
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
/**
 * rockchip_clk_protect_critical - keep critical clocks enabled
 * @clocks: array of clock names that must never be gated
 * @nclocks: number of entries in @clocks
 *
 * Looks up each named clock and takes a prepare+enable reference that is
 * never dropped, preventing the common clock framework from disabling it.
 * A name that cannot be looked up is silently skipped:
 * clk_prepare_enable(NULL) is a no-op by CCF convention.
 */
void rockchip_clk_protect_critical(const char *const clocks[],
				   int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		clk_prepare_enable(clk);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
|
2014-08-19 17:45:38 -07:00
|
|
|
|
2016-03-09 10:37:04 +08:00
|
|
|
/*
 * State for the reboot notifier below, filled in by
 * rockchip_register_restart_notifier():
 * rst_base    - base of the clock controller's register space
 * reg_restart - offset of the global soft-reset register within rst_base
 * cb_restart  - optional SoC-specific hook run before triggering the reset
 */
static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
|
2014-08-19 17:45:38 -07:00
|
|
|
/*
 * Restart handler: runs the optional SoC callback, then writes to the
 * soft-reset register configured via rockchip_register_restart_notifier()
 * to reset the SoC.  The value 0xfdb9 is presumably the magic key the CRU
 * requires to trigger a global soft reset — confirm against the SoC TRM.
 */
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}
|
|
|
|
|
|
|
|
/*
 * Restart notifier; priority 128 is the default "use this if nothing
 * better is available" level for restart handlers.
 */
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
|
|
|
|
|
2020-09-14 10:22:23 +08:00
|
|
|
/**
 * rockchip_register_restart_notifier - hook the SoC into the reboot path
 * @ctx: clock provider context whose reg_base contains the reset register
 * @reg: offset of the global soft-reset register within ctx->reg_base
 * @cb: optional callback invoked right before the reset is triggered
 *      (may be NULL)
 *
 * Stores the parameters in the file-scope state used by
 * rockchip_restart_notify() and registers the restart handler.
 * Registration failure is only logged, not propagated.
 */
void
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}
EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
|