linux/drivers/clk/imx/clk-busy.c
Brian Masney 62a88813c1
clk: imx: busy: convert from round_rate() to determine_rate()
The round_rate() clk op is deprecated, so migrate this driver from
round_rate() to determine_rate() using the Coccinelle semantic patch
from the cover letter of this series.

The change to call busy->div_ops->determine_rate() instead of
busy->div_ops->round_rate() was done by hand.

Signed-off-by: Brian Masney <bmasney@redhat.com>
Link: https://lore.kernel.org/r/20250710-clk-imx-round-rate-v1-3-5726f98e6d8d@redhat.com
Reviewed-by: Peng Fan <peng.fan@nxp.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
2025-07-24 15:16:53 -07:00
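
For context, the conversion swaps the driver's round_rate() wrapper for a determine_rate() wrapper. The removed op presumably looked roughly like the sketch below (reconstructed from the standard clk_ops round_rate() prototype, not quoted from the old file); its replacement, clk_busy_divider_determine_rate(), forwards the whole struct clk_rate_request to busy->div_ops->determine_rate() instead of a rate and a parent-rate pointer.

static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	/* defer to the generic divider's round_rate() op */
	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
}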


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2012 Linaro Ltd.
 */

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include "clk.h"
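
/*
 * Poll the busy/handshake bit at @reg, bit @shift, until it clears;
 * give up and return -ETIMEDOUT after 10 ms.
 */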
static int clk_busy_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}

struct clk_busy_divider {
	struct clk_divider div;
	const struct clk_ops *div_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_busy_divider, div);
}

static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}

static int clk_busy_divider_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->determine_rate(&busy->div.hw, req);
}

static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
	int ret;

	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.determine_rate = clk_busy_divider_determine_rate,
	.set_rate = clk_busy_divider_set_rate,
};
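
/*
 * Register a clk_divider wrapped so that set_rate() polls the CCM
 * busy/handshake bit at @busy_reg, bit @busy_shift, until the new
 * divider value has taken effect.
 */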
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
				       void __iomem *reg, u8 shift, u8 width,
				       void __iomem *busy_reg, u8 busy_shift)
{
	struct clk_busy_divider *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->div.reg = reg;
	busy->div.shift = shift;
	busy->div.width = width;
	busy->div.lock = &imx_ccm_lock;
	busy->div_ops = &clk_divider_ops;

	init.name = name;
	init.ops = &clk_busy_divider_ops;
	init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	busy->div.hw.init = &init;

	hw = &busy->div.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}

struct clk_busy_mux {
	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return container_of(mux, struct clk_busy_mux, mux);
}

static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);

	return busy->mux_ops->get_parent(&busy->mux.hw);
}

static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
	int ret;

	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};
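
/*
 * Register a clk_mux wrapped so that set_parent() polls the CCM
 * busy/handshake bit at @busy_reg, bit @busy_shift, until the mux
 * switch has completed.
 */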
struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift,
				   u8 width, void __iomem *busy_reg, u8 busy_shift,
				   const char * const *parent_names, int num_parents)
{
	struct clk_busy_mux *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->mux.reg = reg;
	busy->mux.shift = shift;
	busy->mux.mask = BIT(width) - 1;
	busy->mux.lock = &imx_ccm_lock;
	busy->mux_ops = &clk_mux_ops;

	init.name = name;
	init.ops = &clk_busy_mux_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	busy->mux.hw.init = &init;

	hw = &busy->mux.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}
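
For context, an i.MX CCM clock provider registers these busy clocks at init time and stores the returned clk_hw pointers in its provider table. The sketch below only illustrates the call pattern: the clock names, parent names, register offsets, and bit positions are hypothetical placeholders rather than values from a specific SoC driver, and it assumes the usual kernel headers in addition to those included above.

static const char * const periph_sels[] = { "pll2_bus", "periph_clk2", };
static struct clk_hw *example_hws[2];

static void __init example_ccm_clocks_init(void __iomem *ccm_base)
{
	/* reparenting blocks in clk_busy_mux_set_parent() until the busy bit clears */
	example_hws[0] = imx_clk_hw_busy_mux("periph", ccm_base + 0x14, 25, 1,
					     ccm_base + 0x48, 5, periph_sels,
					     ARRAY_SIZE(periph_sels));

	/* rate changes block in clk_busy_divider_set_rate() until the busy bit clears */
	example_hws[1] = imx_clk_hw_busy_divider("ahb", "periph",
						 ccm_base + 0x14, 10, 3,
						 ccm_base + 0x48, 1);
}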