Merge patch series "Support Multi-frequency scale for UFS"

Ziqi Chen <quic_ziqichen@quicinc.com> says:

With OPP V2 enabled, devfreq can scale clocks amongst multiple frequency
plans. However, the gear speed is only toggled between min and max during
clock scaling. Enable multi-level gear scaling by mapping clock frequencies
to gear speeds, so that when devfreq scales clock frequencies we can put
the UFS link at the appropriate gear speeds accordingly.

This series has been tested on below platforms -
SM8550 MTP + UFS3.1
SM8650 MTP + UFS3.1
SM8750 MTP + UFS4.0

Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8550-QRD
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8550-HDK
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on SM8650-HDK
Link: https://lore.kernel.org/r/20250213080008.2984807-1-quic_ziqichen@quicinc.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Martin K. Petersen 2025-02-20 22:11:11 -05:00
commit 7e72900272
6 changed files with 149 additions and 35 deletions

View file

@ -1571,3 +1571,36 @@ Description: Report the number of times a critical health event has been
bWriteBoosterBufferLifeTimeEst, and bRPMBLifeTimeEst.
The file is read only.
What: /sys/bus/platform/drivers/ufshcd/*/clkscale_enable
What: /sys/bus/platform/devices/*.ufs/clkscale_enable
Date: January 2025
Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
Description:
This attribute shows whether the UFS clock scaling is enabled or not.
And it can be used to enable/disable the clock scaling by writing
1 or 0 to this attribute.
The attribute is read/write.
What: /sys/bus/platform/drivers/ufshcd/*/clkgate_enable
What: /sys/bus/platform/devices/*.ufs/clkgate_enable
Date: January 2025
Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
Description:
This attribute shows whether the UFS clock gating is enabled or not.
And it can be used to enable/disable the clock gating by writing
1 or 0 to this attribute.
The attribute is read/write.
What: /sys/bus/platform/drivers/ufshcd/*/clkgate_delay_ms
What: /sys/bus/platform/devices/*.ufs/clkgate_delay_ms
Date: January 2025
Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
Description:
This attribute shows and sets the number of milliseconds of idle time
before the UFS driver starts to perform clock gating. This can
prevent the UFS from frequently performing clock gating/ungating.
The attribute is read/write.

View file

@ -117,11 +117,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->clk_scale_notify)
return hba->vops->clk_scale_notify(hba, up, status);
return hba->vops->clk_scale_notify(hba, up, target_freq, status);
return 0;
}
@ -270,6 +271,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
if (hba->vops && hba->vops->freq_to_gear_speed)
return hba->vops->freq_to_gear_speed(hba, freq);
return 0;
}
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**

View file

@ -1162,7 +1162,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@ -1173,7 +1173,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@ -1313,16 +1313,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
* @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
if (target_gear) {
new_pwr_info = hba->pwr_info;
new_pwr_info.gear_tx = target_gear;
new_pwr_info.gear_rx = target_gear;
goto config_pwr_mode;
}
/* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@ -1343,6 +1353,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@ -1387,13 +1398,13 @@ out:
return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
/* Enable Write Booster if we have scaled up else disable it */
/* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
ufshcd_wb_toggle(hba, scale_up);
ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
@ -1413,15 +1424,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
u32 old_gear = hba->pwr_info.gear_rx;
u32 new_gear = 0;
int ret = 0;
new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@ -1429,13 +1444,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@ -1444,7 +1459,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
@ -1720,6 +1735,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_info *clki;
unsigned long freq;
u32 value;
int err = 0;
@ -1743,14 +1760,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
} else {
ufshcd_suspend_clkscaling(hba);
err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err);
goto out_rel;
}
clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
freq = clki->max_freq;
ufshcd_suspend_clkscaling(hba);
if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
goto out_rel;
err = ufshcd_devfreq_scale(hba, freq, true);
if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err);
else
hba->clk_scaling.target_freq = freq;
out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@ -1783,6 +1811,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
if (!hba->clk_scaling.wb_gear)
/* Use intermediate gear speed HS_G3 as the default wb_gear */
hba->clk_scaling.wb_gear = UFS_HS_G3;
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,

View file

@ -1643,6 +1643,7 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))

View file

@ -16,6 +16,7 @@
#include <linux/reset-controller.h>
#include <linux/time.h>
#include <linux/unaligned.h>
#include <linux/units.h>
#include <soc/qcom/ice.h>
@ -98,7 +99,7 @@ static const struct __ufs_qcom_bw_table {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@ -596,7 +597,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return -EINVAL;
}
err = ufs_qcom_set_core_clk_ctrl(hba, true);
err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@ -1315,7 +1316,7 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
@ -1329,10 +1330,11 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
!strcmp(clki->name, "core_clk_unipro")) {
if (!clki->max_freq)
cycles_in_1us = 150; /* default for backwards compatibility */
else if (is_scale_up)
cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
else if (freq == ULONG_MAX)
cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
else
cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
cycles_in_1us = ceil(freq, HZ_PER_MHZ);
break;
}
}
@ -1369,7 +1371,7 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
{
int ret;
@ -1379,7 +1381,7 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
return ret;
}
/* set unipro core clock attributes and clear clock divider */
return ufs_qcom_set_core_clk_ctrl(hba, true);
return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@ -1408,14 +1410,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
return err;
}
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
/* set unipro core clock attributes and clear clock divider */
return ufs_qcom_set_core_clk_ctrl(hba, false);
return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
unsigned long target_freq,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@ -1429,7 +1432,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err)
return err;
if (scale_up)
err = ufs_qcom_clk_scale_up_pre_change(hba);
err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
@ -1441,7 +1444,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
err = ufs_qcom_clk_scale_down_post_change(hba);
err = ufs_qcom_clk_scale_down_post_change(hba, target_freq);
if (err) {
@ -1875,6 +1878,36 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return ret;
}
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
u32 gear = 0;
switch (freq) {
case 403000000:
gear = UFS_HS_G5;
break;
case 300000000:
gear = UFS_HS_G4;
break;
case 201500000:
gear = UFS_HS_G3;
break;
case 150000000:
case 100000000:
gear = UFS_HS_G2;
break;
case 75000000:
case 37500000:
gear = UFS_HS_G1;
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
break;
}
return gear;
}
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@ -1903,6 +1936,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.op_runtime_config = ufs_qcom_op_runtime_config,
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
.freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
};
/**

View file

@ -336,6 +336,7 @@ struct ufs_pwr_mode_info {
* @get_outstanding_cqs: called to get outstanding completion queues
* @config_esi: called to config Event Specific Interrupt
* @config_scsi_dev: called to configure SCSI device parameters
* @freq_to_gear_speed: called to map clock frequency to the max supported gear speed
*/
struct ufs_hba_variant_ops {
const char *name;
@ -344,8 +345,8 @@ struct ufs_hba_variant_ops {
void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*set_dma_mask)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
int (*clk_scale_notify)(struct ufs_hba *, bool, unsigned long,
enum ufs_notify_change_status);
int (*setup_clocks)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
int (*hce_enable_notify)(struct ufs_hba *,
@ -384,6 +385,7 @@ struct ufs_hba_variant_ops {
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
void (*config_scsi_dev)(struct scsi_device *sdev);
u32 (*freq_to_gear_speed)(struct ufs_hba *hba, unsigned long freq);
};
/* clock gating state */
@ -448,6 +450,8 @@ struct ufs_clk_gating {
* one keeps track of previous power mode.
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
* @wb_gear: enable Write Booster when HS gear scales above or equal to it, else
* disable Write Booster
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node
* @is_allowed: tracks if scaling is currently allowed or not, used to block
@ -471,6 +475,7 @@ struct ufs_clk_scaling {
struct ufs_pa_layer_attr saved_pwr_info;
unsigned long target_freq;
u32 min_gear;
u32 wb_gear;
bool is_enabled;
bool is_allowed;
bool is_initialized;