Merge back earlier cpufreq material for 6.17-rc1

commit 4f95b5bad6
Author: Rafael J. Wysocki
Date:   2025-07-17 20:02:51 +02:00
6 changed files with 49 additions and 76 deletions

Documentation/admin-guide/pm/cpufreq.rst

@@ -398,7 +398,9 @@ policy limits change after that.
 
 This governor does not do anything by itself. Instead, it allows user space
 to set the CPU frequency for the policy it is attached to by writing to the
-``scaling_setspeed`` attribute of that policy.
+``scaling_setspeed`` attribute of that policy. Though the intention may be to
+set an exact frequency for the policy, the actual frequency may vary depending
+on hardware coordination, thermal and power limits, and other factors.
 
 ``schedutil``
 -------------
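A note on the interface described above: a minimal user-space sketch (assuming the standard cpufreq sysfs layout and that the ``userspace`` governor is active on cpu0; the 1.2 GHz request is an arbitrary example). The kernel clamps the value to the policy limits, and, per the new wording, the hardware may still deviate from it.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed", "w");

	if (!f) {
		perror("scaling_setspeed");
		return 1;
	}
	fprintf(f, "%u\n", 1200000U);	/* frequency in kHz */
	fclose(f);
	return 0;
}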

drivers/cpufreq/cppc_cpufreq.c

@@ -26,14 +26,6 @@
 
 #include <acpi/cppc_acpi.h>
 
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
 static struct cpufreq_driver cppc_cpufreq_driver;
 
 #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -352,7 +344,6 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 
 #if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
 static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
 
 /* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
 #define CPPC_EM_CAP_STEP	(20)
@@ -488,7 +479,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
 	return 0;
 }
 
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data;
+	struct em_data_callback em_cb =
+		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+	cpu_data = policy->driver_data;
+	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+			get_perf_level_count(policy), &em_cb,
+			cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
 {
 	struct acpi_madt_generic_interrupt *gicc;
 	DECLARE_BITMAP(used_classes, 256) = {};
@@ -503,7 +506,7 @@ static int populate_efficiency_class(void)
 	if (bitmap_weight(used_classes, 256) <= 1) {
 		pr_debug("Efficiency classes are all equal (=%d). "
 			"No EM registered", class);
-		return -EINVAL;
+		return;
 	}
 
 	/*
@@ -520,26 +523,11 @@ static int populate_efficiency_class(void)
 		index++;
 	}
 	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
-	return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
-	struct cppc_cpudata *cpu_data;
-	struct em_data_callback em_cb =
-		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
-	cpu_data = policy->driver_data;
-	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
-			get_perf_level_count(policy), &em_cb,
-			cpu_data->shared_cpu_map, 0);
 }
 
 #else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
 {
-	return 0;
 }
 #endif
 
@@ -567,8 +555,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 		goto free_mask;
 	}
 
-	list_add(&cpu_data->node, &cpu_data_list);
-
 	return cpu_data;
 
 free_mask:
@@ -583,7 +569,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
 {
 	struct cppc_cpudata *cpu_data = policy->driver_data;
 
-	list_del(&cpu_data->node);
 	free_cpumask_var(cpu_data->shared_cpu_map);
 	kfree(cpu_data);
 	policy->driver_data = NULL;
@@ -954,24 +939,10 @@ static int __init cppc_cpufreq_init(void)
 	return ret;
 }
 
-static inline void free_cpu_data(void)
-{
-	struct cppc_cpudata *iter, *tmp;
-
-	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
-		free_cpumask_var(iter->shared_cpu_map);
-		list_del(&iter->node);
-		kfree(iter);
-	}
-
-}
-
 static void __exit cppc_cpufreq_exit(void)
 {
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 	cppc_freq_invariance_exit();
-
-	free_cpu_data();
 }
 
 module_exit(cppc_cpufreq_exit);
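Context for the cppc_cpufreq deletions above: each policy's cppc_cpudata is now owned by policy->driver_data and freed directly in cppc_cpufreq_put_cpu_data(), so the global cpu_data_list and the free_cpu_data() walk at module exit are no longer needed. The deleted helper used list_for_each_entry_safe() because it frees nodes during the walk; a plain-C sketch of that idiom (hypothetical node type, not driver code):

#include <stdlib.h>

struct node {
	struct node *next;
};

static void free_all(struct node *head)
{
	while (head) {
		struct node *next = head->next;	/* save successor before freeing */

		free(head);
		head = next;
	}
}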

drivers/cpufreq/cpufreq.c

@@ -967,6 +967,7 @@ static struct attribute *cpufreq_attrs[] = {
 	&cpuinfo_min_freq.attr,
 	&cpuinfo_max_freq.attr,
 	&cpuinfo_transition_latency.attr,
+	&scaling_cur_freq.attr,
 	&scaling_min_freq.attr,
 	&scaling_max_freq.attr,
 	&affected_cpus.attr,
@@ -1095,10 +1096,6 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 			return ret;
 	}
 
-	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
-	if (ret)
-		return ret;
-
 	if (cpufreq_driver->bios_limit) {
 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
 		if (ret)
@@ -1284,6 +1281,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 		goto err_free_real_cpus;
 	}
 
+	init_rwsem(&policy->rwsem);
+
 	freq_constraints_init(&policy->constraints);
 
 	policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1306,7 +1305,6 @@
 	}
 
 	INIT_LIST_HEAD(&policy->policy_list);
-	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
 	init_waitqueue_head(&policy->transition_wait);
 	INIT_WORK(&policy->update, handle_update);
@@ -1694,14 +1692,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
 		return;
 	}
 
-	if (has_target())
+	if (has_target()) {
 		strscpy(policy->last_governor, policy->governor->name,
 			CPUFREQ_NAME_LEN);
-	else
-		policy->last_policy = policy->policy;
-
-	if (has_target())
 		cpufreq_exit_governor(policy);
+	} else {
+		policy->last_policy = policy->policy;
+	}
 
 	/*
 	 * Perform the ->offline() during light-weight tear-down, as
@@ -1803,6 +1800,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
 {
 	unsigned int new_freq;
 
+	if (!cpufreq_driver->get)
+		return 0;
+
 	new_freq = cpufreq_driver->get(policy->cpu);
 	if (!new_freq)
 		return 0;
@@ -1925,10 +1925,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 		return 0;
 
 	guard(cpufreq_policy_read)(policy);
 
-	if (cpufreq_driver->get)
-		return __cpufreq_get(policy);
-
-	return 0;
+	return __cpufreq_get(policy);
 }
 EXPORT_SYMBOL(cpufreq_get);
@@ -2482,8 +2479,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy)
 
 	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
 
-	if (cpufreq_driver->get)
-		cpufreq_verify_current_freq(policy, false);
+	cpufreq_verify_current_freq(policy, false);
 
 	if (policy->governor->start) {
 		ret = policy->governor->start(policy);
@@ -2715,10 +2711,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 		pr_debug("starting governor %s failed\n", policy->governor->name);
 		if (old_gov) {
 			policy->governor = old_gov;
-			if (cpufreq_init_governor(policy))
+			if (cpufreq_init_governor(policy)) {
 				policy->governor = NULL;
-			else
-				cpufreq_start_governor(policy);
+			} else if (cpufreq_start_governor(policy)) {
+				cpufreq_exit_governor(policy);
+				policy->governor = NULL;
+			}
 		}
 
 		return ret;
@@ -2944,15 +2942,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	/*
-	 * Mark support for the scheduler's frequency invariance engine for
-	 * drivers that implement target(), target_index() or fast_switch().
-	 */
-	if (!cpufreq_driver->setpolicy) {
-		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
-		pr_debug("supports frequency invariance");
-	}
-
 	if (driver_data->setpolicy)
 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
@@ -2983,6 +2972,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	hp_online = ret;
 	ret = 0;
 
+	/*
+	 * Mark support for the scheduler's frequency invariance engine for
+	 * drivers that implement target(), target_index() or fast_switch().
+	 */
+	if (!cpufreq_driver->setpolicy) {
+		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+		pr_debug("supports frequency invariance");
+	}
+
 	pr_debug("driver %s up and running\n", driver_data->name);
 	goto out;
 
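The two hunks above move the enabling of the cpufreq_freq_invariance static key from right after the driver pointer is set to after CPU hotplug setup has succeeded, presumably so earlier error paths never leave the key enabled. For orientation, a kernel-style sketch of the static-key pattern involved (generic example, not the cpufreq code):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);

/* The branch compiles to a no-op until the key is enabled. */
static bool example_fast_path(void)
{
	return static_branch_likely(&example_key);
}

static void example_setup_done(void)
{
	static_branch_enable(&example_key);	/* patch the branch in */
}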

drivers/cpufreq/cpufreq_userspace.c

@@ -134,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
 	.store_setspeed	= cpufreq_set,
 	.show_setspeed	= show_speed,
 	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_GOV_STRICT_TARGET,
 };
 
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
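The new CPUFREQ_GOV_STRICT_TARGET flag declares that this governor wants its exact frequency request honored. A kernel-style sketch of the consumer side (simplified; the core is expected to cache the flag as policy->strict_target, and the intel_pstate hunk below shows the real pattern):

/*
 * With a strict-target governor, cap performance at the requested level
 * instead of letting the hardware roam up to the policy maximum.
 */
static int pick_max_perf(struct cpufreq_policy *policy,
			 int target_pstate, int max_perf_ratio)
{
	return policy->strict_target ? target_pstate : max_perf_ratio;
}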

drivers/cpufreq/intel_pstate.c

@@ -2775,6 +2775,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	X86_MATCH(INTEL_TIGERLAKE,		core_funcs),
 	X86_MATCH(INTEL_SAPPHIRERAPIDS_X,	core_funcs),
 	X86_MATCH(INTEL_EMERALDRAPIDS_X,	core_funcs),
+	X86_MATCH(INTEL_GRANITERAPIDS_D,	core_funcs),
+	X86_MATCH(INTEL_GRANITERAPIDS_X,	core_funcs),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3249,8 +3251,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
 		int max_pstate = policy->strict_target ?
 					target_pstate : cpu->max_perf_ratio;
 
-		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
-					 fast_switch);
+		intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+					 target_pstate, fast_switch);
 	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
 	}
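The second hunk now passes target_pstate as the desired-performance argument instead of 0, asking HWP to aim at that level rather than leaving the choice entirely to the hardware. For orientation, a standalone sketch of how IA32_HWP_REQUEST packs these fields (per the Intel SDM: minimum performance in bits 0-7, maximum in 8-15, desired in 16-23):

#include <stdint.h>

static inline uint64_t hwp_req_pack(uint8_t min_perf, uint8_t max_perf,
				    uint8_t desired_perf)
{
	return (uint64_t)min_perf |
	       ((uint64_t)max_perf << 8) |
	       ((uint64_t)desired_perf << 16);
}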

include/acpi/cppc_acpi.h

@@ -139,7 +139,6 @@ struct cppc_perf_fb_ctrs {
 
 /* Per CPU container for runtime CPPC management. */
 struct cppc_cpudata {
-	struct list_head node;
 	struct cppc_perf_caps perf_caps;
 	struct cppc_perf_ctrls perf_ctrls;
 	struct cppc_perf_fb_ctrs perf_fb_ctrs;