LoongArch: Add SCHED_MC (Multi-core scheduler) support

In order to achieve more reasonable load balancing behavior, add
SCHED_MC (Multi-core scheduler) support.

The LLC distribution on LoongArch is currently consistent with the NUMA
nodes, so the balancing domain introduced by SCHED_MC can effectively
reduce the cases where a woken process is placed on an SMT sibling.

Co-developed-by: Hongliang Wang <wanghongliang@loongson.cn>
Signed-off-by: Hongliang Wang <wanghongliang@loongson.cn>
Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Tianyang Zhang 2025-05-30 21:45:42 +08:00 committed by Huacai Chen
parent 980d4a42d5
commit 93f4373156
4 changed files with 56 additions and 0 deletions

arch/loongarch/Kconfig

@@ -456,6 +456,15 @@ config SCHED_SMT
	  Improves scheduler's performance when there are multiple
	  threads in one physical core.

config SCHED_MC
	bool "Multi-core scheduler support"
	depends on SMP
	default y
	help
	  Multi-core scheduler support improves the CPU scheduler's decision
	  making when dealing with multi-core CPU chips at a cost of slightly
	  increased overhead in some places.

config SMP
	bool "Multi-Processing support"
	help
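
For context, CONFIG_SCHED_MC takes effect through the generic scheduler's default topology table, which conditionally includes an MC level built from cpu_coregroup_mask(). Roughly, as a sketch based on kernel/sched/topology.c (not part of this patch; the exact entries and initializer macros vary between kernel versions):

/*
 * Sketch of the generic default topology table (kernel/sched/topology.c);
 * entries and macros differ between kernel versions.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	/* MC level: one domain per group of CPUs that share the LLC */
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};

Since the MC level is flagged as LLC-sharing via cpu_core_flags(), the wakeup path can look for an idle CPU anywhere in the LLC rather than only among SMT siblings, which is the effect the commit message describes.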

arch/loongarch/include/asm/smp.h

@@ -25,6 +25,7 @@ extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_llc_shared_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

arch/loongarch/include/asm/topology.h

@@ -30,6 +30,14 @@ void numa_set_distance(int from, int to, int distance);
#endif

#ifdef CONFIG_SMP
/*
 * Return cpus that shares the last level cache.
 */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_llc_shared_map[cpu];
}

#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
#define topology_core_id(cpu) (cpu_data[cpu].core)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
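
One way to see the resulting domain from user space is to read the scheduler-domain names exposed via debugfs. The sketch below is hypothetical and assumes CONFIG_SCHED_DEBUG=y and debugfs mounted at /sys/kernel/debug; the path layout is version-dependent:

/*
 * Hypothetical user-space check: print the names of CPU 0's scheduler
 * domains.  Assumes CONFIG_SCHED_DEBUG=y and debugfs mounted at
 * /sys/kernel/debug; the path layout is version-dependent.
 */
#include <glob.h>
#include <stdio.h>

int main(void)
{
	glob_t g;
	size_t i;

	if (glob("/sys/kernel/debug/sched/domains/cpu0/domain*/name", 0, NULL, &g)) {
		fprintf(stderr, "no scheduler domain debug files found\n");
		return 1;
	}

	for (i = 0; i < g.gl_pathc; i++) {
		char name[64] = "";
		FILE *f = fopen(g.gl_pathv[i], "r");

		if (!f)
			continue;
		if (fgets(name, sizeof(name), f))
			printf("%s: %s", g.gl_pathv[i], name);
		fclose(f);
	}

	globfree(&g);
	return 0;
}

With CONFIG_SCHED_MC=y and cpu_coregroup_mask() in place, one of the reported domains is expected to be named MC.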

arch/loongarch/kernel/smp.c

@@ -46,6 +46,10 @@ EXPORT_SYMBOL(__cpu_logical_map);
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the last level cache shared map of each logical CPU */
cpumask_t cpu_llc_shared_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_llc_shared_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@@ -63,6 +67,9 @@ EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which llc shared maps can be computed */
static cpumask_t cpu_llc_shared_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
@@ -102,6 +109,34 @@ static inline void set_cpu_core_map(int cpu)
	}
}

static inline void set_cpu_llc_shared_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_llc_shared_setup_map);

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_set_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}
}

static inline void clear_cpu_llc_shared_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_llc_shared_setup_map) {
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			cpumask_clear_cpu(i, &cpu_llc_shared_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_llc_shared_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_llc_shared_setup_map);
}

static inline void set_cpu_sibling_map(int cpu)
{
	int i;
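
To illustrate what the pairwise loop in set_cpu_llc_shared_map() above produces, here is a small stand-alone sketch (a hypothetical 8-CPU, two-node layout, not part of the patch) that applies the same rule, namely that CPUs on the same NUMA node share the LLC, and prints the resulting maps:

/*
 * Stand-alone illustration of the mask building above, using a made-up
 * 8-CPU / 2-node layout: CPUs 0-3 on node 0, CPUs 4-7 on node 1.
 */
#include <stdio.h>

#define NR_CPUS 8

static unsigned int llc_shared_map[NR_CPUS];	/* bit i set => shares LLC with CPU i */
static unsigned int llc_shared_setup_map;

static int cpu_to_node(int cpu)
{
	return cpu / 4;		/* hypothetical: 4 CPUs per node */
}

static void set_cpu_llc_shared_map(int cpu)
{
	int i;

	llc_shared_setup_map |= 1u << cpu;

	/* Same pairwise rule as above: same node => shared LLC, both ways. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(llc_shared_setup_map & (1u << i)))
			continue;
		if (cpu_to_node(cpu) == cpu_to_node(i)) {
			llc_shared_map[cpu] |= 1u << i;
			llc_shared_map[i] |= 1u << cpu;
		}
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_cpu_llc_shared_map(cpu);	/* as each CPU is brought up */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: llc_shared_map = 0x%02x\n", cpu, llc_shared_map[cpu]);

	return 0;
}

CPUs 0-3 end up with 0x0f and CPUs 4-7 with 0xf0, i.e. each CPU's LLC map covers exactly its own node, which is the mask cpu_coregroup_mask() then hands to the scheduler.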
@@ -406,6 +441,7 @@ int loongson_cpu_disable(void)
#endif
	set_cpu_online(cpu, false);
	clear_cpu_sibling_map(cpu);
	clear_cpu_llc_shared_map(cpu);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
@@ -572,6 +608,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_llc_shared_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
@@ -613,6 +650,7 @@ asmlinkage void start_secondary(void)
	loongson_init_secondary();
	set_cpu_sibling_map(cpu);
	set_cpu_llc_shared_map(cpu);
	set_cpu_core_map(cpu);
	notify_cpu_starting(cpu);