Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
x86/cpu: Move cpu_core_id into topology info
Rename it to core_id and stick it to the other ID fields. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Zhang Rui <rui.zhang@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230814085112.566519388@linutronix.de
parent 94f0b3978e
commit e95256335d
9 changed files with 19 additions and 17 deletions
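The change boils down to per-CPU code reading the core number from the embedded topology structure, next to pkg_id and die_id, instead of from a loose cpu_core_id member of struct cpuinfo_x86. A minimal stand-alone sketch of the access pattern before and after; the structs are cut down to the fields touched here and the values are illustrative, not the real kernel definitions:

#include <stdio.h>

/* Cut-down stand-ins for the kernel structures; only the fields relevant
 * to this commit are shown. */
struct cpuinfo_topology {
	unsigned int pkg_id;	/* physical package ID */
	unsigned int die_id;	/* die ID */
	unsigned int core_id;	/* core ID relative to the package (new home) */
};

struct cpuinfo_x86 {
	struct cpuinfo_topology topo;
	/* u16 cpu_core_id;  <-- the member this commit removes */
};

int main(void)
{
	struct cpuinfo_x86 c = { .topo = { .pkg_id = 0, .die_id = 0, .core_id = 3 } };

	/* Previously: c.cpu_core_id; now the same information lives here: */
	printf("core id: %u\n", c.topo.core_id);
	return 0;
}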
@@ -89,6 +89,9 @@ struct cpuinfo_topology {
 
 	// Physical die ID on AMD, Relative on Intel
 	u32		die_id;
+
+	// Core ID relative to the package
+	u32		core_id;
 };
 
 struct cpuinfo_x86 {
@@ -143,7 +146,6 @@ struct cpuinfo_x86 {
 	/* Logical processor id: */
 	u16		logical_proc_id;
-	/* Core id: */
-	u16		cpu_core_id;
 	u16		logical_die_id;
 	/* Index into per_cpu list: */
 	u16		cpu_index;
@@ -109,7 +109,7 @@ extern const struct cpumask *cpu_clustergroup_mask(int cpu);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).topo.pkg_id)
 #define topology_logical_die_id(cpu)		(cpu_data(cpu).logical_die_id)
 #define topology_die_id(cpu)			(cpu_data(cpu).topo.die_id)
-#define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
+#define topology_core_id(cpu)			(cpu_data(cpu).topo.core_id)
 #define topology_ppin(cpu)			(cpu_data(cpu).ppin)
 
 extern unsigned int __max_die_per_package;
@@ -386,7 +386,7 @@ int amd_get_subcaches(int cpu)
 
 	pci_read_config_dword(link, 0x1d4, &mask);
 
-	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
+	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
 }
 
 int amd_set_subcaches(int cpu, unsigned long mask)
@@ -412,7 +412,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
 		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
 	}
 
-	cuid = cpu_data(cpu).cpu_core_id;
+	cuid = cpu_data(cpu).topo.core_id;
 	mask <<= 4 * cuid;
 	mask |= (0xf ^ (1 << cuid)) << 26;
 
@@ -378,7 +378,7 @@ static int nearby_node(int apicid)
 #endif
 
 /*
- * Fix up cpu_core_id for pre-F17h systems to be in the
+ * Fix up topo::core_id for pre-F17h systems to be in the
  * [0 .. cores_per_node - 1] range. Not really needed but
  * kept so as not to break existing setups.
  */
@@ -390,7 +390,7 @@ static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
 		return;
 
 	cus_per_node = c->x86_max_cores / nodes_per_socket;
-	c->cpu_core_id %= cus_per_node;
+	c->topo.core_id %= cus_per_node;
 }
 
 /*
@@ -416,7 +416,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 			c->cu_id = ebx & 0xff;
 
 		if (c->x86 >= 0x17) {
-			c->cpu_core_id = ebx & 0xff;
+			c->topo.core_id = ebx & 0xff;
 
 			if (smp_num_siblings > 1)
 				c->x86_max_cores /= smp_num_siblings;
@@ -459,7 +459,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
+	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
 	c->topo.pkg_id = c->topo.initial_apicid >> bits;
 	/* use socket ID also for last level cache */
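The two changed lines above (and the identical Hygon copy further down) split the initial APIC ID into a core number and a package number using x86_coreid_bits. A small user-space sketch of that decomposition; the bit width and the APIC ID are made-up example values:

#include <stdio.h>

#define COREID_BITS	3	/* hypothetical width: up to 8 cores per package */

int main(void)
{
	unsigned int initial_apicid = 0x1a;	/* example value only */

	/* Low order bits select the core within the package ... */
	unsigned int core_id = initial_apicid & ((1u << COREID_BITS) - 1);
	/* ... the remaining high bits identify the package. */
	unsigned int pkg_id = initial_apicid >> COREID_BITS;

	printf("apicid 0x%x -> core_id %u, pkg_id %u\n",
	       initial_apicid, core_id, pkg_id);	/* prints core_id 2, pkg_id 3 */
	return 0;
}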
@@ -922,8 +922,8 @@ void detect_ht(struct cpuinfo_x86 *c)
 
 	core_bits = get_count_order(c->x86_max_cores);
 
-	c->cpu_core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
-				       ((1 << core_bits) - 1);
+	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
+				       ((1 << core_bits) - 1);
 #endif
 }
 
@@ -74,7 +74,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
 
 		c->topo.die_id = ecx & 0xff;
 
-		c->cpu_core_id = ebx & 0xff;
+		c->topo.core_id = ebx & 0xff;
 
 		if (smp_num_siblings > 1)
 			c->x86_max_cores /= smp_num_siblings;
@@ -120,7 +120,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c)
 
 	bits = c->x86_coreid_bits;
 	/* Low order bits define the core id (index of core in socket) */
-	c->cpu_core_id = c->topo.initial_apicid & ((1 << bits)-1);
+	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
 	c->topo.pkg_id = c->topo.initial_apicid >> bits;
 	/* use socket ID also for last level cache */
@@ -23,7 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 	seq_printf(m, "physical id\t: %d\n", c->topo.pkg_id);
 	seq_printf(m, "siblings\t: %d\n",
 		   cpumask_weight(topology_core_cpumask(cpu)));
-	seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
+	seq_printf(m, "core id\t\t: %d\n", c->topo.core_id);
 	seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	seq_printf(m, "apicid\t\t: %d\n", c->topo.apicid);
 	seq_printf(m, "initial apicid\t: %d\n", c->topo.initial_apicid);
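For reference, the seq_printf() calls above emit the per-CPU topology block of /proc/cpuinfo, and with this change the "core id" line is fed from topo.core_id. An illustrative excerpt with made-up values:

physical id	: 0
siblings	: 16
core id		: 3
cpu cores	: 8
apicid		: 7
initial apicid	: 7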
@@ -146,7 +146,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	die_select_mask = (~(-1 << die_plus_mask_width)) >>
 				core_plus_mask_width;
 
-	c->cpu_core_id = apic->phys_pkg_id(c->topo.initial_apicid,
+	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid,
 				ht_mask_width) & core_select_mask;
 
 	if (die_level_present) {
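In the hunk above, topo.core_id is obtained by shifting the initial APIC ID past the SMT bits and masking with core_select_mask. A user-space sketch of the same mask arithmetic, using made-up widths of the kind CPUID leaf 0xB reports (ht_mask_width = 1, core_plus_mask_width = 4, i.e. 2 threads per core and at most 8 cores per package); ~0u stands in for the kernel's signed -1 to keep the example strictly well-defined:

#include <stdio.h>

int main(void)
{
	unsigned int ht_mask_width = 1;		/* bits consumed by SMT threads */
	unsigned int core_plus_mask_width = 4;	/* bits for threads + cores     */
	unsigned int initial_apicid = 0x0b;	/* example APIC ID only         */

	/* Equivalent of the kernel's (~(-1 << width)) >> ht_mask_width. */
	unsigned int core_select_mask = (~(~0u << core_plus_mask_width)) >> ht_mask_width;

	unsigned int smt_id = initial_apicid & ((1u << ht_mask_width) - 1);
	unsigned int core_id = (initial_apicid >> ht_mask_width) & core_select_mask;

	printf("apicid 0x%x -> smt %u, core id %u\n", initial_apicid, smt_id, core_id);
	/* prints: apicid 0xb -> smt 1, core id 5 */
	return 0;
}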
@@ -479,7 +479,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		if (c->topo.pkg_id == o->topo.pkg_id &&
 		    c->topo.die_id == o->topo.die_id &&
 		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
-			if (c->cpu_core_id == o->cpu_core_id)
+			if (c->topo.core_id == o->topo.core_id)
 				return topology_sane(c, o, "smt");
 
 			if ((c->cu_id != 0xff) &&
@@ -490,7 +490,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 	} else if (c->topo.pkg_id == o->topo.pkg_id &&
 		   c->topo.die_id == o->topo.die_id &&
-		   c->cpu_core_id == o->cpu_core_id) {
+		   c->topo.core_id == o->topo.core_id) {
 		return topology_sane(c, o, "smt");
 	}
 
@@ -1426,7 +1426,7 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
 	cpumask_clear(topology_die_cpumask(cpu));
-	c->cpu_core_id = 0;
+	c->topo.core_id = 0;
 	c->booted_cores = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();