2018-09-23 17:33:12 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
|
|
|
/*
|
|
|
|
* Hygon Processor Support for Linux
|
|
|
|
*
|
|
|
|
* Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
|
|
|
|
*
|
|
|
|
* Author: Pu Wen <puwen@hygon.cn>
|
|
|
|
*/
|
|
|
|
#include <linux/io.h>
|
|
|
|
|
2023-08-08 15:03:43 -07:00
|
|
|
#include <asm/apic.h>
|
2018-09-23 17:33:12 +08:00
|
|
|
#include <asm/cpu.h>
|
|
|
|
#include <asm/smp.h>
|
2020-08-06 14:35:11 +02:00
|
|
|
#include <asm/numa.h>
|
2018-09-23 17:33:12 +08:00
|
|
|
#include <asm/cacheinfo.h>
|
|
|
|
#include <asm/spec-ctrl.h>
|
|
|
|
#include <asm/delay.h>
|
|
|
|
|
|
|
|
#include "cpu.h"
|
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int id, nid;

	/* Prefer the closest lower APIC ID with a usable node... */
	for (id = apicid - 1; id >= 0; id--) {
		nid = __apicid_to_node[id];
		if (nid != NUMA_NO_NODE && node_online(nid))
			return nid;
	}

	/* ...then scan upward through the remaining APIC ID space. */
	for (id = apicid + 1; id < MAX_LOCAL_APIC; id++) {
		nid = __apicid_to_node[id];
		if (nid != NUMA_NO_NODE && node_online(nid))
			return nid;
	}

	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
|
|
|
|
|
|
|
|
/*
 * Bind the current CPU to a NUMA node, working around firmware whose
 * SRAT/APIC ID information is broken or missing.  No-op without
 * CONFIG_NUMA.
 */
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	/* No SRAT-derived node: fall back to the last-level-cache ID. */
	if (node == NUMA_NO_NODE)
		node = c->topo.llc_id;

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
|
|
|
|
|
|
|
|
/*
 * One-time setup run on the boot CPU only: sanity-check the TSC
 * configuration, enable MWAITX-based delays and set up Speculative
 * Store Bypass Disable (SSBD) via the LS_CFG MSR.
 */
static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		/*
		 * HWCR bit 24 should be set so the TSC counts at P0
		 * frequency; a clear bit means the firmware misconfigured
		 * the TSC (hence the FW_BUG warning below).
		 */
		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	/* Prefer MWAITX over busy-looping for delay_*() when available. */
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	/*
	 * No architectural or virtualized SSBD control: fall back to the
	 * non-architectural LS_CFG MSR mechanism.
	 */
	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			/* SSBD control lives in bit 10 of LS_CFG here. */
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Early per-CPU setup for Hygon: derive feature bits from CPUID leaf
 * 8000_0007 (cached in c->x86_power) and set synthetic capabilities
 * that the rest of the kernel keys off.  Runs before init_hygon().
 */
static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	/* Hygon follows the AMD K8+ lineage; enable the common K8 paths. */
	set_cpu_cap(c, X86_FEATURE_K8);

	/* Record the microcode patch level; _safe tolerates a missing MSR. */
	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);
}
|
|
|
|
|
|
|
|
/*
 * Main per-CPU initialization for Hygon processors: fixes up CPUID
 * feature bits, detects caches/TLBs/NUMA placement, validates SVM
 * availability and configures LFENCE serialization.
 */
static void init_hygon(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	/* REP MOVS/STOS are efficient on this microarchitecture. */
	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	/* Hygon Dhyana is Zen-derived; also advertises Core Performance Boost. */
	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	/*
	 * SVM can be advertised in CPUID yet locked out by firmware via
	 * the VM_CR MSR; hide the feature bit in that case.
	 */
	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);

	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}
|
|
|
|
|
|
|
|
/*
 * Populate the tlb_l?? entry-count tables from the AMD-style extended
 * CPUID leaves: 0x80000006 reports L2 TLB sizes in 12-bit fields,
 * 0x80000005 reports the L1 TLB in 8-bit fields (used as fallback).
 */
static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;	/* L2 TLB entry counts are 12 bits wide */

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	/* EBX[27:16] = L2 DTLB 4K entries, EBX[11:0] = L2 ITLB 4K entries. */
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	/* Likewise, a 4M ITLB entry occupies two 2M entries. */
	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}
|
|
|
|
|
|
|
|
/*
 * Vendor descriptor for Hygon CPUs, registered with the common x86 CPU
 * identification code below.  The hooks run in the order: c_early_init,
 * c_bsp_init (boot CPU only), c_init, c_detect_tlb.
 */
static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor = "Hygon",
	.c_ident = { "HygonGenuine" },	/* CPUID vendor string match */
	.c_early_init = early_init_hygon,
	.c_detect_tlb = cpu_detect_tlb_hygon,
	.c_bsp_init = bsp_init_hygon,
	.c_init = init_hygon,
	.c_x86_vendor = X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
|