/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_MICROCODE_INTERNAL_H
#define _X86_MICROCODE_INTERNAL_H

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/microcode.h>

struct device;

enum ucode_state {
	UCODE_OK	= 0,
	UCODE_NEW,
	UCODE_NEW_SAFE,
	UCODE_UPDATED,
	UCODE_NFOUND,
	UCODE_ERROR,
	UCODE_TIMEOUT,
	UCODE_OFFLINE,
};

struct microcode_ops {
	enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
	void (*microcode_fini_cpu)(int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that the callbacks
	 * below run on a target CPU when they are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	enum ucode_state	(*apply_microcode)(int cpu);
	int			(*collect_cpu_info)(int cpu, struct cpu_signature *csig);
	void			(*finalize_late_load)(int result);
	unsigned int		nmi_safe	: 1,
				use_nmi		: 1;
};
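
/*
 * Illustrative sketch (hypothetical names, not lifted from the real vendor
 * drivers): each vendor side provides a static instance of the ops above and
 * hands it back from its init function, roughly along these lines:
 *
 *	static struct microcode_ops example_ucode_ops = {
 *		.request_microcode_fw	= example_request_fw,
 *		.collect_cpu_info	= example_collect_cpu_info,
 *		.apply_microcode	= example_apply_microcode,
 *	};
 *
 *	struct microcode_ops *init_example_microcode(void)
 *	{
 *		return &example_ucode_ops;
 *	}
 *
 * Per the comment in the struct, apply_microcode(), collect_cpu_info() and
 * finalize_late_load() are invoked on the target CPU by the core.
 */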
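/*
 * Revision data gathered during early loading: old_rev is the microcode
 * revision found on the CPU before the update, new_rev the revision of the
 * applied update. The early loaders below (load_ucode_*_bsp()) take a
 * pointer to this data and fill it in for the common code.
 */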
struct early_load_data {
	u32 old_rev;
	u32 new_rev;
};

extern struct early_load_data early_data;
extern struct ucode_cpu_info ucode_cpu_info[];
struct cpio_data find_microcode_in_initrd(const char *path);

#define MAX_UCODE_COUNT 128

#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

#define CPUID_IS(a, b, c, ebx, ecx, edx)		\
		(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
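
/*
 * Worked example (for illustration): CPUID leaf 0 returns the vendor string
 * in EBX, EDX, ECX, in that order. For "GenuineIntel" that is:
 *
 *	EBX == QCHAR('G', 'e', 'n', 'u') == CPUID_INTEL1
 *	EDX == QCHAR('i', 'n', 'e', 'I') == CPUID_INTEL2
 *	ECX == QCHAR('n', 't', 'e', 'l') == CPUID_INTEL3
 *
 * CPUID_IS() XORs each register with the expected constant and is true only
 * when all three match, so CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3,
 * ebx, ecx, edx) identifies an Intel CPU. Note that the (a, b, c) parameters
 * correspond to (EBX, EDX, ECX) respectively.
 */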
/*
 * During the early microcode loading phase on the BSP, boot_cpu_data is not
 * set up yet, so x86_cpuid_vendor() is used to determine the vendor.
 *
 * In the 32-bit AP case, accessing boot_cpu_data would need a linear address.
 * To keep the code simple, x86_cpuid_vendor() is used for the APs as well.
 *
 * x86_cpuid_vendor() reads the vendor information directly from CPUID.
 */
static inline int x86_cpuid_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
		return X86_VENDOR_INTEL;

	if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
		return X86_VENDOR_AMD;

	return X86_VENDOR_UNKNOWN;
}

static inline unsigned int x86_cpuid_family(void)
{
	u32 eax = 0x00000001;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	return x86_family(eax);
}

extern bool dis_ucode_ldr;
extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
#endif /* !CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL
void load_ucode_intel_bsp(struct early_load_data *ed);
void load_ucode_intel_ap(void);
void reload_ucode_intel(void);
struct microcode_ops *init_intel_microcode(void);
#else /* CONFIG_CPU_SUP_INTEL */
static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
static inline void load_ucode_intel_ap(void) { }
static inline void reload_ucode_intel(void) { }
static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
#endif /* !CONFIG_CPU_SUP_INTEL */

#endif /* _X86_MICROCODE_INTERNAL_H */