x86/boot: Move the early GDT/IDT setup code into startup/

Move the early GDT/IDT setup code that runs long before the kernel
virtual mapping is up into arch/x86/boot/startup/, and build it in a way
that ensures that the code tolerates being called from the 1:1 mapping
of memory. The code itself is left unchanged by this patch.

Also tweak the sed symbol matching pattern in the decompressor to match
on lower case 't' or 'b', as these will be emitted by Clang for symbols
with hidden linkage.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kevin Loughlin <kevinloughlin@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: linux-efi@vger.kernel.org
Link: https://lore.kernel.org/r/20250410134117.3713574-15-ardb+git@google.com
---
 4 files changed, 100 insertions(+), 75 deletions(-)

--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile

@@ -73,7 +73,7 @@ LDFLAGS_vmlinux += -T
 hostprogs := mkpiggy
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
 
 quiet_cmd_voffset = VOFFSET $@
       cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
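For illustration, the VOFFSET rule pipes $(NM) output for vmlinux through the pattern above to generate voffset.h. The addresses and symbol types below are hypothetical; the point is that Clang's nm prints lower case 't'/'b' for symbols with hidden linkage where GNU tools would print 'T'/'B', and the widened character class accepts both:

/*
 * Hypothetical nm output lines:
 *   ffffffff81000000 t _text        (Clang, hidden linkage)
 *   ffffffff83a00000 B __bss_start  (GNU binutils)
 * The sed rule above turns these into a voffset.h of the form:
 */
#define VO__text	_AC(0xffffffff81000000,UL)
#define VO___bss_start	_AC(0xffffffff83a00000,UL)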

--- a/arch/x86/boot/startup/Makefile
+++ b/arch/x86/boot/startup/Makefile

@@ -1,6 +1,21 @@
 # SPDX-License-Identifier: GPL-2.0
 
 KBUILD_AFLAGS += -D__DISABLE_EXPORTS
+KBUILD_CFLAGS += -D__DISABLE_EXPORTS -mcmodel=small -fPIC \
+		 -Os -DDISABLE_BRANCH_PROFILING \
+		 $(DISABLE_STACKLEAK_PLUGIN) \
+		 -fno-stack-protector -D__NO_FORTIFY \
+		 -include $(srctree)/include/linux/hidden.h
+
+# disable ftrace hooks
+KBUILD_CFLAGS := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS))
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+obj-$(CONFIG_X86_64) += gdt_idt.o
 
 lib-$(CONFIG_X86_64) += la57toggle.o
 lib-$(CONFIG_EFI_MIXED) += efi-mixed.o
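The -include of include/linux/hidden.h is what gives these objects hidden linkage in the first place: every symbol in the translation unit gets hidden ELF visibility, so the -fPIC code can reference them directly without GOT indirection, and it is why the decompressor's sed pattern above must accept lower case type letters from Clang's nm. A minimal sketch of the header's effect (not the verbatim kernel header):

/*
 * Sketch: force hidden ELF visibility for all symbols that follow, so
 * position-independent code references them directly instead of
 * through the GOT.
 */
#pragma GCC visibility push(hidden)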

--- /dev/null
+++ b/arch/x86/boot/startup/gdt_idt.c

@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#include <asm/desc.h>
+#include <asm/init.h>
+#include <asm/setup.h>
+#include <asm/sev.h>
+#include <asm/trapnr.h>
+
+/*
+ * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
+ * used until the idt_table takes over. On the boot CPU this happens in
+ * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
+ * this happens in the functions called from head_64.S.
+ *
+ * The idt_table can't be used that early because all the code modifying it is
+ * in idt.c and can be instrumented by tracing or KASAN, which both don't work
+ * during early CPU bringup. Also the idt_table has the runtime vectors
+ * configured which require certain CPU state to be setup already (like TSS),
+ * which also hasn't happened yet in early CPU bringup.
+ */
+static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
+
+/* This may run while still in the direct mapping */
+static void __head startup_64_load_idt(void *vc_handler)
+{
+	struct desc_ptr desc = {
+		.address = (unsigned long)rip_rel_ptr(bringup_idt_table),
+		.size = sizeof(bringup_idt_table) - 1,
+	};
+	struct idt_data data;
+	gate_desc idt_desc;
+
+	/* @vc_handler is set only for a VMM Communication Exception */
+	if (vc_handler) {
+		init_idt_data(&data, X86_TRAP_VC, vc_handler);
+		idt_init_desc(&idt_desc, &data);
+		native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
+	}
+
+	native_load_idt(&desc);
+}
+
+/* This is used when running on kernel addresses */
+void early_setup_idt(void)
+{
+	void *handler = NULL;
+
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		setup_ghcb();
+		handler = vc_boot_ghcb;
+	}
+
+	startup_64_load_idt(handler);
+}
+
+/*
+ * Setup boot CPU state needed before kernel switches to virtual addresses.
+ */
+void __head startup_64_setup_gdt_idt(void)
+{
+	struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
+	void *handler = NULL;
+
+	struct desc_ptr startup_gdt_descr = {
+		.address = (unsigned long)gp->gdt,
+		.size = GDT_SIZE - 1,
+	};
+
+	/* Load GDT */
+	native_load_gdt(&startup_gdt_descr);
+
+	/* New GDT is live - reload data segment registers */
+	asm volatile("movl %%eax, %%ds\n"
+		     "movl %%eax, %%ss\n"
+		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
+
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
+		handler = rip_rel_ptr(vc_no_ghcb);
+
+	startup_64_load_idt(handler);
+}
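Both functions lean on rip_rel_ptr() to stay correct before the kernel virtual mapping is up: taking a symbol's address through a RIP-relative LEA yields the address the code is actually executing from, whether that is the 1:1 mapping or the kernel mapping. A minimal sketch of such a helper, assuming x86-64 GCC/Clang extended asm (the kernel's real helper lives in its arch headers):

/*
 * Sketch of a RIP-relative address helper: the "i" constraint places
 * the symbol in the LEA displacement, so the computed pointer is valid
 * in whichever mapping the code currently runs from.
 */
static inline void *rip_rel_ptr(void *p)
{
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
	return p;
}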

--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c

@@ -512,77 +512,3 @@ void __init __noreturn x86_64_start_reservations(char *real_mode_data)
 
 	start_kernel();
 }
-
-/*
- * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
- * used until the idt_table takes over. On the boot CPU this happens in
- * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
- * this happens in the functions called from head_64.S.
- *
- * The idt_table can't be used that early because all the code modifying it is
- * in idt.c and can be instrumented by tracing or KASAN, which both don't work
- * during early CPU bringup. Also the idt_table has the runtime vectors
- * configured which require certain CPU state to be setup already (like TSS),
- * which also hasn't happened yet in early CPU bringup.
- */
-static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
-
-/* This may run while still in the direct mapping */
-static void __head startup_64_load_idt(void *vc_handler)
-{
-	struct desc_ptr desc = {
-		.address = (unsigned long)rip_rel_ptr(bringup_idt_table),
-		.size = sizeof(bringup_idt_table) - 1,
-	};
-	struct idt_data data;
-	gate_desc idt_desc;
-
-	/* @vc_handler is set only for a VMM Communication Exception */
-	if (vc_handler) {
-		init_idt_data(&data, X86_TRAP_VC, vc_handler);
-		idt_init_desc(&idt_desc, &data);
-		native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
-	}
-
-	native_load_idt(&desc);
-}
-
-/* This is used when running on kernel addresses */
-void early_setup_idt(void)
-{
-	void *handler = NULL;
-
-	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
-		setup_ghcb();
-		handler = vc_boot_ghcb;
-	}
-
-	startup_64_load_idt(handler);
-}
-
-/*
- * Setup boot CPU state needed before kernel switches to virtual addresses.
- */
-void __head startup_64_setup_gdt_idt(void)
-{
-	struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
-	void *handler = NULL;
-
-	struct desc_ptr startup_gdt_descr = {
-		.address = (unsigned long)gp->gdt,
-		.size = GDT_SIZE - 1,
-	};
-
-	/* Load GDT */
-	native_load_gdt(&startup_gdt_descr);
-
-	/* New GDT is live - reload data segment registers */
-	asm volatile("movl %%eax, %%ds\n"
-		     "movl %%eax, %%ss\n"
-		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
-
-	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
-		handler = rip_rel_ptr(vc_no_ghcb);
-
-	startup_64_load_idt(handler);
-}