mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 08:43:31 +00:00

This change makes later calculations about where the kernel is located easier to reason about. To better understand this change, we must first clarify what 'VO' and 'ZO' are. These values were introduced in commits by hpa: 77d1a49995
("x86, boot: make symbols from the main vmlinux available") and 37ba7ab5e3
("x86, boot: make kernel_alignment adjustable; new bzImage fields") Specifically: All names prefixed with 'VO_': - relate to the uncompressed kernel image - the size of the VO image is: VO__end-VO__text ("VO_INIT_SIZE" define) All names prefixed with 'ZO_': - relate to the bootable compressed kernel image (boot/compressed/vmlinux), which is composed of the following memory areas: - head text - compressed kernel (VO image and relocs table) - decompressor code - the size of the ZO image is: ZO__end - ZO_startup_32 ("ZO_INIT_SIZE" define, though see below) The 'INIT_SIZE' value is used to find the larger of the two image sizes: #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) #define VO_INIT_SIZE (VO__end - VO__text) #if ZO_INIT_SIZE > VO_INIT_SIZE # define INIT_SIZE ZO_INIT_SIZE #else # define INIT_SIZE VO_INIT_SIZE #endif The current code uses extract_offset to decide where to position the copied ZO (i.e. ZO starts at extract_offset). (This is why ZO_INIT_SIZE currently includes the extract_offset.) Why does z_extract_offset exist? It's needed because we are trying to minimize the amount of RAM used for the whole act of creating an uncompressed, executable, properly relocation-linked kernel image in system memory. We do this so that kernels can be booted on even very small systems. To achieve the goal of minimal memory consumption we have implemented an in-place decompression strategy: instead of cleanly separating the VO and ZO images and also allocating some memory for the decompression code's runtime needs, we instead create this elaborate layout of memory buffers where the output (decompressed) stream, as it progresses, overlaps with and destroys the input (compressed) stream. This can only be done safely if the ZO image is placed to the end of the VO range, plus a certain amount of safety distance to make sure that when the last bytes of the VO range are decompressed, the compressed stream pointer is safely beyond the end of the VO range. 
z_extract_offset is calculated in arch/x86/boot/compressed/mkpiggy.c during the build process, at a point when we know the exact compressed and uncompressed size of the kernel images and can calculate this safe minimum offset value. (Note that the mkpiggy.c calculation is not perfect, because we don't know the decompressor used at that stage, so the z_extract_offset calculation is necessarily imprecise and is mostly based on gzip internals - we'll improve that in the next patch.) When INIT_SIZE is bigger than VO_INIT_SIZE (uncommon but possible), the copied ZO occupies the memory from extract_offset to the end of decompression buffer. It overlaps with the soon-to-be-uncompressed kernel like this: |-----compressed kernel image------| V V 0 extract_offset +INIT_SIZE |-----------|---------------|-------------------------|--------| | | | | VO__text startup_32 of ZO VO__end ZO__end ^ ^ |-------uncompressed kernel image---------| When INIT_SIZE is equal to VO_INIT_SIZE (likely) there's still space left from end of ZO to the end of decompressing buffer, like below. |-compressed kernel image-| V V 0 extract_offset +INIT_SIZE |-----------|---------------|-------------------------|--------| | | | | VO__text startup_32 of ZO ZO__end VO__end ^ ^ |------------uncompressed kernel image-------------| To simplify calculations and avoid special cases, it is cleaner to always place the compressed kernel image in memory so that ZO__end is at the end of the decompression buffer, instead of placing it at the start of extract_offset as is currently done. This patch adds BP_init_size (which is the INIT_SIZE as passed in from the boot_params) into asm-offsets.c to make it visible to the assembly code. Then when moving the ZO, it calculates the starting position of the copied ZO (via BP_init_size and the ZO run size) so that the VO__end will be at the end of the decompression buffer. 
To make the position calculation safe, the end of ZO is page aligned (and a comment is added to the existing VO alignment for good measure). Signed-off-by: Yinghai Lu <yinghai@kernel.org> [ Rewrote changelog and comments. ] Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Andy Lutomirski <luto@kernel.org> Cc: Baoquan He <bhe@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Dave Young <dyoung@redhat.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vivek Goyal <vgoyal@redhat.com> Cc: lasse.collin@tukaani.org Link: http://lkml.kernel.org/r/1461888548-32439-3-git-send-email-keescook@chromium.org [ Rewrote the changelog some more. ] Signed-off-by: Ingo Molnar <mingo@kernel.org>
283 lines
6.1 KiB
ArmAsm
283 lines
6.1 KiB
ArmAsm
/*
|
|
* linux/boot/head.S
|
|
*
|
|
* Copyright (C) 1991, 1992, 1993 Linus Torvalds
|
|
*/
|
|
|
|
/*
|
|
* head.S contains the 32-bit startup code.
|
|
*
|
|
* NOTE!!! Startup happens at absolute address 0x00001000, which is also where
|
|
* the page directory will exist. The startup code will be overwritten by
|
|
* the page directory. [According to comments etc elsewhere on a compressed
|
|
* kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
|
|
*
|
|
* Page 0 is deliberately kept safe, since System Management Mode code in
|
|
* laptops may need to access the BIOS data stored there. This is also
|
|
* useful for future device drivers that either access the BIOS via VM86
|
|
* mode.
|
|
*/
|
|
|
|
/*
|
|
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
|
|
*/
|
|
.text
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/linkage.h>
|
|
#include <asm/segment.h>
|
|
#include <asm/page_types.h>
|
|
#include <asm/boot.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/bootparam.h>
|
|
|
|
/*
|
|
* The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
|
|
* relocation to get the symbol address in PIC. When the compressed x86
|
|
* kernel isn't built as PIC, the linker optimizes R_386_GOT32X
|
|
* relocations to their fixed symbol addresses. However, when the
|
|
* compressed x86 kernel is loaded at a different address, it leads
|
|
* to the following load failure:
|
|
*
|
|
* Failed to allocate space for phdrs
|
|
*
|
|
* during the decompression stage.
|
|
*
|
|
* If the compressed x86 kernel is relocatable at run-time, it should be
|
|
* compiled with -fPIE, instead of -fPIC, if possible and should be built as
|
|
* Position Independent Executable (PIE) so that linker won't optimize
|
|
* R_386_GOT32X relocation to its fixed symbol address. Older
|
|
* linkers generate R_386_32 relocations against locally defined symbols,
|
|
* _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less
|
|
* optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
|
|
* R_386_32 relocations when relocating the kernel. To generate
|
|
* R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as
|
|
* hidden:
|
|
*/
|
|
.hidden _bss
|
|
.hidden _ebss
|
|
.hidden _got
|
|
.hidden _egot
|
|
|
|
__HEAD
|
|
ENTRY(startup_32)
|
|
#ifdef CONFIG_EFI_STUB
|
|
jmp preferred_addr
|
|
|
|
/*
|
|
* We don't need the return address, so set up the stack so
|
|
* efi_main() can find its arguments.
|
|
*/
|
|
ENTRY(efi_pe_entry)
|
|
add $0x4, %esp
|
|
|
|
call 1f
|
|
1: popl %esi
|
|
subl $1b, %esi
|
|
|
|
popl %ecx
|
|
movl %ecx, efi32_config(%esi) /* Handle */
|
|
popl %ecx
|
|
movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
|
|
|
|
/* Relocate efi_config->call() */
|
|
leal efi32_config(%esi), %eax
|
|
add %esi, 88(%eax)
|
|
pushl %eax
|
|
|
|
call make_boot_params
|
|
cmpl $0, %eax
|
|
je fail
|
|
movl %esi, BP_code32_start(%eax)
|
|
popl %ecx
|
|
pushl %eax
|
|
pushl %ecx
|
|
jmp 2f /* Skip efi_config initialization */
|
|
|
|
ENTRY(efi32_stub_entry)
|
|
add $0x4, %esp
|
|
popl %ecx
|
|
popl %edx
|
|
|
|
call 1f
|
|
1: popl %esi
|
|
subl $1b, %esi
|
|
|
|
movl %ecx, efi32_config(%esi) /* Handle */
|
|
movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
|
|
|
|
/* Relocate efi_config->call() */
|
|
leal efi32_config(%esi), %eax
|
|
add %esi, 88(%eax)
|
|
pushl %eax
|
|
2:
|
|
call efi_main
|
|
cmpl $0, %eax
|
|
movl %eax, %esi
|
|
jne 2f
|
|
fail:
|
|
/* EFI init failed, so hang. */
|
|
hlt
|
|
jmp fail
|
|
2:
|
|
movl BP_code32_start(%esi), %eax
|
|
leal preferred_addr(%eax), %eax
|
|
jmp *%eax
|
|
|
|
preferred_addr:
|
|
#endif
|
|
cld
|
|
/*
|
|
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
|
|
* us to not reload segments
|
|
*/
|
|
testb $KEEP_SEGMENTS, BP_loadflags(%esi)
|
|
jnz 1f
|
|
|
|
cli
|
|
movl $__BOOT_DS, %eax
|
|
movl %eax, %ds
|
|
movl %eax, %es
|
|
movl %eax, %fs
|
|
movl %eax, %gs
|
|
movl %eax, %ss
|
|
1:
|
|
|
|
/*
|
|
* Calculate the delta between where we were compiled to run
|
|
* at and where we were actually loaded at. This can only be done
|
|
* with a short local call on x86. Nothing else will tell us what
|
|
* address we are running at. The reserved chunk of the real-mode
|
|
* data at 0x1e4 (defined as a scratch field) are used as the stack
|
|
* for this calculation. Only 4 bytes are needed.
|
|
*/
|
|
leal (BP_scratch+4)(%esi), %esp
|
|
call 1f
|
|
1: popl %ebp
|
|
subl $1b, %ebp
|
|
|
|
/*
|
|
* %ebp contains the address we are loaded at by the boot loader and %ebx
|
|
* contains the address where we should move the kernel image temporarily
|
|
* for safe in-place decompression.
|
|
*/
|
|
|
|
#ifdef CONFIG_RELOCATABLE
|
|
movl %ebp, %ebx
|
|
movl BP_kernel_alignment(%esi), %eax
|
|
decl %eax
|
|
addl %eax, %ebx
|
|
notl %eax
|
|
andl %eax, %ebx
|
|
cmpl $LOAD_PHYSICAL_ADDR, %ebx
|
|
jge 1f
|
|
#endif
|
|
movl $LOAD_PHYSICAL_ADDR, %ebx
|
|
1:
|
|
|
|
/* Target address to relocate to for decompression */
|
|
movl BP_init_size(%esi), %eax
|
|
subl $_end, %eax
|
|
addl %eax, %ebx
|
|
|
|
/* Set up the stack */
|
|
leal boot_stack_end(%ebx), %esp
|
|
|
|
/* Zero EFLAGS */
|
|
pushl $0
|
|
popfl
|
|
|
|
/*
|
|
* Copy the compressed kernel to the end of our buffer
|
|
* where decompression in place becomes safe.
|
|
*/
|
|
pushl %esi
|
|
leal (_bss-4)(%ebp), %esi
|
|
leal (_bss-4)(%ebx), %edi
|
|
movl $(_bss - startup_32), %ecx
|
|
shrl $2, %ecx
|
|
std
|
|
rep movsl
|
|
cld
|
|
popl %esi
|
|
|
|
/*
|
|
* Jump to the relocated address.
|
|
*/
|
|
leal relocated(%ebx), %eax
|
|
jmp *%eax
|
|
ENDPROC(startup_32)
|
|
|
|
.text
|
|
relocated:
|
|
|
|
/*
|
|
* Clear BSS (stack is currently empty)
|
|
*/
|
|
xorl %eax, %eax
|
|
leal _bss(%ebx), %edi
|
|
leal _ebss(%ebx), %ecx
|
|
subl %edi, %ecx
|
|
shrl $2, %ecx
|
|
rep stosl
|
|
|
|
/*
|
|
* Adjust our own GOT
|
|
*/
|
|
leal _got(%ebx), %edx
|
|
leal _egot(%ebx), %ecx
|
|
1:
|
|
cmpl %ecx, %edx
|
|
jae 2f
|
|
addl %ebx, (%edx)
|
|
addl $4, %edx
|
|
jmp 1b
|
|
2:
|
|
|
|
/*
|
|
* Do the extraction, and jump to the new kernel..
|
|
*/
|
|
/* push arguments for extract_kernel: */
|
|
pushl $z_run_size /* size of kernel with .bss and .brk */
|
|
pushl $z_output_len /* decompressed length, end of relocs */
|
|
|
|
movl BP_init_size(%esi), %eax
|
|
subl $_end, %eax
|
|
movl %ebx, %ebp
|
|
subl %eax, %ebp
|
|
pushl %ebp /* output address */
|
|
|
|
pushl $z_input_len /* input_len */
|
|
leal input_data(%ebx), %eax
|
|
pushl %eax /* input_data */
|
|
leal boot_heap(%ebx), %eax
|
|
pushl %eax /* heap area */
|
|
pushl %esi /* real mode pointer */
|
|
call extract_kernel /* returns kernel location in %eax */
|
|
addl $28, %esp
|
|
|
|
/*
|
|
* Jump to the extracted kernel.
|
|
*/
|
|
xorl %ebx, %ebx
|
|
jmp *%eax
|
|
|
|
#ifdef CONFIG_EFI_STUB
|
|
.data
|
|
efi32_config:
|
|
.fill 11,8,0
|
|
.long efi_call_phys
|
|
.long 0
|
|
.byte 0
|
|
#endif
|
|
|
|
/*
|
|
* Stack and heap for uncompression
|
|
*/
|
|
.bss
|
|
.balign 4
|
|
boot_heap:
|
|
.fill BOOT_HEAP_SIZE, 1, 0
|
|
boot_stack:
|
|
.fill BOOT_STACK_SIZE, 1, 0
|
|
boot_stack_end:
|