#include "misc.h"
#include <asm/msr.h>
#include <asm/archrandom.h>
#include <asm/e820.h>

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

/*
 * Commit e2b32e678513 ("x86, kaslr: randomize module base load address")
 * made the module base load address unconditionally randomized whenever
 * CONFIG_RANDOMIZE_BASE is defined and "nokaslr" is absent from the
 * command line.  That is inconsistent with how choose_kernel_location()
 * decides whether to randomize the kernel load base: CONFIG_HIBERNATION
 * disables kASLR by default (unless "kaslr" is passed explicitly), so a
 * CONFIG_HIBERNATION && CONFIG_RANDOMIZE_BASE kernel would randomize
 * module addresses while leaving the kernel base fixed.
 *
 * Rather than patching the logic in module.c, a new bootparam setup
 * data type, SETUP_KASLR, records whether kASLR was actually applied
 * during kernel decompression.  The kernel proper sets a global
 * 'kaslr_enabled' flag from it, so any kernel code (module loading,
 * live patching, ...) can base decisions on the real kASLR state.
 */
struct kaslr_setup_data {
	__u64 next;
	__u32 type;
	__u32 len;
	__u8 data[1];
} kaslr_setup_data;

#define I8254_PORT_CONTROL	0x43
#define I8254_PORT_COUNTER0	0x40
#define I8254_CMD_READBACK	0xC0
#define I8254_SELECT_COUNTER0	0x02
#define I8254_STATUS_NOTREADY	0x40
static inline u16 i8254(void)
{
	u16 status, timer;

	do {
		/* outb() takes (value, port): send the read-back/latch
		 * command for counter 0 to the PIT control port. */
		outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
		     I8254_PORT_CONTROL);
		status = inb(I8254_PORT_COUNTER0);
		timer  = inb(I8254_PORT_COUNTER0);
		timer |= inb(I8254_PORT_COUNTER0) << 8;
	} while (status & I8254_STATUS_NOTREADY);

	return timer;
}
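
/*
 * Note on the read-back protocol above: the command latches counter
 * 0's status byte followed by its 16-bit count (low byte, then high
 * byte).  I8254_STATUS_NOTREADY is the PIT's null-count flag, so the
 * loop retries until a loaded, meaningful count has been latched.
 */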

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_random_boot(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, real_mode, sizeof(*real_mode));

	return hash;
}

static unsigned long get_random_long(void)
{
#ifdef CONFIG_X86_64
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_random_boot();
	bool use_i8254 = true;

	debug_putstr("KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		rdtscll(raw);

		random ^= raw;
		use_i8254 = false;
	}

	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;

	debug_putstr("...\n");

	return random;
}
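
/*
 * Illustration only (kept out of the build): "mul %3" above computes
 * the double-width product of 'random' and 'mix_const'; the low half
 * comes back in "=a" and the high half in "=d", and the final
 * 'random += raw' folds the high half into the low half.  On 64-bit,
 * assuming a compiler that provides __uint128_t, an equivalent
 * portable version would be:
 */
#if 0
static unsigned long circular_multiply_sketch(unsigned long random,
					      unsigned long mix_const)
{
	__uint128_t product = (__uint128_t)random * mix_const;

	/* low half + high half, matching 'random += raw' above */
	return (unsigned long)product + (unsigned long)(product >> 64);
}
#endif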

struct mem_vector {
	unsigned long start;
	unsigned long size;
};

#define MEM_AVOID_MAX 5
static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
	/* Item at least partially before region. */
	if (item->start < region->start)
		return false;
	/* Item at least partially after region. */
	if (item->start + item->size > region->start + region->size)
		return false;
	return true;
}

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}

static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output, unsigned long output_size)
{
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	unsigned long unsafe, unsafe_len;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression (see calculations at top of misc.c).
	 */
	unsafe_len = (output_size >> 12) + 32768 + 18;
	unsafe = (unsigned long)input + input_size - unsafe_len;
	mem_avoid[0].start = unsafe;
	mem_avoid[0].size = unsafe_len;

	/* Avoid initrd. */
	initrd_start  = (u64)real_mode->ext_ramdisk_image << 32;
	initrd_start |= real_mode->hdr.ramdisk_image;
	initrd_size  = (u64)real_mode->ext_ramdisk_size << 32;
	initrd_size |= real_mode->hdr.ramdisk_size;
	mem_avoid[1].start = initrd_start;
	mem_avoid[1].size = initrd_size;

	/* Avoid kernel command line. */
	cmd_line  = (u64)real_mode->ext_cmd_line_ptr << 32;
	cmd_line |= real_mode->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[2].start = cmd_line;
	mem_avoid[2].size = cmd_line_size;

	/* Avoid heap memory. */
	mem_avoid[3].start = (unsigned long)free_mem_ptr;
	mem_avoid[3].size = BOOT_HEAP_SIZE;

	/* Avoid stack memory. */
	mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
	mem_avoid[4].size = BOOT_STACK_SIZE;
}
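
/*
 * Worked example for the unsafe_len calculation above: decompressing
 * a 16 MiB kernel (output_size == 0x1000000) gives
 *
 *	unsafe_len = (0x1000000 >> 12) + 32768 + 18
 *		   = 4096 + 32768 + 18
 *		   = 36882 bytes
 *
 * reserved at the end of the input buffer, per the in-place
 * decompression bounds derived in misc.c.
 */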

/* Does this memory vector overlap a known avoided area? */
static bool mem_avoid_overlap(struct mem_vector *img)
{
	int i;
	struct setup_data *ptr;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]))
			return true;
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid))
			return true;

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return false;
}

static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
			   CONFIG_PHYSICAL_ALIGN];
static unsigned long slot_max;

static void slots_append(unsigned long addr)
{
	/* Overflowing the slots list should be impossible. */
	if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
			CONFIG_PHYSICAL_ALIGN)
		return;

	slots[slot_max++] = addr;
}

static unsigned long slots_fetch_random(void)
{
	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	return slots[get_random_long() % slot_max];
}
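
/*
 * Worked example (values are illustrative defaults, not guaranteed by
 * this file): with CONFIG_RANDOMIZE_BASE_MAX_OFFSET == 1 GiB and
 * CONFIG_PHYSICAL_ALIGN == 2 MiB, slots[] can hold 1 GiB / 2 MiB ==
 * 512 candidate addresses, so slots_fetch_random() yields at most
 * 9 bits of entropy for the chosen base.
 */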

static void process_e820_entry(struct e820entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, img;

	/* Skip non-RAM entries. */
	if (entry->type != E820_RAM)
		return;

	/* Ignore entries entirely above our maximum. */
	if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	region.start = entry->addr;
	region.size = entry->size;

	/* Potentially raise address to minimum location. */
	if (region.start < minimum)
		region.start = minimum;

	/* Potentially raise address to meet alignment requirements. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

	/* Did we raise the address above the bounds of this e820 region? */
	if (region.start > entry->addr + entry->size)
		return;

	/* Reduce size by any delta from the original address. */
	region.size -= region.start - entry->addr;

	/* Reduce maximum size to fit end of image within maximum limit. */
	if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
		region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;

	/* Walk each aligned slot and check for avoided areas. */
	for (img.start = region.start, img.size = image_size ;
	     mem_contains(&region, &img) ;
	     img.start += CONFIG_PHYSICAL_ALIGN) {
		if (mem_avoid_overlap(&img))
			continue;
		slots_append(img.start);
	}
}
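
/*
 * Worked example (illustrative): given an E820_RAM entry covering
 * [32 MiB, 256 MiB), minimum == 16 MiB and image_size == 16 MiB, the
 * walk above proposes bases 32 MiB, 34 MiB, 36 MiB, ... in
 * CONFIG_PHYSICAL_ALIGN (here 2 MiB) steps, appending each candidate
 * whose [start, start + image_size) window avoids every mem_avoid
 * range and all setup_data entries.
 */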

static unsigned long find_random_addr(unsigned long minimum,
				      unsigned long size)
{
	int i;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < real_mode->e820_entries; i++) {
		process_e820_entry(&real_mode->e820_map[i], minimum, size);
	}

	return slots_fetch_random();
}

static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
{
	struct setup_data *data;

	kaslr_setup_data.type = SETUP_KASLR;
	kaslr_setup_data.len = 1;
	kaslr_setup_data.next = 0;
	kaslr_setup_data.data[0] = enabled;

	data = (struct setup_data *)(unsigned long)params->hdr.setup_data;

	/* Append to the end of the setup_data linked list. */
	while (data && data->next)
		data = (struct setup_data *)(unsigned long)data->next;

	if (data)
		data->next = (unsigned long)&kaslr_setup_data;
	else
		params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
}
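
/*
 * Illustration only (kept out of the build): a minimal sketch of the
 * consumer side in the kernel proper.  Assuming early setup walks the
 * boot_params setup_data list the way arch/x86/kernel/setup.c does,
 * the SETUP_KASLR entry appended above could be turned into a global
 * 'kaslr_enabled' flag roughly like this; the helper name and exact
 * mapping calls are illustrative, not the committed implementation.
 */
#if 0
static bool kaslr_enabled;

static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
{
	/* Map the entry and read data[0], the 'enabled' byte written
	 * by add_kaslr_setup_data(). */
	struct setup_data *data;

	data = early_memremap(pa_data, sizeof(*data) + 1);
	kaslr_enabled = data->data[0];
	early_memunmap(data, sizeof(*data) + 1);
}
#endif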

unsigned char *choose_kernel_location(struct boot_params *params,
				      unsigned char *input,
				      unsigned long input_size,
				      unsigned char *output,
				      unsigned long output_size)
{
	unsigned long choice = (unsigned long)output;
	unsigned long random;

#ifdef CONFIG_HIBERNATION
	if (!cmdline_find_option_bool("kaslr")) {
		debug_putstr("KASLR disabled by default...\n");
		add_kaslr_setup_data(params, 0);
		goto out;
	}
#else
	if (cmdline_find_option_bool("nokaslr")) {
		debug_putstr("KASLR disabled by cmdline...\n");
		add_kaslr_setup_data(params, 0);
		goto out;
	}
#endif
	add_kaslr_setup_data(params, 1);

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init((unsigned long)input, input_size,
		       (unsigned long)output, output_size);

	/* Walk e820 and find a random address. */
	random = find_random_addr(choice, output_size);
	if (!random) {
		debug_putstr("KASLR could not find suitable E820 region...\n");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random < choice)
		goto out;

	choice = random;
out:
	return (unsigned char *)choice;
}