mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Changes to the existing page table macros will allow the SME support to be
enabled in a simple fashion with minimal changes to files that use these
macros. Since the memory encryption mask will now be part of the regular
pagetable macros, we introduce two new macros (_PAGE_TABLE_NOENC and
_KERNPG_TABLE_NOENC) to allow for early pagetable creation/initialization
without the encryption mask before SME becomes active.

Two new pgprot() macros are defined to allow setting or clearing the page
encryption mask.

The FIXMAP_PAGE_NOCACHE define is introduced for use with MMIO. SME does
not support encryption for MMIO areas so this define removes the encryption
mask from the page attribute.

Two new macros are introduced (__sme_pa() / __sme_pa_nodebug()) to allow
creating a physical address with the encryption mask. These are used when
working with the cr3 register so that the PGD can be encrypted. The current
__va() macro is updated so that the virtual address is generated based off
of the physical address without the encryption mask, thus allowing the same
virtual address to be generated regardless of whether encryption is enabled
for that physical location or not.

Also, an early initialization function is added for SME. If SME is active,
this function:
 - Updates the early_pmd_flags so that early page faults create mappings
   with the encryption mask.
 - Updates the __supported_pte_mask to include the encryption mask.
 - Updates the protection_map entries to include the encryption mask so
   that user-space allocations will automatically have the encryption mask
   applied.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Toshimitsu Kani <toshi.kani@hpe.com>
Cc: kasan-dev@googlegroups.com
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/b36e952c4c39767ae7f0a41cf5345adf27438480.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
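As a rough sketch of what the commit message describes (simplified, not the
exact upstream definitions), the new helpers and the early initialization
hook could look roughly like the following. The names sme_me_mask,
__sme_set()/__sme_clr() and sme_early_init() are assumed here purely for
illustration; only the behaviour described above is taken from the commit.

/*
 * Illustrative sketch only -- simplified from what the commit message
 * describes, not the exact upstream code.
 */

/*
 * Page-table flag combinations without the encryption mask, for early
 * pagetable creation before SME becomes active.
 */
#define _PAGE_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE_NOENC	(_PAGE_PRESENT | _PAGE_RW | \
				 _PAGE_ACCESSED | _PAGE_DIRTY)

/* Set or clear the encryption mask in a pgprot value. */
#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))

/*
 * Physical addresses carrying the encryption mask, e.g. for loading an
 * encrypted PGD into CR3.
 */
#define __sme_pa(x)		(__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)

/*
 * Early SME initialization as described above: only acts when SME is
 * active (sme_me_mask is non-zero).
 */
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	/* Early page faults must create encrypted mappings. */
	early_pmd_flags = __sme_set(early_pmd_flags);

	/* The encryption mask is a supported PTE bit. */
	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* User-space allocations get the encryption mask by default. */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}

With the encryption mask folded into the regular macros this way, call
sites need only small adjustments; the KASAN init code below, for example,
simply ORs _PAGE_ENC into its early shadow PTE values.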
164 lines
4.1 KiB
C
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	return vmemmap_populate(start, end, NUMA_NO_NODE);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}

#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}