mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-04 08:17:46 +00:00

This follows the approach used on x86/s390: the early shadow page is not mapped to cover the VMALLOC space. Instead, the top-level page table is prepopulated for the range that would otherwise be left empty, and the lower levels are filled dynamically upon memory allocation during boot. Signed-off-by: Nylon Chen <nylon7@andestech.com> Signed-off-by: Nick Hu <nickhu@andestech.com> Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
168 lines
4.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

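/*
 * Boot-time allocation helper: returns a naturally aligned block from
 * memblock or panics, since KASAN cannot continue without backing pages
 * for its shadow page tables.
 */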
static __init void *early_alloc(size_t size, int node)
{
        void *ptr = memblock_alloc_try_nid(size, size,
                __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

        if (!ptr)
                panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
                      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

        return ptr;
}

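/*
 * kasan_early_init() maps the entire shadow region to the zeroed
 * kasan_early_shadow_page through shared PMD and PTE tables, in both
 * early_pg_dir and swapper_pg_dir, so that instrumented accesses are
 * valid before the real shadow memory is built by kasan_init().
 */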
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
        uintptr_t i;
        pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               PAGE_KERNEL));

        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
                        pfn_pmd(PFN_DOWN
                                (__pa((uintptr_t) kasan_early_shadow_pte)),
                                __pgprot(_PAGE_TABLE)));

        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN
                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
                                __pgprot(_PAGE_TABLE)));

        /* init for swapper_pg_dir */
        pgd = pgd_offset_k(KASAN_SHADOW_START);

        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN
                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
                                __pgprot(_PAGE_TABLE)));

        local_flush_tlb_all();
}

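/*
 * populate() builds writable shadow memory for the range [start, end):
 * backing pages and the intermediate PTE/PMD tables are allocated from
 * memblock, wired into the kernel page table, and the freshly mapped
 * shadow is cleared.
 */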
static void __init populate(void *start, void *end)
{
        unsigned long i, offset;
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
        unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
        unsigned long n_ptes =
                ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
        unsigned long n_pmds =
                ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

        pte_t *pte =
                memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
        pmd_t *pmd =
                memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        pgd_t *pgd = pgd_offset_k(vaddr);

        for (i = 0; i < n_pages; i++) {
                phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
                set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
        }

        for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
                set_pmd(&pmd[i],
                        pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
                                __pgprot(_PAGE_TABLE)));

        for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
                set_pgd(&pgd[i],
                        pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                                __pgprot(_PAGE_TABLE)));

        local_flush_tlb_all();
        memset(start, 0, end - start);
}

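/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is only shallow-populated:
 * the top-level page table entries are prepopulated here and the lower
 * levels are filled dynamically as memory is allocated.
 */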
void __init kasan_shallow_populate(void *start, void *end)
{
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
        unsigned long pfn;
        int index;
        void *p;
        pud_t *pud_dir, *pud_k;
        pgd_t *pgd_dir, *pgd_k;
        p4d_t *p4d_dir, *p4d_k;

        while (vaddr < vend) {
                index = pgd_index(vaddr);
                pfn = csr_read(CSR_SATP) & SATP_PPN;
                pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
                pgd_k = init_mm.pgd + index;
                pgd_dir = pgd_offset_k(vaddr);
                set_pgd(pgd_dir, *pgd_k);

                p4d_dir = p4d_offset(pgd_dir, vaddr);
                p4d_k = p4d_offset(pgd_k, vaddr);

                vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
                pud_dir = pud_offset(p4d_dir, vaddr);
                pud_k = pud_offset(p4d_k, vaddr);

                if (pud_present(*pud_dir)) {
                        p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
                        pud_populate(&init_mm, pud_dir, p);
                }
                vaddr += PAGE_SIZE;
        }
}

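/*
 * kasan_init() covers non-memory regions with the early (zero) shadow,
 * handles the vmalloc shadow according to CONFIG_KASAN_VMALLOC, builds
 * real shadow for every physical memory range, remaps the early shadow
 * page read-only, and finally enables reporting by clearing
 * init_task.kasan_depth.
 */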
void __init kasan_init(void)
{
        phys_addr_t _start, _end;
        u64 i;

        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)
                                                                VMEMMAP_END));
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kasan_shallow_populate(
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
        else
                kasan_populate_early_shadow(
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

        for_each_mem_range(i, &_start, &_end) {
                void *start = (void *)_start;
                void *end = (void *)_end;

                if (start >= end)
                        break;

                populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        }

        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               __pgprot(_PAGE_PRESENT | _PAGE_READ |
                                        _PAGE_ACCESSED)));

        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        init_task.kasan_depth = 0;
}