2022-12-13 11:35:11 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include <linux/sched/task.h>
|
|
|
|
#include <linux/pgtable.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
#include <asm/facility.h>
|
|
|
|
#include <asm/sections.h>
|
2023-02-08 18:11:25 +01:00
|
|
|
#include <asm/physmem_info.h>
|
2022-12-11 08:18:57 +01:00
|
|
|
#include <asm/maccess.h>
|
2022-12-19 21:08:27 +01:00
|
|
|
#include <asm/abs_lowcore.h>
|
2022-12-13 11:35:11 +01:00
|
|
|
#include "decompressor.h"
|
|
|
|
#include "boot.h"
|
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
/* ASCE pointing at invalid_pg_dir; preserved into the decompressed kernel */
unsigned long __bootdata_preserved(s390_invalid_asce);
|
|
|
|
|
2022-12-13 11:35:11 +01:00
|
|
|
/*
 * The decompressor cannot reference vmlinux symbols directly; resolve
 * init_mm and the page directories via the offsets recorded in the
 * vmlinux info block instead.
 */
#define init_mm (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir vmlinux.swapper_pg_dir_off
#define invalid_pg_dir vmlinux.invalid_pg_dir_off
|
|
|
|
|
2022-12-11 08:18:57 +01:00
|
|
|
/*
|
|
|
|
* Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
|
|
|
|
*/
|
|
|
|
static inline pte_t *__virt_to_kpte(unsigned long va)
|
|
|
|
{
|
|
|
|
return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
/* How pgtable_populate() translates virtual to physical addresses. */
enum populate_mode {
	POPULATE_NONE,		/* build tables only; _pa() yields -1, no valid backing */
	POPULATE_ONE2ONE,	/* identity mapping: physical == virtual */
	POPULATE_ABS_LOWCORE,	/* map via __abs_lowcore_pa() to the absolute lowcore */
};
|
|
|
|
|
2022-12-13 11:35:11 +01:00
|
|
|
static void *boot_crst_alloc(unsigned long val)
|
|
|
|
{
|
2023-02-02 13:59:36 +01:00
|
|
|
unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
|
2022-12-13 11:35:11 +01:00
|
|
|
unsigned long *table;
|
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
|
|
|
|
crst_table_init(table, val);
|
2022-12-13 11:35:11 +01:00
|
|
|
return table;
|
|
|
|
}
|
|
|
|
|
|
|
|
static pte_t *boot_pte_alloc(void)
|
|
|
|
{
|
|
|
|
pte_t *pte;
|
|
|
|
|
2023-02-02 13:59:36 +01:00
|
|
|
pte = (pte_t *)physmem_alloc_top_down(RR_VMEM, _PAGE_TABLE_SIZE, _PAGE_TABLE_SIZE);
|
2022-12-13 11:35:11 +01:00
|
|
|
memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
|
|
|
|
return pte;
|
|
|
|
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
static unsigned long _pa(unsigned long addr, enum populate_mode mode)
|
|
|
|
{
|
|
|
|
switch (mode) {
|
2022-12-11 08:18:57 +01:00
|
|
|
case POPULATE_NONE:
|
|
|
|
return -1;
|
2022-12-15 10:33:52 +01:00
|
|
|
case POPULATE_ONE2ONE:
|
|
|
|
return addr;
|
2022-12-19 21:08:27 +01:00
|
|
|
case POPULATE_ABS_LOWCORE:
|
|
|
|
return __abs_lowcore_pa(addr);
|
2022-12-15 10:33:52 +01:00
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-13 11:35:11 +01:00
|
|
|
/*
 * A large pud mapping may be used only when the EDAT2 facility is
 * available and [addr, end) fully covers a PUD_SIZE-aligned region.
 */
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end)
{
	if (!machine.has_edat2)
		return false;
	return IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE;
}
|
|
|
|
|
|
|
|
/*
 * A large pmd mapping may be used only when the EDAT1 facility is
 * available and [addr, end) fully covers a PMD_SIZE-aligned region.
 */
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end)
{
	if (!machine.has_edat1)
		return false;
	return IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE;
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
/*
 * Fill all not-yet-populated pte entries below @pmd covering
 * [addr, end), translating addresses according to @mode.
 */
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	pte_t *ptep = pte_offset_kernel(pmd, addr);

	while (addr < end) {
		if (pte_none(*ptep)) {
			pte_t new = __pte(_pa(addr, mode));

			new = set_pte_bit(new, PAGE_KERNEL_EXEC);
			set_pte(ptep, new);
		}
		addr += PAGE_SIZE;
		ptep++;
	}
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
/*
 * Walk the pmd entries below @pud covering [addr, end).  Empty entries
 * are mapped with a single large (segment) entry where can_large_pmd()
 * allows it; otherwise a page table is allocated and populated at pte
 * granularity.  Existing large entries are left untouched.
 */
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	pmd_t *pmd, entry;
	pte_t *pte;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			/* Map the whole segment with one large entry if possible */
			if (can_large_pmd(pmd, addr, next)) {
				entry = __pmd(_pa(addr, mode));
				entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
				set_pmd(pmd, entry);
				continue;
			}
			pte = boot_pte_alloc();
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			/* Range already covered by an existing large mapping */
			continue;
		}
		pgtable_pte_populate(pmd, addr, next, mode);
	}
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
/*
 * Walk the pud entries below @p4d covering [addr, end).  Empty entries
 * are mapped with a single large (region-third) entry where
 * can_large_pud() allows it; otherwise a pmd table is allocated and the
 * walk descends one level.  Existing large entries are left untouched.
 */
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	unsigned long next;
	pud_t *pud, entry;
	pmd_t *pmd;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			/* Map the whole region with one large entry if possible */
			if (can_large_pud(pud, addr, next)) {
				entry = __pud(_pa(addr, mode));
				entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
				set_pud(pud, entry);
				continue;
			}
			pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			/* Range already covered by an existing large mapping */
			continue;
		}
		pgtable_pmd_populate(pud, addr, next, mode);
	}
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
/*
 * Walk the p4d entries below @pgd covering [addr, end), allocating a
 * pud table for every empty entry, then descend one level.
 */
static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
				 enum populate_mode mode)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	while (addr < end) {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			pud_t *pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);

			p4d_populate(&init_mm, p4d, pud);
		}
		pgtable_pud_populate(p4d, addr, next, mode);
		addr = next;
		p4d++;
	}
}
|
|
|
|
|
2022-12-15 10:33:52 +01:00
|
|
|
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
|
2022-12-13 11:35:11 +01:00
|
|
|
{
|
|
|
|
unsigned long next;
|
|
|
|
pgd_t *pgd;
|
|
|
|
p4d_t *p4d;
|
|
|
|
|
|
|
|
pgd = pgd_offset(&init_mm, addr);
|
|
|
|
for (; addr < end; addr = next, pgd++) {
|
|
|
|
next = pgd_addr_end(addr, end);
|
|
|
|
if (pgd_none(*pgd)) {
|
|
|
|
p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
|
|
|
|
pgd_populate(&init_mm, pgd, p4d);
|
|
|
|
}
|
2022-12-15 10:33:52 +01:00
|
|
|
pgtable_p4d_populate(pgd, addr, next, mode);
|
2022-12-13 11:35:11 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-28 23:55:04 +01:00
|
|
|
/*
 * Build the boot-time kernel page tables and activate them: identity-map
 * all usable physical memory, map the absolute lowcore area, reserve a
 * virtual window for memcpy_real(), then load the new ASCEs into the
 * control registers.
 *
 * @asce_limit: size of the address space to set up; _REGION1_SIZE
 *              selects a deeper (region-second) top-level table.
 */
void setup_vmem(unsigned long asce_limit)
{
	unsigned long start, end;
	unsigned long asce_type;
	unsigned long asce_bits;
	int i;

	/* Choose top-level table type and ASCE bits for the address space size */
	if (asce_limit == _REGION1_SIZE) {
		asce_type = _REGION2_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		asce_type = _REGION3_ENTRY_EMPTY;
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	}
	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);

	/*
	 * To allow prefixing the lowcore must be mapped with 4KB pages.
	 * To prevent creation of a large page at address 0 first map
	 * the lowcore and create the identity mapping only afterwards.
	 */
	pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
	/* Identity-map every usable physical memory range */
	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(start, end, POPULATE_ONE2ONE);
	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
	/* Build tables for the memcpy_real() window; no physical backing yet */
	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
			 POPULATE_NONE);
	/* Cache the pte pointer so memcpy_real() can remap it later */
	memcpy_real_ptep = __virt_to_kpte(__memcpy_real_area);

	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
	S390_lowcore.user_asce = s390_invalid_asce;

	/* Activate the new address space via control registers 1, 7 and 13 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);

	init_mm.context.asce = S390_lowcore.kernel_asce;
}
|