/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD (PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD (PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD (PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE (PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
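
/*
 * For example, with the default 16KB base pages (PAGE_SHIFT == 14) and
 * CONFIG_PGTABLE_LEVELS == 3, each level resolves PAGE_SHIFT - 3 = 11 bits
 * (2048 eight-byte entries per table), giving PMD_SHIFT = 25,
 * PGDIR_SHIFT = 36 and VA_BITS = 47.
 */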

#ifndef __ASSEMBLER__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)

#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE 0
#endif

#define VMALLOC_START MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END \
	(vm_map_base + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END \
	(vm_map_base + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)

#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)

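/*
 * Taken together, the definitions above lay the kernel mapping range out in
 * ascending order: the module region [MODULES_VADDR, MODULES_END), the
 * vmalloc region [VMALLOC_START, VMALLOC_END), the vmemmap array, and (when
 * KFENCE is enabled) the KFENCE object pool right behind it.
 */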
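
/*
 * Page table entries are read with READ_ONCE() and installed with
 * WRITE_ONCE() (see the set_p*() helpers below), so lockless walkers
 * always observe a consistent value.
 */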
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

static inline void set_pud(pud_t *pud, pud_t pudval)
{
	WRITE_ONCE(*pud, pudval);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

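/*
 * A huge pmd that has been invalidated with pmd_mkinvalid() keeps
 * _PAGE_PRESENT_INVALID set, so it still reads back as present here even
 * though _PAGE_PRESENT itself has been cleared.
 */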
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	WRITE_ONCE(*pmd, pmdval);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd) pmd_val(pmd)

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
#define pud_init pud_init
extern void pmd_init(void *addr);
#define pmd_init pmd_init
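/*
 * Kernel page tables are pre-initialized by kernel_pte_init() so that every
 * entry already carries _PAGE_GLOBAL: the two buddy PTEs sharing one TLB
 * entry must agree on the global bit, and presetting it means set_pte() and
 * pte_clear() only ever have to update their own entry.
 */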
extern void kernel_pte_init(void *addr);
#define kernel_pte_init kernel_pte_init

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 * <--------------------------- offset ---------------------------
 *
 * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * --------------> E <--- type ---> <---------- zeroes ---------->
 *
 * E is the exclusive marker that is not stored in swap entries.
 * The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x) (((x).val >> 16) & 0x7f)
#define __swp_offset(x) ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
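
/*
 * For example, __swp_entry(2, 0x1000) encodes to (2 << 16) | (0x1000UL << 24):
 * the type lives in bits 16-22, the offset starts at bit 24, and bit 23 is
 * reserved for the exclusive marker (_PAGE_SWP_EXCLUSIVE) handled below.
 */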
static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)

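/*
 * set_pte() publishes the entry with WRITE_ONCE() and, for global (kernel)
 * mappings on SMP, follows it with a DBAR hint so the store is ordered
 * before later accesses. pte_clear() deliberately preserves _PAGE_GLOBAL so
 * that the two buddy entries of a kernel TLB pair never disagree on it.
 */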
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);

#ifdef CONFIG_SMP
	if (pte_val(pteval) & _PAGE_GLOBAL)
		DBAR(0b11000); /* o_wrw = 0b11000 */
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_val(pte) &= _PAGE_GLOBAL;
	set_pte(ptep, pte);
}

#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

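/*
 * _PAGE_MODIFIED records the software dirty state, while _PAGE_DIRTY is only
 * set once the entry is also writable: pte_mkdirty() and pte_mkwrite_novma()
 * below each check the other flag before setting it, and pte_wrprotect()
 * clears _PAGE_DIRTY together with _PAGE_WRITE.
 */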
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;

	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		__update_tlb(vma, address, ptep);
		if (--nr == 0)
			break;
		address += PAGE_SIZE;
		ptep++;
	}
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

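/*
 * Huge mappings carry the global flag in the _PAGE_HGLOBAL position rather
 * than _PAGE_GLOBAL, so pmd_mkhuge() relocates the bit while it marks the
 * entry as huge.
 */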
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;

	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;

	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLER__ */

#endif /* _ASM_PGTABLE_H */