// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
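
/*
 * DMW (direct mapped window) kernel addresses translate to physical
 * addresses directly, so __pa() is enough to find the backing page.
 */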
struct page *dmw_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);
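
/*
 * TLB-mapped kernel addresses (e.g. objects in the KFENCE pool) cannot be
 * translated by a fixed offset, so walk the kernel page table instead.
 */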
struct page *tlb_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);
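
/*
 * Allocate a PGD: every entry first points at the invalid next-level table,
 * then the kernel half is copied from init_mm so that all processes share
 * the kernel mappings.
 */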
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *init, *ret;

	ret = __pgd_alloc(mm, 0);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);
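
/*
 * Fill a PGD page so that every entry points at the invalid next-level
 * table (PUD, PMD or PTE, depending on which levels are folded). The
 * loop writes eight entries per iteration.
 */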
void pgd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long entry;

#if !defined(__PAGETABLE_PUD_FOLDED)
	entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
	entry = (unsigned long)invalid_pmd_table;
#else
	entry = (unsigned long)invalid_pte_table;
#endif

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);
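
/*
 * pmd_init() and pud_init() do the same for the mid-level tables, pointing
 * every entry at the invalid table one level down.
 */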
#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pte_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PMD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pmd_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PUD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif
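
/*
 * A LoongArch TLB entry covers a pair of pages, and both PTEs of the pair
 * must carry _PAGE_GLOBAL for kernel mappings. Kernel PTE tables are
 * therefore initialized to _PAGE_GLOBAL instead of zero, so set_pte() and
 * pte_clear() never have to touch the buddy entry.
 */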
void kernel_pte_init(void *addr)
{
	unsigned long *p, *end;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PTE;

	do {
		p[0] = _PAGE_GLOBAL;
		p[1] = _PAGE_GLOBAL;
		p[2] = _PAGE_GLOBAL;
		p[3] = _PAGE_GLOBAL;
		p[4] = _PAGE_GLOBAL;
		p += 8;
		p[-3] = _PAGE_GLOBAL;
		p[-2] = _PAGE_GLOBAL;
		p[-1] = _PAGE_GLOBAL;
	} while (p != end);
}
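
/* Publish the new PMD with a single store, then flush stale TLB entries. */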
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
	flush_tlb_all();
}
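
/*
 * Set up swapper_pg_dir, invalid_pg_dir and the invalid mid-level tables:
 * every entry points at the invalid table one level down.
 */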
void __init pagetable_init(void)
{
	/* Initialize the entire pgd. */
	pgd_init(swapper_pg_dir);
	pgd_init(invalid_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
	pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_init(invalid_pmd_table);
#endif
}