2022-05-31 18:04:11 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
|
|
|
*/
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/export.h>
|
|
|
|
#include <linux/signal.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/memblock.h>
|
|
|
|
#include <linux/memremap.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/mman.h>
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/swap.h>
|
|
|
|
#include <linux/proc_fs.h>
|
|
|
|
#include <linux/pfn.h>
|
|
|
|
#include <linux/hardirq.h>
|
|
|
|
#include <linux/gfp.h>
|
2022-10-27 20:52:51 +08:00
|
|
|
#include <linux/hugetlb.h>
|
2022-05-31 18:04:11 +08:00
|
|
|
#include <linux/mmzone.h>
|
2024-05-05 19:06:24 +03:00
|
|
|
#include <linux/execmem.h>
|
2022-05-31 18:04:11 +08:00
|
|
|
|
|
|
|
#include <asm/asm-offsets.h>
|
|
|
|
#include <asm/bootinfo.h>
|
|
|
|
#include <asm/cpu.h>
|
|
|
|
#include <asm/dma.h>
|
|
|
|
#include <asm/mmu_context.h>
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
#include <asm/tlb.h>
|
|
|
|
|
2023-09-06 22:53:10 +08:00
|
|
|
/* The canonical all-zeroes page, page-aligned in .bss (one page's worth of longs). */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
|
LoongArch: Mark 3 symbol exports as non-GPL
vm_map_base, empty_zero_page and invalid_pmd_table could be accessed
widely by some out-of-tree non-GPL but important file systems or drivers
(e.g. OpenZFS). Let's use EXPORT_SYMBOL() instead of EXPORT_SYMBOL_GPL()
to export them, so as to avoid build errors.
1, Details about vm_map_base:
This is a LoongArch-specific symbol and may be referenced through macros
PCI_IOBASE, VMALLOC_START and VMALLOC_END.
2, Details about empty_zero_page:
As it stands today, only 3 architectures export empty_zero_page as a GPL
symbol: IA64, LoongArch and MIPS. LoongArch gets the GPL export by
inheriting from MIPS, and the MIPS export was first introduced in commit
497d2adcbf50b ("[MIPS] Export empty_zero_page for sake of the ext4
module."). The IA64 export was similar: commit a7d57ecf4216e ("[IA64]
Export three symbols for module use") did so for kvm.
In both IA64 and MIPS, the export of empty_zero_page was done for
satisfying some in-kernel component built as module (kvm and ext4
respectively), and given its reasonably low-level nature, GPL is a
reasonable choice. But looking at the bigger picture it is evident most
other architectures do not regard it as GPL, so in effect the symbol
probably should not be treated as such, in favor of consistency.
3, Details about invalid_pmd_table:
Keep consistency with invalid_pte_table and make it be possible by some
modules.
Cc: stable@vger.kernel.org
Reviewed-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2023-04-18 19:38:58 +08:00
|
|
|
EXPORT_SYMBOL(empty_zero_page);
|
2022-05-31 18:04:11 +08:00
|
|
|
|
|
|
|
/*
 * Copy one user page to another: map both pages with short-lived local
 * kernel mappings, copy, then drop the mappings.
 *
 * @to:    destination page
 * @from:  source page
 * @vaddr: user virtual address of the page (unused here)
 * @vma:   the VMA the page belongs to (unused here)
 */
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vfrom);
	kunmap_local(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
|
|
|
|
|
|
|
|
/*
 * Report whether @pfn is usable RAM: it must lie inside a memblock
 * memory region and must not be marked reserved.
 *
 * Returns non-zero for usable RAM, 0 otherwise.
 */
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long paddr = PFN_PHYS(pfn);

	if (!memblock_is_memory(paddr))
		return 0;

	return !memblock_is_reserved(paddr);
}
|
|
|
|
|
2022-05-31 18:04:12 +08:00
|
|
|
#ifndef CONFIG_NUMA
|
2022-05-31 18:04:11 +08:00
|
|
|
/*
 * Set up the zone PFN limits and hand them to the core MM.
 *
 * Only the DMA32 (when configured) and NORMAL limits are assigned
 * explicitly; the array is zero-initialized so that every other zone
 * slot read by free_area_init() reports an empty zone instead of
 * whatever happened to be on the stack (the original left the array
 * uninitialized, which is an uninitialized read for the remaining
 * MAX_NR_ZONES entries).
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}
|
2022-05-31 18:04:12 +08:00
|
|
|
#endif /* !CONFIG_NUMA */
|
2022-05-31 18:04:11 +08:00
|
|
|
|
|
|
|
/*
 * Release the kernel's .init sections back to the page allocator,
 * poisoning the freed memory to catch late accessors.
 */
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
|
|
/*
 * Hot-add a memory range: convert the byte range to PFNs and let the
 * core MM create the struct pages via __add_pages().
 *
 * @nid:    node to attach the memory to
 * @start:  physical start address (page-aligned)
 * @size:   range length in bytes
 * @params: passed through to __add_pages()
 *
 * Returns 0 on success or the negative error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	int err;
	unsigned long first_pfn = start >> PAGE_SHIFT;
	unsigned long page_count = size >> PAGE_SHIFT;

	err = __add_pages(nid, first_pfn, page_count, params);
	if (err)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, err);

	return err;
}
|
|
|
|
|
|
|
|
/*
 * Hot-remove a memory range previously added with arch_add_memory().
 *
 * @start:  physical start address (page-aligned)
 * @size:   range length in bytes
 * @altmap: alternate vmemmap backing, forwarded to __remove_pages()
 *
 * The original computed a struct page pointer (offset by
 * vmem_altmap_offset() when @altmap was set) but never used it;
 * that dead code is dropped here — __remove_pages() only needs the
 * PFN range and the altmap itself.
 */
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
|
|
|
|
#endif
|
|
|
|
|
2022-10-27 20:52:51 +08:00
|
|
|
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
2022-10-27 20:52:52 +08:00
|
|
|
/*
 * Install a PMD-sized vmemmap mapping for the block at @p.
 * The entry is built from the page's PFN with kernel protections, then
 * marked huge and hardware-global before being written into @pmd.
 */
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t huge_entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);

	pmd_val(huge_entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, huge_entry);
}
|
|
|
|
|
|
|
|
/*
 * Check whether @pmd already holds a huge vmemmap mapping.
 * When it does, verify the covered range and report non-zero so the
 * caller skips re-populating it; otherwise report 0.
 */
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int is_huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;

	if (!is_huge)
		return 0;

	vmemmap_verify((pte_t *)pmd, node, addr, next);
	return is_huge;
}
|
|
|
|
|
|
|
|
/*
 * Populate the vmemmap for [start, end): base pages when the kernel is
 * built with 2-level page tables, PMD-sized huge pages otherwise.
 *
 * NOTE(review): @altmap is accepted but NULL is passed down in both
 * branches, so altmap-backed vmemmap is effectively ignored here —
 * confirm this is intentional.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
|
|
/*
 * Intentionally a no-op: this architecture does not tear down vmemmap
 * mappings on memory hot-remove.
 */
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2023-09-06 22:53:09 +08:00
|
|
|
/*
 * Walk (and populate on demand) the kernel page tables down to the PTE
 * level for @addr, allocating any missing intermediate tables from
 * memblock. Boot-time only (__init); allocation failure panics.
 *
 * Returns a pointer to the PTE slot covering @addr.
 */
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	/* Missing PUD table: allocate one and hook it into the P4D. */
	if (p4d_none(p4dp_get(p4d))) {
		pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		/* Fill the new table with the arch's invalid-entry pattern. */
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	/* Missing PMD table: allocate one and hook it into the PUD. */
	if (pud_none(pudp_get(pud))) {
		pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	/*
	 * Missing (or non-present) PTE table: allocate one, hook it in,
	 * then initialize its entries for kernel-space use.
	 */
	if (!pmd_present(pmdp_get(pmd))) {
		pte_t *pte;

		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		kernel_pte_init(pte);
	}

	return pte_offset_kernel(pmd, addr);
}
|
|
|
|
|
|
|
|
/*
 * Map (or unmap) one fixmap slot.
 *
 * @idx:   fixmap index; must be a valid slot (not FIX_HOLE, below the end)
 * @phys:  physical address to map the slot to
 * @flags: page protection; a zero pgprot means "clear the mapping"
 *
 * The PTE for the slot is created on demand. If the slot is already
 * mapped, the stale entry is reported via pte_ERROR() and left
 * untouched rather than overwritten.
 */
void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(ptep_get(ptep))) {
		/* Slot already in use: complain and bail out. */
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		/* Zero protection clears the slot and flushes its TLB entry. */
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
/*
|
|
|
|
* Align swapper_pg_dir in to 64K, allows its address to be loaded
|
|
|
|
* with a single LUI instruction in the TLB handlers. If we used
|
|
|
|
* __aligned(64K), its size would get rounded up to the alignment
|
|
|
|
* size, and waste space. So we place it in its own section and align
|
|
|
|
* it in the linker script.
|
|
|
|
*/
|
|
|
|
/* Kernel top-level page directory; placed in its own section so the
 * linker script can align it (see the comment above). */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

/* All-invalid page directory used when no valid mm is active. */
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
|
|
|
|
#ifndef __PAGETABLE_PUD_FOLDED
|
|
|
|
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
|
2023-10-18 08:42:52 +08:00
|
|
|
EXPORT_SYMBOL(invalid_pud_table);
|
2022-05-31 18:04:11 +08:00
|
|
|
#endif
|
|
|
|
#ifndef __PAGETABLE_PMD_FOLDED
|
|
|
|
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
|
LoongArch: Mark 3 symbol exports as non-GPL
vm_map_base, empty_zero_page and invalid_pmd_table could be accessed
widely by some out-of-tree non-GPL but important file systems or drivers
(e.g. OpenZFS). Let's use EXPORT_SYMBOL() instead of EXPORT_SYMBOL_GPL()
to export them, so as to avoid build errors.
1, Details about vm_map_base:
This is a LoongArch-specific symbol and may be referenced through macros
PCI_IOBASE, VMALLOC_START and VMALLOC_END.
2, Details about empty_zero_page:
As it stands today, only 3 architectures export empty_zero_page as a GPL
symbol: IA64, LoongArch and MIPS. LoongArch gets the GPL export by
inheriting from MIPS, and the MIPS export was first introduced in commit
497d2adcbf50b ("[MIPS] Export empty_zero_page for sake of the ext4
module."). The IA64 export was similar: commit a7d57ecf4216e ("[IA64]
Export three symbols for module use") did so for kvm.
In both IA64 and MIPS, the export of empty_zero_page was done for
satisfying some in-kernel component built as module (kvm and ext4
respectively), and given its reasonably low-level nature, GPL is a
reasonable choice. But looking at the bigger picture it is evident most
other architectures do not regard it as GPL, so in effect the symbol
probably should not be treated as such, in favor of consistency.
3, Details about invalid_pmd_table:
Keep consistency with invalid_pte_table and make it be possible by some
modules.
Cc: stable@vger.kernel.org
Reviewed-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2023-04-18 19:38:58 +08:00
|
|
|
EXPORT_SYMBOL(invalid_pmd_table);
|
2022-05-31 18:04:11 +08:00
|
|
|
#endif
|
|
|
|
/* Shared all-invalid PTE table that empty PMD slots point at. */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
|
2024-05-05 19:06:24 +03:00
|
|
|
|
|
|
|
#ifdef CONFIG_EXECMEM
|
|
|
|
static struct execmem_info execmem_info __ro_after_init;
|
|
|
|
|
|
|
|
/*
 * Describe where executable memory (modules, BPF, etc.) may be
 * allocated on this architecture: the module VA window with kernel
 * page protections and no extra alignment.
 *
 * The compound-literal assignment deliberately zero-fills every field
 * not named here, so keep it as a single whole-struct store.
 */
struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start = MODULES_VADDR,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
|
|
|
|
#endif /* CONFIG_EXECMEM */
|