// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}
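
/*
 * PUD-level counterpart of pmdp_set_access_flags(), used for 1G huge
 * page mappings.
 */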
int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
	changed = !pud_same(*(pudp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_1G here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pudp_ptep(pudp),
					pud_pte(entry), address, MMU_PAGE_1G);
	}
	return changed;
}
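
/* Test and clear the accessed (young) bit in a huge PMD/PUD entry. */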
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pud_t *pudp)
{
	return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_leaf(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
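
/* Same as set_pmd_at(), but for PUD (1G) huge page entries. */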
void set_pud_at(struct mm_struct *mm, unsigned long addr,
		pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
	assert_spin_locked(pud_lockptr(mm, pudp));
	WARN_ON(!(pud_leaf(pud)));
#endif
	trace_hugepage_set_pud(addr, pud_val(pud));
	return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}
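
/* IPI handler for serialize_against_pte_lookup() below. */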
static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against __find_linux_pte() which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent transit from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * __find_linux_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}
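
/* PUD counterpart of pmdp_invalidate() above. */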
pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp)
{
	unsigned long old_pud;

	VM_WARN_ON_ONCE(!pud_present(*pudp));
	old_pud = pud_hugepage_update(vma->vm_mm, address, pudp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return __pud(old_pud);
}
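
/*
 * Clear a huge PMD entry and return the old value, flushing the TLB
 * unless the whole mm is being torn down (full).
 */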
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;
	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) ||
		  !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pud_t *pudp, int full)
{
	pud_t pud;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pud_present(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this PUD pte entry to a regular level 0 PTE by a parallel page fault.
	 * Make sure we flush the tlb in this case.
	 */
	if (!full)
		flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
	return pud;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
	return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd()
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pudv;

	pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	unsigned long pudv;

	pudv = pud_val(pud);
	pudv &= _HPAGE_CHG_MASK;
	return pud_set_protbits(__pud(pudv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
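
/*
 * Allocate the partition table and tell the hardware (and the nest MMU)
 * where to find it.
 */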
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc_or_panic(patb_size, patb_size);
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
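
/*
 * Invalidate all translations cached under an LPID, using the radix or
 * hash flush sequence as requested by the caller.
 */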
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here ?*/
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When ultravisor is enabled, the partition table is stored in secure
	 * memory and can only be accessed doing an ultravisor call. However, we
	 * maintain a copy of the partition table in normal memory to allow Nest
	 * MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If ultravisor is enabled, we do an ultravisor call to register the
	 * partition table entry (PATE), which also does a global flush of TLBs
	 * and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the previous
	 * use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before switching it on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
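
/*
 * PMD page tables are carved out of a full page in PMD_FRAG_SIZE chunks
 * (PMD_FRAG_NR fragments per page) to reduce memory overhead. Hand out
 * the next cached fragment of the mm, if there is one.
 */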
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	ptdesc = pagetable_alloc(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	atomic_set(&ptdesc->pt_frag_refcount, 1);

	ret = ptdesc_address(ptdesc);
	/*
	 * if we support only one fragment just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find ptdesc_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}
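
/*
 * Drop a reference on a PMD fragment; the backing page is freed once the
 * last fragment carved out of it is gone.
 */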
void pmd_fragment_free(unsigned long *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	if (pagetable_is_reserved(ptdesc))
		return free_reserved_ptdesc(ptdesc);

	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}
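
/*
 * Queue a page table for freeing once the TLB has been flushed; the table
 * level is encoded in the low bits of the pointer and recovered in
 * __tlb_remove_table() below.
 */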
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size mmu_linear_psize.
	 * So don't bother to print these on hash
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear the _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep the pte_present true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and they are stored at PTRS_PER_PMD offset from related pmd
 * location. Hence a pmd move requires deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in the
 * pmd page. Hence if we have different pmd page we need to withdraw during pmd
 * move.
 *
 * With hash we use deposited table always irrespective of anon or not.
 * With radix we use deposited table only for anonymous mapping.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}
#endif

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
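
/*
 * "disable_tlbie" command line option: fall back to tlbiel plus IPIs for
 * kernel and process TLB management. Only possible with the radix MMU.
 */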
static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    arch_debugfs_dir,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	if (!radix_enabled()) {
		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

		return max(SUBSECTION_SIZE, 1UL << shift);
	}

	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
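
/*
 * Build the page protection for a set of vm_flags, adding the powerpc
 * specific SAO and protection key bits on top of the generic
 * protection_map entry.
 */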
pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
	unsigned long prot;

	/* Radix supports execute-only, but protection_map maps X -> RX */
	if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))
		vm_flags |= VM_READ;

	prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]);

	if (vm_flags & VM_SAO)
		prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
	prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);