
Since [1], constructors/destructors are expected to be called for all page table pages, at all levels and for both user and kernel pgtables. There is however one glaring exception: kernel PTEs are managed via separate helpers (pte_alloc_kernel/pte_free_kernel), which do not call the [cd]tor, at least not in the generic implementation.

The most obvious reason for this anomaly is that init_mm is special-cased not to use split page table locks. As a result, calling ptlock_init() for PTEs associated with init_mm would be wasteful, potentially resulting in dynamic memory allocation. However, pgtable [cd]tors perform other actions - currently related to accounting/statistics, and potentially more functionally significant in the future.

Now that pagetable_pte_ctor() is passed the associated mm, we can make it skip the call to ptlock_init() for init_mm; this allows us to call the ctor from pte_alloc_one_kernel() too. This is matched by a call to the pgtable destructor in pte_free_kernel(); no special-casing is needed on that path, as ptlock_free() is already called unconditionally. (ptlock_free() is a no-op unless a ptlock was allocated for the given PTP.)

This patch ensures that all architectures that rely on <asm-generic/pgalloc.h> call the [cd]tor for kernel PTEs. pte_free_kernel() cannot be overridden, so changing the generic implementation is sufficient. pte_alloc_one_kernel() can be overridden using __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL, and a few architectures implement it by calling the page allocator directly. We amend those so that they call the generic __pte_alloc_one_kernel() instead, if possible, ensuring that the ctor is called.

A few architectures do not use <asm-generic/pgalloc.h>; those will be taken care of separately.

[1] https://lore.kernel.org/linux-mm/20250103184415.2744423-1-kevin.brodsky@arm.com/

Link: https://lkml.kernel.org/r/20250408095222.860601-4-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com> # s390
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: <x86@kernel.org>
Cc: Yang Shi <yang@os.amperecomputing.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
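
To make the mechanism concrete, here is a simplified sketch of what pagetable_pte_ctor() looks like once it is passed the associated mm, per the description above. This is a sketch based on the existing ptdesc helpers in <linux/mm.h> (ptdesc_folio(), ptlock_init(), __folio_set_pgtable(), lruvec_stat_add_folio()); the exact body in the applied series may differ in detail:

static inline bool pagetable_pte_ctor(struct mm_struct *mm,
				      struct ptdesc *ptdesc)
{
	struct folio *folio = ptdesc_folio(ptdesc);

	/*
	 * init_mm does not use split PTE locks, so skip ptlock_init()
	 * (and its potential dynamic allocation) for kernel pgtables.
	 */
	if (mm != &init_mm && !ptlock_init(ptdesc))
		return false;
	__folio_set_pgtable(folio);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
	return true;
}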
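
The corresponding generic kernel-PTE helpers in <asm-generic/pgalloc.h> then take the following shape (simplified sketch; allocation-profiling variants and the exact gfp mask handling are omitted). Note that pagetable_dtor() calls ptlock_free() unconditionally, which is harmless here since no ptlock was allocated for init_mm pgtables:

static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL, 0);

	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		/* ctor failed; undo the allocation */
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_address(ptdesc);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pte);

	/* The dtor now runs for kernel PTEs too; ptlock_free() inside
	 * it is a no-op when no ptlock was allocated (the init_mm case).
	 */
	pagetable_dtor(ptdesc);
	pagetable_free(ptdesc);
}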
One of the architectures amended accordingly is csky, whose pgalloc header overrides pte_alloc_one_kernel():

arch/csky/include/asm/pgalloc.h:
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGALLOC_H
#define __ASM_CSKY_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	set_pmd(pmd, __pmd(__pa(pte)));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte)
{
	set_pmd(pmd, __pmd(__pa(page_address(pte))));
}

extern void pgd_init(unsigned long *p);

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	unsigned long i;

	/*
	 * Go through the generic allocator so that the pgtable ctor is
	 * called; only the _PAGE_GLOBAL presetting below is csky-specific.
	 */
	pte = __pte_alloc_one_kernel(mm);
	if (!pte)
		return NULL;

	/* Preset the global bit in every (not-present) entry */
	for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
		(pte + i)->pte_low = _PAGE_GLOBAL;

	return pte;
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret;
	pgd_t *init;

	ret = __pgd_alloc(mm, 0);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long *)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
		/* prevent out-of-order execution */
		smp_mb();
#ifdef CONFIG_CPU_NEED_TLBSYNC
		dcache_wb_range((unsigned int)ret,
				(unsigned int)(ret + PTRS_PER_PGD));
#endif
	}

	return ret;
}

#define __pte_free_tlb(tlb, pte, address)	\
	tlb_remove_ptdesc((tlb), page_ptdesc(pte))

extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
extern void pre_trap_init(void);

#endif /* __ASM_CSKY_PGALLOC_H */
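
For context, a kernel PTE table like the one allocated above is typically created behind pte_alloc_kernel() when the kernel establishes a mapping at a new address. A minimal usage sketch, assuming the generic pte_alloc_kernel() macro from <linux/mm.h>; map_one_kernel_page() is a hypothetical helper for illustration, with error handling trimmed:

/*
 * Sketch: establish a single kernel mapping. With this series, the PTE
 * table allocated by pte_alloc_kernel() goes through pagetable_pte_ctor()
 * (and is thus accounted in NR_PAGETABLE), and its eventual release via
 * pte_free_kernel() goes through the dtor.
 */
static int map_one_kernel_page(pmd_t *pmd, unsigned long addr,
			       struct page *page, pgprot_t prot)
{
	pte_t *pte = pte_alloc_kernel(pmd, addr); /* may allocate a PTE table */

	if (!pte)
		return -ENOMEM;
	set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
	return 0;
}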