/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/hugetlb.h
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/include/asm/hugetlb.h
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H
#include <asm/cacheflush.h>
#include <asm/mte.h>
#include <asm/page.h>
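/*
 * Let the core hugetlb code ask whether a given huge page size can be
 * migrated; arm64 decides this per-hstate.
 */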
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
extern bool arch_hugetlb_migration_supported(struct hstate *h);
#endif
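/*
 * Reset the arch-specific folio flags: the folio's dcache state must be
 * revalidated, and when MTE is available any stale tag state is dropped so
 * the folio is treated as untagged again.
 */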
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
        clear_bit(PG_dcache_clean, &folio->flags);

#ifdef CONFIG_ARM64_MTE
        if (system_supports_mte()) {
                clear_bit(PG_mte_tagged, &folio->flags);
                clear_bit(PG_mte_lock, &folio->flags);
        }
#endif
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
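/*
 * Build a huge PTE from the mapping shift and vm_flags rather than a VMA,
 * so callers that do not have a VMA to hand can still construct huge PTEs.
 */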
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
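/*
 * The __HAVE_ARCH_HUGE_* definitions below tell <asm-generic/hugetlb.h>
 * not to emit its generic fallbacks for these helpers.
 */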
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
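/*
 * The huge page size is passed explicitly: the PTE being set may be a swap
 * or marker entry with no PFN, so the number of contiguous entries to write
 * cannot always be derived from the PTE itself.
 */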
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
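/*
 * As with set_huge_pte_at(), the huge page size is passed in so the whole
 * run of contiguous entries can be handled even when it cannot be derived
 * from the PTE contents.
 */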
extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_HUGE_PTE_CLEAR
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
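/*
 * For contiguous mappings the logical PTE is assembled from the whole
 * contiguous run (e.g. collecting the dirty/young bits across entries).
 */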
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
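/* Reserve CMA areas for gigantic hugetlb pages during early boot. */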
void __init arm64_hugetlb_cma_reserve(void);
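/*
 * Transactional permission changes for huge mappings: _start clears the
 * existing entry and returns it, _commit installs the updated PTE.
 */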
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t new_pte);
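/* Generic fallbacks for everything not overridden above. */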
#include <asm-generic/hugetlb.h>
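/*
 * Flush a hugetlb range using a stride that matches the huge page size and
 * pass the matching translation table level as a TLBI hint: PUD level 1,
 * (CONT_)PMD level 2, CONT_PTE level 3, otherwise unknown.
 */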
static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                             unsigned long start,
                                             unsigned long end,
                                             unsigned long stride,
                                             bool last_level)
{
        switch (stride) {
#ifndef __PAGETABLE_PMD_FOLDED
        case PUD_SIZE:
                __flush_tlb_range(vma, start, end, PUD_SIZE, last_level, 1);
                break;
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
                __flush_tlb_range(vma, start, end, PMD_SIZE, last_level, 2);
                break;
        case CONT_PTE_SIZE:
                __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, 3);
                break;
        default:
                __flush_tlb_range(vma, start, end, PAGE_SIZE, last_level, TLBI_TTL_UNKNOWN);
        }
}
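/*
 * The generic flush always uses the VMA's huge page size as the stride and
 * does not restrict the invalidation to last-level entries.
 */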
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        unsigned long stride = huge_page_size(hstate_vma(vma));

        __flush_hugetlb_tlb_range(vma, start, end, stride, false);
}
#endif /* __ASM_HUGETLB_H */