From a4ee28615c7a1e2925e1fcb4ba0fa1aeee633d78 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Mon, 7 Nov 2022 19:47:53 +0530
Subject: [PATCH 1/3] arm64/mm: Simplify and document pte_to_phys() for 52 bit addresses

The pte_to_phys() assembly macro performs multiple bit-field transformations
to derive the physical address embedded inside a page table entry. Unlike its
C counterpart, __pte_to_phys(), it is not very apparent. Simplify these
operations via a new macro, PTE_ADDR_HIGH_SHIFT, which indicates how far the
pte-encoded higher address bits need to be left shifted. While here, also
update __pte_to_phys() and __phys_to_pte_val() to use the new macro.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Brown
Cc: Mark Rutland
Cc: Ard Biesheuvel
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Ard Biesheuvel
Suggested-by: Ard Biesheuvel
Signed-off-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20221107141753.2938621-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h     | 8 +++-----
 arch/arm64/include/asm/pgtable-hwdef.h | 1 +
 arch/arm64/include/asm/pgtable.h       | 4 ++--
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e5957a53be39..89038067ef34 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -660,12 +660,10 @@ alternative_endif
 	.endm
 
 	.macro	pte_to_phys, phys, pte
-#ifdef CONFIG_ARM64_PA_BITS_52
-	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
-	bfxil	\phys, \pte, #16, #32
-	lsl	\phys, \phys, #16
-#else
 	and	\phys, \pte, #PTE_ADDR_MASK
+#ifdef CONFIG_ARM64_PA_BITS_52
+	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
+	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
 #endif
 	.endm
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5ab8d163198f..f658aafc47df 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -159,6 +159,7 @@
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
 #define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#define PTE_ADDR_HIGH_SHIFT	36
 #else
 #define PTE_ADDR_MASK		PTE_ADDR_LOW
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 71a1af42f0e8..daedd6172227 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -77,11 +77,11 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 static inline phys_addr_t __pte_to_phys(pte_t pte)
 {
 	return (pte_val(pte) & PTE_ADDR_LOW) |
-		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
+		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
 }
 static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 {
-	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
+	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
 }
 #else
 #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
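
For reference, the PA[51:48] packing that PTE_ADDR_HIGH_SHIFT describes can be
exercised in isolation. The stand-alone C sketch below mirrors the
__pte_to_phys()/__phys_to_pte_val() arithmetic from the patch, assuming the
existing CONFIG_ARM64_PA_BITS_52 layout (64k pages, so PAGE_SHIFT is 16 and PA
bits [51:48] sit in pte bits [15:12]); the helper names and the sample address
are illustrative and not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		16
#define PHYS_MASK_SHIFT		52
#define PTE_ADDR_LOW		(((1ULL << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#define PTE_ADDR_HIGH		(0xfULL << 12)
#define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
#define PTE_ADDR_HIGH_SHIFT	36

/* PA[47:16] are already in place in the pte; PA[51:48] come from pte[15:12]. */
static uint64_t pte_to_phys(uint64_t pte)
{
	return (pte & PTE_ADDR_LOW) |
	       ((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}

/* Fold PA[51:48] down into pte[15:12], then mask off everything else. */
static uint64_t phys_to_pte(uint64_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
}

int main(void)
{
	uint64_t phys = 0x000f123456780000ULL;	/* a 52-bit, 64k-aligned PA */
	uint64_t pte = phys_to_pte(phys);

	/* The round trip should reproduce the original physical address. */
	printf("pte  = 0x%016llx\n", (unsigned long long)pte);
	printf("phys = 0x%016llx\n", (unsigned long long)pte_to_phys(pte));
	return 0;
}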
From 0bb1fbffc631064db567ccaeb9ed6b6df6342b66 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 14 Nov 2022 10:44:11 +0000
Subject: [PATCH 2/3] arm64: mm: kfence: only handle translation faults

Alexander noted that KFENCE only expects to handle faults from invalid page
table entries (i.e. translation faults), but arm64's fault handling logic will
call kfence_handle_page_fault() for other types of faults, including alignment
faults caused by unaligned atomics. This has the unfortunate property of
causing those other faults to be reported as "KFENCE: use-after-free", which
is misleading and hinders debugging.

Fix this by only forwarding unhandled translation faults to the KFENCE code,
similar to what x86 already does.

Alexander has verified that this passes all the tests in the KFENCE test suite
and avoids bogus reports on misaligned atomics.

Link: https://lore.kernel.org/all/20221102081620.1465154-1-zhongbaisong@huawei.com/
Fixes: 840b23986344 ("arm64, kfence: enable KFENCE for ARM64")
Signed-off-by: Mark Rutland
Reviewed-by: Alexander Potapenko
Tested-by: Alexander Potapenko
Cc: Catalin Marinas
Cc: Marco Elver
Cc: Will Deacon
Link: https://lore.kernel.org/r/20221114104411.2853040-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/fault.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3e9cf9826417..3eb2825d08cf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -354,6 +354,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
 	return false;
 }
 
+static bool is_translation_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
 static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 			      struct pt_regs *regs)
 {
@@ -386,7 +391,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 	} else if (addr < PAGE_SIZE) {
 		msg = "NULL pointer dereference";
 	} else {
-		if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+		if (is_translation_fault(esr) &&
+		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
 			return;
 
 		msg = "paging request";

From 453dfcee70c5c344ca09396ff5b0baf177eb327e Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 22 Nov 2022 18:02:49 +0100
Subject: [PATCH 3/3] arm64: booting: Require placement within 48-bit addressable memory

Some configurations (i.e., 64k + LVA/LPA) can tolerate a physical placement of
the kernel image outside of the 48-bit addressable region, but given that the
loader has no way of knowing whether or not the image in question supports
LVA/LPA, it currently has no choice but to place it below the 48-bit mark.

Once we add support for LPA2, which allows 52-bit physical and virtual
addressing when using 4k or 16k pages, but in a way that relies on increasing
the number of paging levels, there will be more variety in the configurations
that may or may not support this.

So redefine bit #3 in the Image header as 'must be placed within 48-bit
addressable memory', as this is the current de facto meaning.

Signed-off-by: Ard Biesheuvel
Reviewed-by: Anshuman Khandual
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20221122170249.2453853-1-ardb@kernel.org
Signed-off-by: Will Deacon
---
 Documentation/arm64/booting.rst | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst
index 8c324ad638de..5a764fabfea8 100644
--- a/Documentation/arm64/booting.rst
+++ b/Documentation/arm64/booting.rst
@@ -121,8 +121,9 @@ Header notes:
 			  to the base of DRAM, since memory below it is not
 			  accessible via the linear mapping
 			1
-			  2MB aligned base may be anywhere in physical
-			  memory
+			  2MB aligned base such that all image_size bytes
+			  counted from the start of the image are within
+			  the 48-bit addressable range of physical memory
   Bits 4-63	Reserved.
   ============= ===============================================================
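
As a rough illustration of the rule documented above, a loader could validate
a candidate placement along these lines. This is a minimal sketch under the
stated assumption that the image must honour the 48-bit limit; the function
and constant names are invented for illustration and are not taken from any
real loader.

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M			(2ULL << 20)
#define ADDR_LIMIT_48BIT	(1ULL << 48)

/*
 * Illustrative check: the base must be 2MB aligned and every one of
 * image_size bytes, counted from the start of the image, must fall
 * within the 48-bit addressable range of physical memory.
 */
static bool placement_ok(uint64_t base, uint64_t image_size)
{
	if (base & (SZ_2M - 1))
		return false;
	if (base >= ADDR_LIMIT_48BIT ||
	    image_size > ADDR_LIMIT_48BIT - base)
		return false;
	return true;
}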