linux/arch/powerpc/include/asm/kfence.h
Ritesh Harjani (IBM) b5fbf7e2c6 book3s64/radix: Refactoring common kfence related functions
Both radix and hash on book3s need to detect whether kfence
early init is enabled. Hash needs to disable kfence if early init is
not enabled, because with kfence the linear map is mapped using
PAGE_SIZE rather than the 16M mapping, and book3s64 does not support
multiple page sizes for the SLB entries used for the kernel linear
map.

This patch refactors out the common functions required to detect
whether kfence early init is enabled.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://patch.msgid.link/f4a787224fbe5bb787158ace579780c0257f6602.1729271995.git.ritesh.list@gmail.com
2024-10-23 18:53:20 +11:00
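
To illustrate how the refactored helpers fit together, here is a minimal
sketch of a hash-side init path. This is an assumption for illustration
only: hash_setup_kfence() is a hypothetical name and call site, not part
of this commit.

/*
 * Hypothetical sketch: opt out of KFENCE on hash when early init is
 * not enabled, since the hash kernel linear map cannot mix 16M and
 * PAGE_SIZE mappings (hash_setup_kfence() is a made-up call site).
 */
#include <linux/init.h>
#include <asm/kfence.h>

static void __init hash_setup_kfence(void)
{
	if (!kfence_early_init_enabled())
		disable_kfence();
}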

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif

extern bool kfence_early_init;
extern bool kfence_disabled;

/*
 * Permanently opt out of KFENCE, e.g. when the kernel linear map
 * cannot be mapped at page granularity.
 */
static inline void disable_kfence(void)
{
	kfence_disabled = true;
}

/* Only let the KFENCE pool initialise if KFENCE wasn't disabled. */
static inline bool arch_kfence_init_pool(void)
{
	return !kfence_disabled;
}

/* True when KFENCE is built in and was enabled at early boot. */
static inline bool kfence_early_init_enabled(void)
{
	return IS_ENABLED(CONFIG_KFENCE) && kfence_early_init;
}

#ifdef CONFIG_PPC64
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	struct page *page = virt_to_page((void *)addr);

	/* On 64-bit, toggle the page's presence in the kernel linear map. */
	__kernel_map_pages(page, 1, !protect);

	return true;
}
#else
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *kpte = virt_to_kpte(addr);

	if (protect) {
		/* Clear _PAGE_PRESENT and flush the stale translation. */
		pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
	}

	return true;
}
#endif
#endif /* __ASM_POWERPC_KFENCE_H */
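
For context, this is roughly how the generic KFENCE core drives the
kfence_protect_page() hook; a simplified paraphrase of mm/kfence/core.c
(the real code additionally warns on failure), not part of this header.

/* Simplified paraphrase of the generic KFENCE core's use of the hook. */
static bool kfence_protect(unsigned long addr)
{
	/* Make the guard page non-present so stray accesses fault. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

static bool kfence_unprotect(unsigned long addr)
{
	/* Restore the mapping around legitimate object accesses. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}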