linux/arch/powerpc/include/asm/book3s/64/kup.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H
#include <linux/const.h>
#include <asm/reg.h>
#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
#ifdef __ASSEMBLY__
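/*
 * kuap_user_restore: restore the user AMR/IAMR values saved in the stack
 * frame (pt_regs) when returning to userspace. Skipped entirely when the
 * MMU pkey feature is not present.
 */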
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
BEGIN_MMU_FTR_SECTION_NESTED(67)
b 100f // skip_restore_amr
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
/*
* AMR and IAMR are going to be different when
* returning to userspace.
*/
ld \gpr1, STACK_REGS_AMR(r1)
/*
 * If the KUAP feature is not enabled, only do the mtspr
 * if the AMR value is different.
*/
BEGIN_MMU_FTR_SECTION_NESTED(68)
mfspr \gpr2, SPRN_AMR
cmpd \gpr1, \gpr2
beq 99f
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)
isync
mtspr SPRN_AMR, \gpr1
99:
/*
* Restore IAMR only when returning to userspace
*/
ld \gpr1, STACK_REGS_IAMR(r1)
/*
 * If the KUEP feature is not enabled, only do the mtspr
 * if the IAMR value is different.
*/
BEGIN_MMU_FTR_SECTION_NESTED(69)
mfspr \gpr2, SPRN_IAMR
cmpd \gpr1, \gpr2
beq 100f
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)
isync
mtspr SPRN_IAMR, \gpr1
100: // skip_restore_amr
/* No isync required, see kuap_user_restore() */
#endif
.endm
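/*
 * kuap_kernel_restore: when returning to the kernel, put back the AMR that
 * was saved on the stack at entry. The IAMR is left alone, and the mtspr is
 * skipped when the saved value already matches the current AMR.
 */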
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
BEGIN_MMU_FTR_SECTION_NESTED(67)
/*
* AMR is going to be mostly the same since we are
* returning to the kernel. Compare and do a mtspr.
*/
ld \gpr2, STACK_REGS_AMR(r1)
mfspr \gpr1, SPRN_AMR
cmpd \gpr1, \gpr2
beq 100f
isync
mtspr SPRN_AMR, \gpr2
/*
 * No isync required, see kuap_kernel_restore()
* No need to restore IAMR when returning to kernel space.
*/
100:
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#ifdef CONFIG_PPC_KUAP
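/*
 * kuap_check_amr: debug-only sanity check (CONFIG_PPC_KUAP_DEBUG) that the
 * AMR currently holds AMR_KUAP_BLOCKED, i.e. userspace access is locked
 * while running in the kernel.
 */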
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
BEGIN_MMU_FTR_SECTION_NESTED(67)
mfspr \gpr1, SPRN_AMR
/* Prevent access to userspace using any key values */
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999: tdne \gpr1, \gpr2
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
/*
* if (pkey) {
*
* save AMR -> stack;
* if (kuap) {
* if (AMR != BLOCKED)
* KUAP_BLOCKED -> AMR;
* }
* if (from_user) {
* save IAMR -> stack;
* if (kuep) {
 * KUEP_BLOCKED -> IAMR;
* }
* }
* return;
* }
*
* if (kuap) {
* if (from_kernel) {
* save AMR -> stack;
* if (AMR != BLOCKED)
* KUAP_BLOCKED -> AMR;
* }
*
* }
*/
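/*
 * kuap_save_amr_and_lock implements the pseudo-code above on interrupt and
 * exception entry: save the current AMR (and, when coming from userspace,
 * the IAMR) into the pt_regs frame, then switch to the blocked values.
 */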
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)
/*
 * If both pkey and kuap are disabled, there is nothing to do.
*/
BEGIN_MMU_FTR_SECTION_NESTED(68)
b 100f // skip_save_amr
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
/*
 * If pkey is disabled and we are entering from userspace,
 * don't do anything.
*/
BEGIN_MMU_FTR_SECTION_NESTED(67)
.ifnb \msr_pr_cr
/*
 * Without pkey we are not changing the AMR outside the kernel,
 * hence skip this completely.
*/
bne \msr_pr_cr, 100f // from userspace
.endif
END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
/*
 * pkey is enabled, or pkey is disabled but we are entering from the kernel.
*/
mfspr \gpr1, SPRN_AMR
std \gpr1, STACK_REGS_AMR(r1)
/*
 * Update the kernel AMR with AMR_KUAP_BLOCKED only
 * if the KUAP feature is enabled.
*/
BEGIN_MMU_FTR_SECTION_NESTED(69)
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
cmpd \use_cr, \gpr1, \gpr2
beq \use_cr, 102f
/*
 * We don't isync before the mtspr because we very recently entered
 * via an interrupt, which is itself context synchronising.
*/
mtspr SPRN_AMR, \gpr2
isync
102:
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
/*
 * If entering from the kernel we don't need to save the IAMR.
*/
.ifnb \msr_pr_cr
beq \msr_pr_cr, 100f // from kernel space
mfspr \gpr1, SPRN_IAMR
std \gpr1, STACK_REGS_IAMR(r1)
/*
 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
 * if the KUEP feature is enabled.
*/
BEGIN_MMU_FTR_SECTION_NESTED(70)
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
mtspr SPRN_IAMR, \gpr2
isync
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
.endif
100: // skip_save_amr
#endif
.endm
#else /* !__ASSEMBLY__ */
#include <linux/jump_label.h>
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_PKEY
#include <asm/mmu.h>
#include <asm/ptrace.h>
/*
 * For a kernel thread that doesn't have thread.regs, return
 * the default AMR/IAMR values.
*/
static inline u64 current_thread_amr(void)
{
if (current->thread.regs)
return current->thread.regs->amr;
return AMR_KUAP_BLOCKED;
}
static inline u64 current_thread_iamr(void)
{
if (current->thread.regs)
return current->thread.regs->iamr;
return AMR_KUEP_BLOCKED;
}
#endif /* CONFIG_PPC_PKEY */
#ifdef CONFIG_PPC_KUAP
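/*
 * C counterpart of the asm kuap_user_restore above: on return to userspace,
 * write back the AMR/IAMR values saved in pt_regs. The mfspr comparisons
 * avoid the mtspr when a feature is disabled and the register already holds
 * the right value.
 */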
static inline void kuap_user_restore(struct pt_regs *regs)
{
bool restore_amr = false, restore_iamr = false;
unsigned long amr, iamr;
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
amr = mfspr(SPRN_AMR);
if (amr != regs->amr)
restore_amr = true;
} else {
restore_amr = true;
}
if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
iamr = mfspr(SPRN_IAMR);
if (iamr != regs->iamr)
restore_iamr = true;
} else {
restore_iamr = true;
}
if (restore_amr || restore_iamr) {
isync();
if (restore_amr)
mtspr(SPRN_AMR, regs->amr);
if (restore_iamr)
mtspr(SPRN_IAMR, regs->iamr);
}
/*
* No isync required here because we are about to rfi
* back to previous context before any user accesses
* would be made, which is a CSI.
*/
}
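/*
 * Restore the AMR saved in pt_regs when returning to the kernel. 'amr' is
 * the current AMR value (typically obtained via kuap_get_and_check_amr());
 * the mtspr is skipped when nothing changed. The IAMR does not need to be
 * restored on kernel-to-kernel returns.
 */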
static inline void kuap_kernel_restore(struct pt_regs *regs,
unsigned long amr)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
if (unlikely(regs->amr != amr)) {
isync();
mtspr(SPRN_AMR, regs->amr);
/*
* No isync required here because we are about to rfi
* back to previous context before any user accesses
* would be made, which is a CSI.
*/
}
}
/*
* No need to restore IAMR when returning to kernel space.
*/
}
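/*
 * Read the current AMR, warning (with CONFIG_PPC_KUAP_DEBUG) if it is not
 * fully blocked. The returned value is typically handed back to
 * kuap_kernel_restore() so redundant mtsprs can be avoided. Returns 0 when
 * KUAP is not active.
 */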
static inline unsigned long kuap_get_and_check_amr(void)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
unsigned long amr = mfspr(SPRN_AMR);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
return amr;
}
return 0;
}
#else /* CONFIG_PPC_PKEY */
static inline void kuap_user_restore(struct pt_regs *regs)
{
}
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}
static inline unsigned long kuap_get_and_check_amr(void)
{
return 0;
}
#endif /* CONFIG_PPC_PKEY */
#ifdef CONFIG_PPC_KUAP
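/*
 * Debug check used on kernel entry paths: with CONFIG_PPC_KUAP_DEBUG, warn
 * once if the AMR does not hold the fully blocked value.
 */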
static inline void kuap_check_amr(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}
/*
* We support individually allowing read or write, but we don't support nesting
* because that would require an expensive read/modify write of the AMR.
*/
static inline unsigned long get_kuap(void)
{
/*
* We return AMR_KUAP_BLOCKED when we don't support KUAP because
* prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
* cause restore_user_access to do a flush.
*
* This has no effect in terms of actually blocking things on hash,
* so it doesn't break anything.
*/
if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return AMR_KUAP_BLOCKED;
return mfspr(SPRN_AMR);
}
static inline void set_kuap(unsigned long value)
{
if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return;
/*
* ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
* before and after the move to AMR. See table 6 on page 1134.
*/
isync();
mtspr(SPRN_AMR, value);
isync();
}
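/*
 * Called from the fault handler to decide whether a kernel access to a user
 * address that faulted should be treated as a KUAP violation.
 */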
static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
bool is_write)
{
if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
return false;
/*
* For radix this will be a storage protection fault (DSISR_PROTFAULT).
* For hash this will be a key fault (DSISR_KEYFAULT)
*/
/*
 * We do have an exception table entry, but accessing
 * userspace results in a fault. This could be because we
 * didn't unlock the AMR, or because userspace denied access
 * with a key value that blocks it. We are only interested in
 * catching the case of accessing without unlocking the AMR,
 * hence check for BLOCK_WRITE/READ against the AMR.
*/
if (is_write) {
return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
}
return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}
static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
unsigned long thread_amr = 0;
// This is written so we can resolve to a single case at build time
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (mmu_has_feature(MMU_FTR_PKEY))
thread_amr = current_thread_amr();
if (dir == KUAP_READ)
set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
else if (dir == KUAP_WRITE)
set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
else if (dir == KUAP_READ_WRITE)
set_kuap(thread_amr);
else
BUILD_BUG();
}
#else /* CONFIG_PPC_KUAP */
static inline unsigned long get_kuap(void)
{
return AMR_KUAP_BLOCKED;
}
static inline void set_kuap(unsigned long value) { }
static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{ }
#endif /* !CONFIG_PPC_KUAP */
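/*
 * Close the user access window again and, where the L1D uaccess flush
 * mitigation is enabled, flush when leaving the user access window.
 */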
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();
}
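/*
 * Like prevent_user_access(), but return the previous AMR so the caller can
 * re-establish the same access state later with restore_user_access().
 */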
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = get_kuap();
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();
return flags;
}
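/*
 * Re-establish the access state captured by prevent_user_access_return().
 * If that state was itself fully blocked, this still counts as leaving a
 * user access window, so the uaccess flush is performed when enabled.
 */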
static inline void restore_user_access(unsigned long flags)
{
set_kuap(flags);
if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
do_uaccess_flush();
}
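/*
 * Typical usage sketch (illustrative only; the allow_* and prevent_*
 * wrappers that call allow_user_access()/prevent_user_access() live in
 * asm/kup.h):
 *
 *	allow_read_from_user(uptr, size);
 *	... access user memory ...
 *	prevent_read_from_user(uptr, size);
 *
 * or, when the previous state has to be preserved across a region:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... no user access allowed here ...
 *	restore_user_access(flags);
 */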
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */