/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
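
/*
 * Layout note (not stated in the original header, but implied by the
 * values): each protection key owns a two-bit {read-disable, write-disable}
 * pair in the AMR, key 0 in the most significant pair. 0x5455... is the
 * repeating "01" (read-disable) pattern and 0xa8aa... the repeating "10"
 * (write-disable) pattern, with key 3's pair left clear in both,
 * presumably because key 3 is used for the kernel mapping with hash
 * translation.
 */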

#ifdef __ASSEMBLY__

.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If kuap feature is not enabled, do the mtspr
	 * only if AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If kuep feature is not enabled, do the mtspr
	 * only if IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
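	/*
	 * tdne traps if the live AMR differs from the fully blocked
	 * value; EMIT_BUG_ENTRY turns that trap into a one-time warning
	 * (BUGFLAG_WARNING | BUGFLAG_ONCE) rather than a fatal BUG.
	 */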
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif

/*
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR;
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 * }
 */
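
/*
 * Operands, as used below: \gpr1 and \gpr2 are scratch GPRs, \use_cr is
 * a scratch CR field for the AMR compare, and \msr_pr_cr, when non-blank,
 * is a CR field in which the caller has already tested MSR[PR], so "bne"
 * on it means the interrupt came from userspace.
 */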
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap are disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * if entering from kernel we don't need to save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * For a kernel thread that doesn't have thread.regs, return
 * the default AMR/IAMR values.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return AMR_KUAP_BLOCKED;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return AMR_KUEP_BLOCKED;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}
|
2020-11-27 10:14:24 +05:30
|
|
|
|
2020-11-27 10:14:12 +05:30
|
|
|
static inline void kuap_kernel_restore(struct pt_regs *regs,
|
|
|
|
unsigned long amr)
|
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		if (unlikely(regs->amr != amr)) {
			isync();
			mtspr(SPRN_AMR, regs->amr);
			/*
			 * No isync required here because we are about to rfi
			 * back to previous context before any user accesses
			 * would be made, which is a CSI.
			 */
		}
	}
	/*
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		unsigned long amr = mfspr(SPRN_AMR);

		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
		return amr;
	}
	return 0;
}

#else /* !CONFIG_PPC_KUAP */

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}

#endif /* CONFIG_PPC_KUAP */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support
 * nesting because that would require an expensive read/modify/write of
 * the AMR.
 */

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}
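
/*
 * For example (a sketch of the values involved): set_kuap(AMR_KUAP_BLOCKED)
 * locks both directions, set_kuap(AMR_KUAP_BLOCK_READ) leaves only writes
 * to userspace permitted, and set_kuap(0) opens both. allow_user_access()
 * below builds exactly these values, OR'ed with the thread's pkey AMR.
 */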

static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
				  bool is_write)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return false;
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 */
	/*
	 * We do have an exception table entry, but accessing the
	 * userspace results in a fault. This could be because we
	 * didn't unlock the AMR or access is denied by userspace
	 * using a key value that blocks access. We are only interested
	 * in catching the use case of accessing without unlocking
	 * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
	 */
	if (is_write)
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;

	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}
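
/*
 * Note: regs->amr above is the AMR value saved at interrupt entry by
 * kuap_save_amr_and_lock(), so the check asks whether the faulting
 * direction was still blocked when the access was attempted.
 */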

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}
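
/*
 * The KUAP_READ/KUAP_WRITE/KUAP_READ_WRITE direction constants are
 * expected to come from asm/kup.h, which includes this header.
 */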

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
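
/*
 * Typical usage, as a sketch (the real callers are the powerpc uaccess
 * helpers; __copy_tofrom_user is the low-level copy routine):
 *
 *	allow_user_access(to, from, size, KUAP_READ_WRITE);
 *	ret = __copy_tofrom_user(to, from, size);
 *	prevent_user_access(to, from, size, KUAP_READ_WRITE);
 *
 * or, where the previous state must be restored rather than fully locked:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	...
 *	restore_user_access(flags);
 */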

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */