linux/arch/riscv/kernel/entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"
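
/*
 * new_vmalloc_check: if the trap is a load/store/instruction page fault
 * on a kernel address, test and atomically clear this cpu's bit in the
 * new_vmalloc bitmap. If the bit was set, a freshly created vmalloc
 * mapping explains the fault: emit an sfence.vma if the uarch needs one,
 * then sret to retry the faulting access.
 */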
.macro new_vmalloc_check
	REG_S a0, TASK_TI_A0(tp)
	csrr a0, CSR_CAUSE
	/* Exclude IRQs */
	blt a0, zero, .Lnew_vmalloc_restore_context_a0
	REG_S a1, TASK_TI_A1(tp)
	/* Only check new_vmalloc if we are in page/protection fault */
	li a1, EXC_LOAD_PAGE_FAULT
	beq a0, a1, .Lnew_vmalloc_kernel_address
	li a1, EXC_STORE_PAGE_FAULT
	beq a0, a1, .Lnew_vmalloc_kernel_address
	li a1, EXC_INST_PAGE_FAULT
	bne a0, a1, .Lnew_vmalloc_restore_context_a1
.Lnew_vmalloc_kernel_address:
	/* Is it a kernel address? */
	csrr a0, CSR_TVAL
	bge a0, zero, .Lnew_vmalloc_restore_context_a1
	/* Check if a new vmalloc mapping appeared that could explain the trap */
	REG_S a2, TASK_TI_A2(tp)
	/*
	 * Computes:
	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
	 * a1 = BIT_MASK(cpu)
	 */
	REG_L a2, TASK_TI_CPU(tp)
	/*
	 * Compute the new_vmalloc element position:
	 * (cpu / 64) * 8 = (cpu >> 6) << 3
	 */
	srli a1, a2, 6
	slli a1, a1, 3
	la a0, new_vmalloc
	add a0, a0, a1
	/*
	 * Compute the bit position in the new_vmalloc element:
	 * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
	 *         = cpu - ((cpu >> 6) << 3) << 3
	 */
	slli a1, a1, 3
	sub a1, a2, a1
	/* Compute the "get mask": 1 << bit_pos */
	li a2, 1
	sll a1, a2, a1
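	/*
	 * Worked example (illustrative): for cpu = 67, the word index is
	 * 67 >> 6 = 1 (byte offset 8, so a0 = &new_vmalloc[1]),
	 * bit_pos = 67 - 64 = 3, and the mask in a1 is 1 << 3 = 0x8.
	 */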
	/* Check the value of new_vmalloc for this cpu */
	REG_L a2, 0(a0)
	and a2, a2, a1
	beq a2, zero, .Lnew_vmalloc_restore_context
	/* Atomically reset the current cpu bit in new_vmalloc */
	amoxor.d a0, a1, (a0)

	/* Only emit a sfence.vma if the uarch caches invalid entries */
	ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

	REG_L a0, TASK_TI_A0(tp)
	REG_L a1, TASK_TI_A1(tp)
	REG_L a2, TASK_TI_A2(tp)
	csrw CSR_SCRATCH, x0
	sret
.Lnew_vmalloc_restore_context:
	REG_L a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
	REG_L a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
	REG_L a0, TASK_TI_A0(tp)
.endm

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
	/*
	 * The RISC-V kernel does not eagerly emit a sfence.vma after each
	 * new vmalloc mapping, which may result in exceptions:
	 * - if the uarch caches invalid entries, the new mapping would not be
	 *   observed by the page table walker and an invalidation is needed.
	 * - if the uarch does not cache invalid entries, a reordered access
	 *   could "miss" the new mapping and traps: in that case, we only need
	 *   to retry the access, no sfence.vma is required.
	 */
	new_vmalloc_check
#endif

	REG_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
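	/*
	 * Kernel stacks are aligned to double their size, so bit THREAD_SHIFT
	 * of a valid kernel sp is always zero; an overflow (or underflow) of
	 * less than the stack size flips it. The shift below moves that bit
	 * into bit 0, so a single bit test detects the overflow.
	 */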
	addi sp, sp, -(PT_SIZE_ON_STACK)
	srli sp, sp, THREAD_SHIFT
	andi sp, sp, 0x1
	bnez sp, handle_kernel_stack_overflow
	REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
.Lsave_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li t0, SR_SUM | SR_FS_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_start
#endif

	move a0, sp /* pt_regs */

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	/* Handle interrupts */
	call do_irq
	j ret_from_exception
1:
	/* Handle other exceptions */
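	/*
	 * Each excp_vect_table entry is one pointer wide, so scale the
	 * exception cause by the pointer size (1 << RISCV_LGPTR bytes) to
	 * index the handler table.
	 */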
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 3f
	REG_L t1, 0(t0)
2:	jalr t1
	j ret_from_exception
3:
	la t1, do_trap_unknown
	j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, 1f

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call stackleak_erase_on_task_stack
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move a0, sp
	call riscv_v_context_nesting_end
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
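	/*
	 * Store the just-loaded EPC back to itself: the memory value is
	 * unchanged whether the SC succeeds or fails, and the store
	 * conditional drops any dangling reservation.
	 */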
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw x31, CSR_SCRATCH, x31
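	/*
	 * x31 is parked in sscratch (known to be 0 in kernel context) so
	 * that asm_per_cpu has a free temporary to compute the per-cpu
	 * overflow_stack address.
	 */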
	asm_per_cpu sp, overflow_stack, x31
	li x31, OVERFLOW_STACK_SIZE
	add sp, sp, x31
	/* zero out x31 again and restore x31 */
	xor x31, x31, x31
	csrrw x31, CSR_SCRATCH, x31
addi sp, sp, -(PT_SIZE_ON_STACK)
/* Save the GPR context to the overflow stack */
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x5, PT_T0(sp)
save_from_x6_to_x31
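/*
 * Record the original kernel sp and the trap CSRs so the assembled
 * pt_regs describes the faulting context rather than the overflow stack.
 */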
REG_L s0, TASK_TI_KERNEL_SP(tp)
csrr s1, CSR_STATUS
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
csrr s4, CSR_CAUSE
csrr s5, CSR_SCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_STATUS(sp)
REG_S s2, PT_EPC(sp)
REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
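/* Tail-call the C handler with pt_regs in a0; it is not expected to return */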
move a0, sp
tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif
SYM_CODE_START(ret_from_fork)
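/*
 * copy_thread() is expected to have left the kernel-thread function in
 * s0 (NULL for a user fork) and its argument in s1.
 */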
call schedule_tail
beqz s0, 1f /* not from kernel thread */
/* Call fn(arg) */
move a0, s1
jalr s0
1:
move a0, sp /* pt_regs */
call syscall_exit_to_user_mode
j ret_from_exception
SYM_CODE_END(ret_from_fork)
#ifdef CONFIG_IRQ_STACKS
/*
* void call_on_irq_stack(struct pt_regs *regs,
* void (*func)(struct pt_regs *));
*
* Calls func(regs) using the per-CPU IRQ stack.
*/
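/*
 * A typical caller (sketch, assuming the IRQ entry path in traps.c):
 *	call_on_irq_stack(regs, handle_riscv_irq);
 */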
SYM_FUNC_START(call_on_irq_stack)
/* Create a frame record to save ra and s0 (fp) */
addi sp, sp, -STACKFRAME_SIZE_ON_STACK
REG_S ra, STACKFRAME_RA(sp)
REG_S s0, STACKFRAME_FP(sp)
addi s0, sp, STACKFRAME_SIZE_ON_STACK
/* Switch to the per-CPU shadow call stack */
scs_save_current
scs_load_irq_stack t0
/* Switch to the per-CPU IRQ stack and call the handler */
load_per_cpu t0, irq_stack_ptr, t1
li t1, IRQ_STACK_SIZE
add sp, t0, t1
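/* a0 still holds regs, so this performs func(regs) on the IRQ stack */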
jalr a1
/* Switch back to the thread shadow call stack */
scs_load_current
/* Switch back to the thread stack and restore ra and s0 */
addi sp, s0, -STACKFRAME_SIZE_ON_STACK
REG_L ra, STACKFRAME_RA(sp)
REG_L s0, STACKFRAME_FP(sp)
addi sp, sp, STACKFRAME_SIZE_ON_STACK
ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
/*
* Integer register context switch
* The callee-saved registers must be saved and restored.
*
* a0: previous task_struct (must be preserved across the switch)
* a1: next task_struct
*
* The values of a0 and a1 must be preserved by this function, as that's how
* arguments are passed to schedule_tail.
*/
SYM_FUNC_START(__switch_to)
/* Save context into prev->thread */
li a4, TASK_THREAD_RA
add a3, a0, a4
add a4, a1, a4
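/*
 * a3/a4 now hold &prev->thread.ra and &next->thread.ra; the
 * TASK_THREAD_*_RA offsets below are relative to that base.
 */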
REG_S ra, TASK_THREAD_RA_RA(a3)
REG_S sp, TASK_THREAD_SP_RA(a3)
REG_S s0, TASK_THREAD_S0_RA(a3)
REG_S s1, TASK_THREAD_S1_RA(a3)
REG_S s2, TASK_THREAD_S2_RA(a3)
REG_S s3, TASK_THREAD_S3_RA(a3)
REG_S s4, TASK_THREAD_S4_RA(a3)
REG_S s5, TASK_THREAD_S5_RA(a3)
REG_S s6, TASK_THREAD_S6_RA(a3)
REG_S s7, TASK_THREAD_S7_RA(a3)
REG_S s8, TASK_THREAD_S8_RA(a3)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
/* Save the kernel shadow call stack pointer */
scs_save_current
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
REG_L s0, TASK_THREAD_S0_RA(a4)
REG_L s1, TASK_THREAD_S1_RA(a4)
REG_L s2, TASK_THREAD_S2_RA(a4)
REG_L s3, TASK_THREAD_S3_RA(a4)
REG_L s4, TASK_THREAD_S4_RA(a4)
REG_L s5, TASK_THREAD_S5_RA(a4)
REG_L s6, TASK_THREAD_S6_RA(a4)
REG_L s7, TASK_THREAD_S7_RA(a4)
REG_L s8, TASK_THREAD_S8_RA(a4)
REG_L s9, TASK_THREAD_S9_RA(a4)
REG_L s10, TASK_THREAD_S10_RA(a4)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
/* Switch to the next shadow call stack */
scs_load_current
ret
SYM_FUNC_END(__switch_to)
#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
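/* Without an MMU there are no page faults, so those vector slots fall back to do_trap_unknown */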
#endif
.section ".rodata"
.align LGREG
/* Exception vector table */
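/*
 * One RISCV_PTR-sized entry per exception cause; the exception entry
 * code is expected to index this table by scause to pick the C handler.
 */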
SYM_DATA_START_LOCAL(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
RISCV_PTR do_trap_break
RISCV_PTR do_trap_load_misaligned
RISCV_PTR do_trap_load_fault
RISCV_PTR do_trap_store_misaligned
RISCV_PTR do_trap_store_fault
RISCV_PTR do_trap_ecall_u /* system call */
RISCV_PTR do_trap_ecall_s
RISCV_PTR do_trap_unknown
RISCV_PTR do_trap_ecall_m
/* instruction page fault */
ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
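/*
 * User-mode signal-return trampoline for NOMMU kernels: load the
 * rt_sigreturn syscall number and trap back into the kernel.
 */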
li a7, __NR_rt_sigreturn
ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif