2019-06-03 07:44:50 +02:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2012-03-05 11:49:33 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_DEBUG_MONITORS_H
|
|
|
|
#define __ASM_DEBUG_MONITORS_H
|
|
|
|
|
2015-07-24 16:37:47 +01:00
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/types.h>
|
2016-02-23 08:56:45 +01:00
|
|
|
#include <asm/brk-imm.h>
|
2015-07-24 16:37:43 +01:00
|
|
|
#include <asm/esr.h>
|
2015-07-24 16:37:41 +01:00
|
|
|
#include <asm/insn.h>
|
2015-07-24 16:37:47 +01:00
|
|
|
#include <asm/ptrace.h>
|
2015-07-24 16:37:41 +01:00
|
|
|
|
2014-05-07 12:13:14 +01:00
|
|
|
/* Low-level stepping controls. */
|
|
|
|
#define DBG_SPSR_SS (1 << 21)
|
|
|
|
|
2012-03-05 11:49:33 +00:00
|
|
|
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
|
|
|
|
|
|
|
|
/* AArch64 */
|
|
|
|
#define DBG_ESR_EVT_HWBP 0x0
|
|
|
|
#define DBG_ESR_EVT_HWSS 0x1
|
|
|
|
#define DBG_ESR_EVT_HWWP 0x2
|
|
|
|
#define DBG_ESR_EVT_BRK 0x6
|
|
|
|
|
2014-01-28 16:50:18 +05:30
|
|
|
/*
|
|
|
|
* Break point instruction encoding
|
|
|
|
*/
|
2015-07-24 16:37:41 +01:00
|
|
|
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
|
2014-01-28 16:50:18 +05:30
|
|
|
|
2015-07-24 16:37:46 +01:00
|
|
|
#define AARCH64_BREAK_KGDB_DYN_DBG \
|
|
|
|
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
|
2014-01-28 16:50:18 +05:30
|
|
|
|
|
|
|
#define CACHE_FLUSH_IS_SAFE 1
|
|
|
|
|
arm64: Kprobes with single stepping support
Add support for basic kernel probes(kprobes) and jump probes
(jprobes) for ARM64.
Kprobes utilizes software breakpoint and single step debug
exceptions supported on ARM v8.
A software breakpoint is placed at the probe address to trap the
kernel execution into the kprobe handler.
ARM v8 supports enabling single stepping before the break exception
return (ERET), with next PC in exception return address (ELR_EL1). The
kprobe handler prepares an executable memory slot for out-of-line
execution with a copy of the original instruction being probed, and
enables single stepping. The PC is set to the out-of-line slot address
before the ERET. With this scheme, the instruction is executed with the
exact same register context except for the PC (and DAIF) registers.
Debug mask (PSTATE.D) is enabled only when single stepping a recursive
kprobe, e.g.: during kprobes reenter so that probed instruction can be
single stepped within the kprobe handler -exception- context.
The recursion depth of kprobe is always 2, i.e. upon probe re-entry,
any further re-entry is prevented by not calling handlers and the case
counted as a missed kprobe).
Single stepping from the x-o-l slot has a drawback for PC-relative accesses
like branching and symbolic literals access as the offset from the new PC
(slot address) may not be ensured to fit in the immediate value of
the opcode. Such instructions need simulation, so reject
probing them.
Instructions generating exceptions or cpu mode change are rejected
for probing.
Exclusive load/store instructions are rejected too. Additionally, the
code is checked to see if it is inside an exclusive load/store sequence
(code from Pratyush).
System instructions are mostly enabled for stepping, except MSR/MRS
accesses to "DAIF" flags in PSTATE, which are not safe for
probing.
This also changes arch/arm64/include/asm/ptrace.h to use
include/asm-generic/ptrace.h.
Thanks to Steve Capper and Pratyush Anand for several suggested
Changes.
Signed-off-by: Sandeepa Prabhu <sandeepa.s.prabhu@gmail.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Pratyush Anand <panand@redhat.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-07-08 12:35:48 -04:00
|
|
|
/* kprobes BRK opcodes with ESR encoding */
|
2019-02-26 15:06:42 +00:00
|
|
|
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
|
2020-11-03 14:49:01 +01:00
|
|
|
#define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
|
2016-11-02 14:40:46 +05:30
|
|
|
/* uprobes BRK opcodes with ESR encoding */
|
2019-02-26 15:06:42 +00:00
|
|
|
#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
|
arm64: Kprobes with single stepping support
Add support for basic kernel probes(kprobes) and jump probes
(jprobes) for ARM64.
Kprobes utilizes software breakpoint and single step debug
exceptions supported on ARM v8.
A software breakpoint is placed at the probe address to trap the
kernel execution into the kprobe handler.
ARM v8 supports enabling single stepping before the break exception
return (ERET), with next PC in exception return address (ELR_EL1). The
kprobe handler prepares an executable memory slot for out-of-line
execution with a copy of the original instruction being probed, and
enables single stepping. The PC is set to the out-of-line slot address
before the ERET. With this scheme, the instruction is executed with the
exact same register context except for the PC (and DAIF) registers.
Debug mask (PSTATE.D) is enabled only when single stepping a recursive
kprobe, e.g.: during kprobes reenter so that probed instruction can be
single stepped within the kprobe handler -exception- context.
The recursion depth of kprobe is always 2, i.e. upon probe re-entry,
any further re-entry is prevented by not calling handlers and the case
counted as a missed kprobe).
Single stepping from the x-o-l slot has a drawback for PC-relative accesses
like branching and symbolic literals access as the offset from the new PC
(slot address) may not be ensured to fit in the immediate value of
the opcode. Such instructions need simulation, so reject
probing them.
Instructions generating exceptions or cpu mode change are rejected
for probing.
Exclusive load/store instructions are rejected too. Additionally, the
code is checked to see if it is inside an exclusive load/store sequence
(code from Pratyush).
System instructions are mostly enabled for stepping, except MSR/MRS
accesses to "DAIF" flags in PSTATE, which are not safe for
probing.
This also changes arch/arm64/include/asm/ptrace.h to use
include/asm-generic/ptrace.h.
Thanks to Steve Capper and Pratyush Anand for several suggested
Changes.
Signed-off-by: Sandeepa Prabhu <sandeepa.s.prabhu@gmail.com>
Signed-off-by: David A. Long <dave.long@linaro.org>
Signed-off-by: Pratyush Anand <panand@redhat.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-07-08 12:35:48 -04:00
|
|
|
|
2012-03-05 11:49:33 +00:00
|
|
|
/* AArch32 */
|
|
|
|
#define DBG_ESR_EVT_BKPT 0x4
|
|
|
|
#define DBG_ESR_EVT_VECC 0x5
|
|
|
|
|
|
|
|
#define AARCH32_BREAK_ARM 0x07f001f0
|
|
|
|
#define AARCH32_BREAK_THUMB 0xde01
|
|
|
|
#define AARCH32_BREAK_THUMB2_LO 0xf7f0
|
|
|
|
#define AARCH32_BREAK_THUMB2_HI 0xa000
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
struct task_struct;
|
|
|
|
|
|
|
|
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
|
|
|
|
|
2013-12-04 05:50:20 +00:00
|
|
|
#define DBG_HOOK_HANDLED 0
|
|
|
|
#define DBG_HOOK_ERROR 1
|
|
|
|
|
2012-03-05 11:49:33 +00:00
|
|
|
u8 debug_monitors_arch(void);
|
|
|
|
|
2015-07-27 18:36:54 +01:00
|
|
|
/*
 * Exception level whose debug monitors are being enabled/disabled —
 * presumably the ARMv8 exception levels EL0 (user) and EL1 (kernel),
 * as used by enable_debug_monitors()/disable_debug_monitors() below.
 */
enum dbg_active_el {
	DBG_ACTIVE_EL0 = 0,
	DBG_ACTIVE_EL1,
};
|
|
|
|
|
2015-07-27 18:36:54 +01:00
|
|
|
void enable_debug_monitors(enum dbg_active_el el);
|
|
|
|
void disable_debug_monitors(enum dbg_active_el el);
|
2012-03-05 11:49:33 +00:00
|
|
|
|
|
|
|
void user_rewind_single_step(struct task_struct *task);
|
|
|
|
void user_fastforward_single_step(struct task_struct *task);
|
arm64: ptrace: Override SPSR.SS when single-stepping is enabled
Luis reports that, when reverse debugging with GDB, single-step does not
function as expected on arm64:
| I've noticed, under very specific conditions, that a PTRACE_SINGLESTEP
| request by GDB won't execute the underlying instruction. As a consequence,
| the PC doesn't move, but we return a SIGTRAP just like we would for a
| regular successful PTRACE_SINGLESTEP request.
The underlying problem is that when the CPU register state is restored
as part of a reverse step, the SPSR.SS bit is cleared and so the hardware
single-step state can transition to the "active-pending" state, causing
an unexpected step exception to be taken immediately if a step operation
is attempted.
In hindsight, we probably shouldn't have exposed SPSR.SS in the pstate
accessible by the GPR regset, but it's a bit late for that now. Instead,
simply prevent userspace from configuring the bit to a value which is
inconsistent with the TIF_SINGLESTEP state for the task being traced.
Cc: <stable@vger.kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Keno Fischer <keno@juliacomputing.com>
Link: https://lore.kernel.org/r/1eed6d69-d53d-9657-1fc9-c089be07f98c@linaro.org
Reported-by: Luis Machado <luis.machado@linaro.org>
Tested-by: Luis Machado <luis.machado@linaro.org>
Signed-off-by: Will Deacon <will@kernel.org>
2020-02-13 12:06:26 +00:00
|
|
|
void user_regs_reset_single_step(struct user_pt_regs *regs,
|
|
|
|
struct task_struct *task);
|
2012-03-05 11:49:33 +00:00
|
|
|
|
|
|
|
void kernel_enable_single_step(struct pt_regs *regs);
|
|
|
|
void kernel_disable_single_step(void);
|
|
|
|
int kernel_active_single_step(void);
|
arm64: kgdb: Set PSTATE.SS to 1 to re-enable single-step
Currently only the first attempt to single-step has any effect. After
that all further stepping remains "stuck" at the same program counter
value.
Refer to the ARM Architecture Reference Manual (ARM DDI 0487E.a) D2.12,
PSTATE.SS=1 should be set at each step before transferring the PE to the
'Active-not-pending' state. The problem here is PSTATE.SS=1 is not set
since the second single-step.
After the first single-step, the PE transferes to the 'Inactive' state,
with PSTATE.SS=0 and MDSCR.SS=1, thus PSTATE.SS won't be set to 1 due to
kernel_active_single_step()=true. Then the PE transferes to the
'Active-pending' state when ERET and returns to the debugger by step
exception.
Before this patch:
==================
Entering kdb (current=0xffff3376039f0000, pid 1) on processor 0 due to Keyboard Entry
[0]kdb>
[0]kdb>
[0]kdb> bp write_sysrq_trigger
Instruction(i) BP #0 at 0xffffa45c13d09290 (write_sysrq_trigger)
is enabled addr at ffffa45c13d09290, hardtype=0 installed=0
[0]kdb> go
$ echo h > /proc/sysrq-trigger
Entering kdb (current=0xffff4f7e453f8000, pid 175) on processor 1 due to Breakpoint @ 0xffffad651a309290
[1]kdb> ss
Entering kdb (current=0xffff4f7e453f8000, pid 175) on processor 1 due to SS trap @ 0xffffad651a309294
[1]kdb> ss
Entering kdb (current=0xffff4f7e453f8000, pid 175) on processor 1 due to SS trap @ 0xffffad651a309294
[1]kdb>
After this patch:
=================
Entering kdb (current=0xffff6851c39f0000, pid 1) on processor 0 due to Keyboard Entry
[0]kdb> bp write_sysrq_trigger
Instruction(i) BP #0 at 0xffffc02d2dd09290 (write_sysrq_trigger)
is enabled addr at ffffc02d2dd09290, hardtype=0 installed=0
[0]kdb> go
$ echo h > /proc/sysrq-trigger
Entering kdb (current=0xffff6851c53c1840, pid 174) on processor 1 due to Breakpoint @ 0xffffc02d2dd09290
[1]kdb> ss
Entering kdb (current=0xffff6851c53c1840, pid 174) on processor 1 due to SS trap @ 0xffffc02d2dd09294
[1]kdb> ss
Entering kdb (current=0xffff6851c53c1840, pid 174) on processor 1 due to SS trap @ 0xffffc02d2dd09298
[1]kdb> ss
Entering kdb (current=0xffff6851c53c1840, pid 174) on processor 1 due to SS trap @ 0xffffc02d2dd0929c
[1]kdb>
Fixes: 44679a4f142b ("arm64: KGDB: Add step debugging support")
Co-developed-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Wei Li <liwei391@huawei.com>
Signed-off-by: Sumit Garg <sumit.garg@linaro.org>
Tested-by: Douglas Anderson <dianders@chromium.org>
Acked-by: Daniel Thompson <daniel.thompson@linaro.org>
Tested-by: Daniel Thompson <daniel.thompson@linaro.org>
Link: https://lore.kernel.org/r/20230202073148.657746-3-sumit.garg@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
2023-02-02 13:01:48 +05:30
|
|
|
void kernel_rewind_single_step(struct pt_regs *regs);
|
2024-09-30 17:10:48 +01:00
|
|
|
void kernel_fastforward_single_step(struct pt_regs *regs);
|
2012-03-05 11:49:33 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
2025-07-07 12:41:04 +01:00
|
|
|
bool try_step_suspended_breakpoints(struct pt_regs *regs);
|
2012-03-05 11:49:33 +00:00
|
|
|
#else
|
2025-07-07 12:41:04 +01:00
|
|
|
/*
 * Stub for the !CONFIG_HAVE_HW_BREAKPOINT case: with no hardware
 * breakpoint support there can never be a suspended breakpoint to
 * single-step over, so always report "nothing stepped".
 */
static inline bool try_step_suspended_breakpoints(struct pt_regs *regs)
{
	return false;
}
|
|
|
|
#endif
|
|
|
|
|
2025-07-07 12:40:58 +01:00
|
|
|
bool try_handle_aarch32_break(struct pt_regs *regs);
|
2020-05-13 16:06:37 -07:00
|
|
|
|
2012-03-05 11:49:33 +00:00
|
|
|
#endif /* __ASSEMBLY */
|
|
|
|
#endif /* __ASM_DEBUG_MONITORS_H */
|