The arm64 stacktrace code has a few error conditions where a
WARN_ON_ONCE() is triggered before the stacktrace is terminated and an
error is returned to the caller. The conditions shouldn't be triggered
when unwinding the current task, but it is possible to trigger these
when unwinding another task which is not blocked, as the stack of that
task is concurrently modified. Kent reports that these warnings can be
triggered while running filesystem tests on bcachefs, which calls the
stacktrace code directly.

To produce a meaningful stacktrace of another task, the task in
question should be blocked, but the stacktrace code is expected to be
robust to cases where it is not blocked. Note that this is purely about
not unduly scaring the user and/or crashing the kernel; stacktraces in
such cases are meaningless and may leak kernel secrets from the stack
of the task being unwound.

Ideally we'd pin the task in a blocked state during the unwind, as we
do for /proc/${PID}/wchan since commit:

  42a20f86dc ("sched: Add wrapper for get_wchan() to keep task blocked")

... but a bunch of places don't do that, notably /proc/${PID}/stack,
where we don't pin the task in a blocked state, but do restrict the
output to privileged users since commit:

  f8a00cef17 ("proc: restrict kernel stack dumps to root")

... and so it's possible to trigger these warnings accidentally, e.g.
by reading /proc/*/stack (as root):

| for n in $(seq 1 10); do
|   while true; do cat /proc/*/stack > /dev/null 2>&1; done &
| done
| ------------[ cut here ]------------
| WARNING: CPU: 3 PID: 166 at arch/arm64/kernel/stacktrace.c:207 arch_stack_walk+0x1c8/0x370
| Modules linked in:
| CPU: 3 UID: 0 PID: 166 Comm: cat Not tainted 6.13.0-rc2-00003-g3dafa7a7925d #2
| Hardware name: linux,dummy-virt (DT)
| pstate: 81400005 (Nzcv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
| pc : arch_stack_walk+0x1c8/0x370
| lr : arch_stack_walk+0x1b0/0x370
| sp : ffff800080773890
| x29: ffff800080773930 x28: fff0000005c44500 x27: fff00000058fa038
| x26: 000000007ffff000 x25: 0000000000000000 x24: 0000000000000000
| x23: ffffa35a8d9600ec x22: 0000000000000000 x21: fff00000043a33c0
| x20: ffff800080773970 x19: ffffa35a8d960168 x18: 0000000000000000
| x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000
| x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000
| x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000
| x8 : ffff8000807738e0 x7 : ffff8000806e3800 x6 : ffff8000806e3818
| x5 : ffff800080773920 x4 : ffff8000806e4000 x3 : ffff8000807738e0
| x2 : 0000000000000018 x1 : ffff8000806e3800 x0 : 0000000000000000
| Call trace:
|  arch_stack_walk+0x1c8/0x370 (P)
|  stack_trace_save_tsk+0x8c/0x108
|  proc_pid_stack+0xb0/0x134
|  proc_single_show+0x60/0x120
|  seq_read_iter+0x104/0x438
|  seq_read+0xf8/0x140
|  vfs_read+0xc4/0x31c
|  ksys_read+0x70/0x108
|  __arm64_sys_read+0x1c/0x28
|  invoke_syscall+0x48/0x104
|  el0_svc_common.constprop.0+0x40/0xe0
|  do_el0_svc+0x1c/0x28
|  el0_svc+0x30/0xcc
|  el0t_64_sync_handler+0x10c/0x138
|  el0t_64_sync+0x198/0x19c
| ---[ end trace 0000000000000000 ]---

Fix this by only warning when unwinding the current task. When
unwinding another task the error conditions will be handled by
returning an error without producing a warning.

The two warnings in kunwind_next_frame_record_meta() were added
recently as part of commit:

  c2c6b27b5a ("arm64: stacktrace: unwind exception boundaries")

The warning when recovering the fgraph return address has changed form
many times, but was originally introduced back in commit:

  9f416319f4 ("arm64: fix unwind_frame() for filtered out fn for function graph tracing")

Fixes: c2c6b27b5a ("arm64: stacktrace: unwind exception boundaries")
Fixes: 9f416319f4 ("arm64: fix unwind_frame() for filtered out fn for function graph tracing")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reported-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Kees Cook <keescook@chromium.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20241211140704.2498712-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
};

union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (state->common.pc == orig_pc) {
			WARN_ON_ONCE(state->task == current);
			return -EINVAL;
		}
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->regs = NULL;
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	}
}

static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
	       (void *)state->common.pc,
	       has_info ? " (" : "",
	       source ? source : "",
	       flags.fgraph ? "F" : "",
	       flags.kretprobe ? "K" : "",
	       has_info ? ")" : "");

	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The struct defined for userspace stack frame in AARCH64 mode.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */


void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}