// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
 */
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

#define UPROBE_INV_FAULT_CODE	UINT_MAX
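
/*
 * Copy the original instruction into the task's XOL (execute-out-of-line)
 * slot, skipping the copy and the cache maintenance when the slot already
 * holds the same bytes.
 */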
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
		void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/*
	 * Initial cache maintenance of the xol page done via set_pte_at().
	 * Subsequent CMOs only needed if the xol slot changes.
	 */
	if (!memcmp(dst, src, len))
		goto done;

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);

done:
	kunmap_atomic(xol_page_kaddr);
}
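
/*
 * On arm64 the BRK exception is taken with the PC still pointing at the
 * breakpoint instruction itself, so the probed address is the current PC.
 */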
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
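
/*
 * Check whether the instruction at @addr may be probed: AArch32 tasks and
 * misaligned addresses are rejected, then the (little-endian) encoding is
 * decoded to decide whether the instruction must be simulated rather than
 * stepped out of line.
 */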
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
		unsigned long addr)
{
	u32 insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EOPNOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = le32_to_cpu(auprobe->insn);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}
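
/*
 * Prepare to execute the probed instruction out of line: mark the fault
 * code invalid so traps can be detected, point the PC at the XOL slot and
 * enable hardware single-stepping.
 */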
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if the OOL insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the PC at the XOL slot so the instruction executes out of line */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}
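
/*
 * The XOL instruction has been stepped: restore the PC to the instruction
 * following the original breakpoint and disable single-stepping.
 */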
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Point the PC at the instruction following the breakpoint address */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * Between arch_uprobe_pre_xol() and arch_uprobe_post_xol(), if the
	 * XOL instruction itself traps, the fault handler overwrites the
	 * invalid fault code set up in arch_uprobe_pre_xol(), which is how
	 * we detect that case here.
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}
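
/*
 * Simulate instructions that cannot be stepped out of line, using the
 * handler selected by arm_probe_decode_insn() at analysis time.
 */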
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	u32 insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = le32_to_cpu(auprobe->insn);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	/*
	 * If a simple branch instruction (B) was used to reach the retprobed
	 * assembly label, return true even when regs->sp and ret->stack are
	 * equal. This ensures that cleanup and reporting of return instances
	 * corresponding to the callee label are done when handle_trampoline
	 * for the called function is executed.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}
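
/*
 * On arm64 the return address lives in the link register (x30), accessed
 * here via procedure_link_pointer(). Swap it for the trampoline address
 * and hand the original back to the core uprobes code.
 */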
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}
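
/*
 * arm64 dispatches uprobe exceptions through the BRK and single-step
 * handlers below rather than through the notifier chain, so there is
 * nothing to do here.
 */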
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
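
/*
 * BRK exception handler: let the core uprobes code claim the trap if it
 * was raised by a uprobe breakpoint.
 */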
int uprobe_brk_handler(struct pt_regs *regs,
		       unsigned long esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}
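
/*
 * Single-step exception handler: the XOL instruction has executed, so let
 * the core uprobes code complete the single-step.
 */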
int uprobe_single_step_handler(struct pt_regs *regs,
			       unsigned long esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}