linux/arch/x86/kernel/kprobes/opt.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>
#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or queued for unoptimizing */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe has been optimized, the original bytes may have been
	 * overwritten by the jump destination address. In this case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
				     MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
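
/*
 * For reference, the bytes emitted above encode a register load of the
 * struct optimized_kprobe pointer that optimized_callback() receives as
 * its first argument:
 *
 *   x86-64: 48 bf imm64   movabs $val, %rdi
 *   x86-32: b8 imm32      mov    $val, %eax
 *
 * The immediate itself is patched in by the '*(unsigned long *)addr = val'
 * store at the end of the function.
 */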

asm (
	".pushsection .rodata\n"
	"optprobe_template_func:\n"
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	"	pushq $" __stringify(__KERNEL_DS) "\n"
	/* Save the 'sp - 8', this will be fixed later. */
	"	pushq %rsp\n"
	"	pushfq\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Copy 'regs->flags' into 'regs->ss'. */
	"	movq 18*8(%rsp), %rdx\n"
	"	movq %rdx, 20*8(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip 'regs->flags' and 'regs->sp'. */
	"	addq $16, %rsp\n"
	/* And pop flags register from 'regs->ss'. */
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushl %ss\n"
	/* Save the 'sp - 4', this will be fixed later. */
	"	pushl %esp\n"
	"	pushfl\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Copy 'regs->flags' into 'regs->ss'. */
	"	movl 14*4(%esp), %edx\n"
	"	movl %edx, 16*4(%esp)\n"
	RESTORE_REGS_STRING
	/* Skip 'regs->flags' and 'regs->sp'. */
	"	addl $8, %esp\n"
	/* And pop flags register from 'regs->ss'. */
	"	popfl\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n"
	".popsection\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
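
/*
 * Rough layout of a prepared optinsn detour slot, as assembled by
 * arch_prepare_optimized_kprobe() below (see that function for details):
 *
 *   slot + 0             template copy (optprobe_template_entry up to
 *                         optprobe_template_end, TMPL_END_IDX bytes) with
 *                         the CLAC/MOV/CALL placeholders patched in
 *   slot + TMPL_END_IDX   relocated copies of the probed instructions
 *                         (op->optinsn.size bytes)
 *   after those           a JMP32 back to op->kp.addr + op->optinsn.size
 */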

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

		/* Adjust stack pointer */
		regs->sp += sizeof(long);
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}

	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
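
/*
 * Note on the copy loop above: instructions are copied one at a time until
 * at least JMP32_INSN_SIZE (5) bytes of the original code are covered,
 * because arch_optimize_kprobes() later overwrites exactly that many bytes
 * at the probe site with a relative jump. Copying stops with -EINVAL if an
 * instruction cannot be decoded or relocated (!ret || !can_boost()).
 */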

/* Check whether insn is an indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
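
/*
 * For reference, the opcodes matched above: 0xff with ModRM.reg equal to
 * 4 or 5 (that is what the "& 6) == 4" test selects) encodes JMP r/m, the
 * near or far indirect jump, and 0xea is the legacy direct far jump
 * (the "segment based jump" noted above).
 */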

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
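
/*
 * Illustrative example of the target computation above: a two-byte short
 * jump "eb 10" located at address A has insn->next_byte == A + 2 and
 * insn->immediate.value == 0x10, so target == A + 2 + 0x10. In other words,
 * relative displacements are taken from the end of the jump instruction.
 */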

/* Decode the whole function to ensure no instruction jumps into the target. */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and register setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr < (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize kprobes in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
		if (ret < 0)
			return 0;

#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/*
		 * Check that no instruction jumps into the target, either
		 * indirectly or directly.
		 *
		 * The indirect case is present to handle code with jump
		 * tables. When the kernel uses retpolines, the check should in
		 * theory additionally look for jumps to indirect thunks.
		 * However, a kernel built with retpolines or IBT has jump
		 * tables disabled, so the check can be skipped altogether.
		 */
		if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) &&
		    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
		    insn_is_indirect_jump(&insn))
			return 0;
		if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disarmed(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;

	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);
		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions being replaced. The target instructions MUST be
 * relocatable (checked inside). This is called when a new aggr(opt)probe
 * is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify that the address gap is within the ±2GB range, because
	 * this uses a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
	 * also used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for the instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		smp_text_poke_single(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}
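
/*
 * Illustrative before/after view of a probe site optimized above (byte
 * values other than the opcodes are placeholders):
 *
 *   before: cc xx xx xx xx ...   INT3 from the regular kprobe, followed by
 *                                the remaining original instruction bytes
 *   after:  e9 <rel32>     ...   5-byte near jump to op->optinsn.insn,
 *                                rel32 = slot - (kp.addr + JMP32_INSN_SIZE)
 *
 * The 4 bytes overwritten after the INT3 are saved in
 * op->optinsn.copied_insn just above, so they can be restored later.
 */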

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	text_poke(addr, new, INT3_INSN_SIZE);
	smp_text_poke_sync_each_cpu();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	smp_text_poke_sync_each_cpu();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
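
/*
 * Note on the two-step poke above: the first text_poke() turns the jump
 * opcode back into INT3, so a CPU that reaches the probe site while the
 * remaining bytes still hold the jump displacement traps into the regular
 * kprobe path instead of executing a half-restored instruction. Only after
 * every CPU has observed the INT3 (smp_text_poke_sync_each_cpu()) are the
 * original 4 bytes put back.
 */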

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe can really run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}

	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
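
/*
 * Summary of setup_detour_execution() above: when the INT3 is hit on a
 * jump-optimized probe, execution is redirected to op->optinsn.insn +
 * TMPL_END_IDX, i.e. just past the template, so the relocated original
 * instructions run from the out-of-line buffer and the trailing jump placed
 * by arch_prepare_optimized_kprobe() returns to kp.addr + optinsn.size.
 */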