mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-05-20 08:03:25 +00:00
parisc: Add lightweight spinlock checks
Add a lightweight spinlock check which uses only two instructions per spinlock call. It detects if a spinlock has been trashed by some memory corruption and then halts the kernel. It will not detect uninitialized spinlocks, for which CONFIG_DEBUG_SPINLOCK needs to be enabled.

This lightweight spinlock check shouldn't influence runtime, so it's safe to enable it by default.

The __ARCH_SPIN_LOCK_UNLOCKED_VAL constant has been chosen small enough to be loaded by a single LDI assembler statement.

Signed-off-by: Helge Deller <deller@gmx.de>
parent b6405f0829
commit 15e64ef652

4 changed files with 61 additions and 7 deletions
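Before the per-file hunks, here is a rough plain-C rendering of the check the commit message describes (illustrative only, not part of the patch; all names below are made up). The only legal lock words are 0 (locked) and __ARCH_SPIN_LOCK_UNLOCKED_VAL (unlocked), so any bit set outside that constant indicates the spinlock word was overwritten:

#include <stdio.h>

#define UNLOCKED_VAL 0x1a46	/* mirrors __ARCH_SPIN_LOCK_UNLOCKED_VAL */

/* Plain-C model of the "andcm,= ; break 6,6" pair added by the patch:
 * andcm computes lock_val AND NOT UNLOCKED_VAL; a non-zero result
 * (a bit outside the legal patterns) makes the kernel execute break 6,6. */
static void check_lock_word(unsigned int lock_val)
{
	if (lock_val & ~UNLOCKED_VAL)
		printf("0x%08x: trashed, kernel would hit break 6,6\n", lock_val);
	else
		printf("0x%08x: accepted (0, 0x1a46, or a subset of its bits)\n", lock_val);
}

int main(void)
{
	check_lock_word(0x0000);	/* locked */
	check_lock_word(0x1a46);	/* unlocked */
	check_lock_word(0xdeadbeef);	/* corrupted: trips the check */
	return 0;
}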
arch/parisc/Kconfig.debug
@@ -1 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+	bool "Enable lightweight spinlock checks"
+	depends on SMP && !DEBUG_SPINLOCK
+	default y
+	help
+	  Add checks with low performance impact to the spinlock functions
+	  to catch memory overwrites at runtime. For more advanced
+	  spinlock debugging you should choose the DEBUG_SPINLOCK option
+	  which will detect uninitialized spinlocks too.
+	  If unsure say Y here.
arch/parisc/include/asm/spinlock.h
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN	0x0000c006	/* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+	if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+		asm volatile(	"andcm,=	%0,%1,%%r0\n"
+				".word %2\n"
+		: : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+			"i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-	volatile unsigned int *a = __ldcw_align(x);
-	return READ_ONCE(*a) == 0;
+	volatile unsigned int *a;
+	int lock_val;
+
+	a = __ldcw_align(x);
+	lock_val = READ_ONCE(*a);
+	arch_spin_val_check(lock_val);
+	return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	while (__ldcw(a) == 0)
+	do {
+		int lock_val_old;
+
+		lock_val_old = __ldcw(a);
+		arch_spin_val_check(lock_val_old);
+		if (lock_val_old)
+			return;	/* got lock */
+
+		/* wait until we should try to get lock again */
 		while (*a == 0)
 			continue;
+	} while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 
 	a = __ldcw_align(x);
 	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	__asm__ __volatile__("stw,ma %0,0(%1)"
+		: : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
+	int lock_val;
 
 	a = __ldcw_align(x);
-	return __ldcw(a) != 0;
+	lock_val = __ldcw(a);
+	arch_spin_val_check(lock_val);
+	return lock_val != 0;
 }
 
 /*
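For readers unfamiliar with PA-RISC, the rewritten arch_spin_lock() above treats a non-zero return from __ldcw() as "lock acquired". A rough, non-atomic C model of the load-and-clear-word primitive (illustrative only; the real __ldcw() is a single atomic ldcw instruction) shows why:

/* Illustrative model only -- not atomic and not part of the patch. */
static unsigned int ldcw_model(volatile unsigned int *a)
{
	unsigned int old = *a;	/* previous word: 0 or 0x1a46 if intact */

	*a = 0;			/* 0 means "locked" */
	return old;		/* non-zero: the lock was free, caller now owns it */
}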
arch/parisc/include/asm/spinlock_types.h
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL	0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
 	volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED	\
+	{ { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+	    __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
 
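The commit message says 0x1a46 was chosen so it can be loaded with a single LDI. Assuming LDI expands to LDO with a signed 14-bit immediate (an assumption about the assembler, not something stated in the patch), a compile-time guard could express that constraint; the snippet below is illustrative and not part of the patch.

#define EXAMPLE_UNLOCKED_VAL	0x1a46	/* value from the hunk above */

/* Signed 14-bit immediates reach at most 8191 (0x1fff). */
_Static_assert(EXAMPLE_UNLOCKED_VAL > 0 && EXAMPLE_UNLOCKED_VAL <= 0x1fff,
	       "unlocked value must fit a single LDI immediate");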
arch/parisc/kernel/traps.c
@@ -47,6 +47,10 @@
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
 
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
 
 static void parisc_show_stack(struct task_struct *task,
@@ -309,6 +313,12 @@ static void handle_break(struct pt_regs *regs)
 	}
 #endif
 
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+		die_if_kernel("Spinlock was trashed", regs, 1);
+	}
+#endif
+
 	if (unlikely(iir != GDB_BREAK_INSN))
 		parisc_printk_ratelimited(0, regs,
 			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
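A hypothetical way to exercise the new handle_break() path (all names below are made up; this is a sketch, not part of the patch): with CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK=y on SMP and CONFIG_DEBUG_SPINLOCK unset, deliberately scribbling over a spinlock should make the next lock operation raise break 6,6, after which handle_break() calls die_if_kernel("Spinlock was trashed", ...).

#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_trash_spinlock(void)
{
	/* simulate the memory corruption the check is meant to catch */
	memset(&demo_lock, 0xff, sizeof(demo_lock));

	spin_lock(&demo_lock);	/* arch_spin_val_check() sees 0xffffffff and traps */
	spin_unlock(&demo_lock);
}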