Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
powerpc: enable dynamic preemption
Once lazy preemption is supported, it is desirable to be able to change the
preemption model at runtime. So add support for dynamic preemption using
DYNAMIC_KEY.

Tested lightly on a Power10 LPAR. Performance numbers indicate that
preempt=none (no dynamic) and preempt=none (dynamic) are close.

cat /sys/kernel/debug/sched/preempt
(none) voluntary full lazy

perf stat -e probe:__cond_resched -a sleep 1
Performance counter stats for 'system wide':
          1,253      probe:__cond_resched

echo full > /sys/kernel/debug/sched/preempt
cat /sys/kernel/debug/sched/preempt
none voluntary (full) lazy

perf stat -e probe:__cond_resched -a sleep 1
Performance counter stats for 'system wide':
              0      probe:__cond_resched

Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250210184334.567383-2-sshegde@linux.ibm.com
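The runtime switch exercised above via /sys/kernel/debug/sched/preempt boils down to a toggled gate on the interrupt-exit path. The following is a minimal, hypothetical user-space sketch of that decision, not kernel code: plain C booleans stand in for the kernel's static key and the _TIF_NEED_RESCHED thread flag, and the main()/printf() scaffolding is purely illustrative; only the need_irq_preemption() name is taken from the diff below.

/*
 * Hypothetical user-space analogue of the dynamic-preemption gate.
 * Plain bools stand in for the kernel's static key and thread flag;
 * only the need_irq_preemption() name comes from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for sk_dynamic_irqentry_exit_cond_resched, flipped when the
 * preemption model is changed at runtime (e.g. none -> full). */
static bool irqexit_preempt_enabled = true;

/* Stand-in for the _TIF_NEED_RESCHED thread flag. */
static bool need_resched_flag = true;

static bool need_irq_preemption(void)
{
	return irqexit_preempt_enabled;
}

/* Shape of the decision made when returning to a kernel context. */
static void interrupt_exit_kernel(void)
{
	if (need_irq_preemption() && need_resched_flag)
		printf("preemptible model: reschedule before returning\n");
	else
		printf("non-preemptible model: return without rescheduling\n");
}

int main(void)
{
	irqexit_preempt_enabled = true;		/* e.g. "full" selected */
	interrupt_exit_kernel();

	irqexit_preempt_enabled = false;	/* e.g. "none" selected */
	interrupt_exit_kernel();
	return 0;
}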
This commit is contained in:
parent 28affd477b
commit 6ad7751537

4 changed files with 23 additions and 2 deletions
arch/powerpc/Kconfig

@@ -277,6 +277,7 @@ config PPC
 	select HAVE_PERF_EVENTS_NMI		if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_RETHOOK			if KPROBES
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
arch/powerpc/include/asm/preempt.h (new file, 16 lines)
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_POWERPC_PREEMPT_H
+#define __ASM_POWERPC_PREEMPT_H
+
+#include <asm-generic/preempt.h>
+
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+#include <linux/jump_label.h>
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
+#endif /* __ASM_POWERPC_PREEMPT_H */
arch/powerpc/kernel/interrupt.c

@@ -25,6 +25,10 @@
 unsigned long global_dbcr0[NR_CPUS];
 #endif
 
+#if defined(CONFIG_PREEMPT_DYNAMIC)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
 DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
 static inline bool exit_must_hard_disable(void)
@@ -396,7 +400,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	/* Returning to a kernel context with local irqs enabled. */
 	WARN_ON_ONCE(!(regs->msr & MSR_EE));
 again:
-	if (IS_ENABLED(CONFIG_PREEMPTION)) {
+	if (need_irq_preemption()) {
 		/* Return to preemptible kernel context */
 		if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
 			if (preempt_count() == 0)
arch/powerpc/lib/vmx-helper.c

@@ -45,7 +45,7 @@ int exit_vmx_usercopy(void)
 	 * set and we are preemptible. The hack here is to schedule a
 	 * decrementer to fire here and reschedule for us if necessary.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched())
+	if (need_irq_preemption() && need_resched())
 		set_dec(1);
 	return 0;
 }