x86/irq: Move irq stacks to percpu hot section
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-8-brgerst@gmail.com
parent c8f1ac2bd7
commit c6a0918072

9 changed files with 30 additions and 24 deletions
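In short, the patch promotes the irq-stack bookkeeping that previously lived inside struct pcpu_hot to standalone per-CPU variables declared with DECLARE_PER_CPU_CACHE_HOT / DEFINE_PER_CPU_CACHE_HOT, so every accessor drops the pcpu_hot. prefix. A condensed before/after sketch of the access pattern, assembled from lines in the hunks below (illustrative fragments, not a standalone compilable unit):

/* Before: the pointer is a member of the shared struct pcpu_hot */
irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);

/* After: a standalone per-CPU variable placed in the cache-hot section */
DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);  /* declaration (header) */
DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);   /* single definition */

irqstk = __this_cpu_read(hardirq_stack_ptr);                        /* readers drop the prefix */
per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);                 /* per-CPU init path */

Only the storage location and the names used to reach it change; as the commit message states, there is no functional change.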
@@ -15,12 +15,6 @@ struct task_struct;
 struct pcpu_hot {
        struct task_struct *current_task;
        unsigned long top_of_stack;
-       void *hardirq_stack_ptr;
-#ifdef CONFIG_X86_64
-       bool hardirq_stack_inuse;
-#else
-       void *softirq_stack_ptr;
-#endif
 };
 
 DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);

@@ -116,7 +116,7 @@
        ASM_CALL_ARG2
 
 #define call_on_irqstack(func, asm_call, argconstr...)                 \
-       call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),      \
+       call_on_stack(__this_cpu_read(hardirq_stack_ptr),               \
                      func, asm_call, argconstr)
 
 /* Macros to assert type correctness for run_*_on_irqstack macros */

@@ -135,7 +135,7 @@
         * User mode entry and interrupt on the irq stack do not       \
         * switch stacks. If from user mode the task stack is empty.   \
         */                                                             \
-       if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
+       if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
                irq_enter_rcu();                                        \
                func(c_args);                                           \
                irq_exit_rcu();                                         \

@@ -146,9 +146,9 @@
         * places. Invoke the stack switch macro with the call         \
         * sequence which matches the above direct invocation.         \
         */                                                             \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);           \
+       __this_cpu_write(hardirq_stack_inuse, true);                    \
        call_on_irqstack(func, asm_call, constr);                       \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);          \
+       __this_cpu_write(hardirq_stack_inuse, false);                   \
        }                                                               \
 }
 
@@ -212,9 +212,9 @@
  */
 #define do_softirq_own_stack()                                         \
 {                                                                      \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);           \
+       __this_cpu_write(hardirq_stack_inuse, true);                    \
        call_on_irqstack(__do_softirq, ASM_CALL_ARG0);                  \
-       __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);          \
+       __this_cpu_write(hardirq_stack_inuse, false);                   \
 }
 
 #endif

@@ -415,6 +415,13 @@ struct irq_stack {
        char stack[IRQ_STACK_SIZE];
 } __aligned(IRQ_STACK_SIZE);
 
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
+#else
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+#endif
+
 #ifdef CONFIG_X86_64
 static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 {

@@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type)
 
 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
        unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
 
        /*

@@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
 
 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr);
+       unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
        unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
 
        /*

@@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
 
 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
 {
-       unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
        unsigned long *begin;
 
        /*

@@ -36,6 +36,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
 DEFINE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
 EXPORT_PER_CPU_SYMBOL(__softirq_pending);
 
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
+
 atomic_t irq_err_count;
 
 /*

@@ -49,6 +49,8 @@ static inline bool check_stack_overflow(void) { return false; }
 static inline void print_stack_overflow(void) { }
 #endif
 
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+
 static void call_on_stack(void *func, void *stack)
 {
        asm volatile("xchgl %[sp], %%esp\n"

@@ -70,7 +72,7 @@ static inline bool execute_on_irq_stack(bool overflow, struct irq_desc *desc)
        u32 *isp, *prev_esp;
 
        curstk = (struct irq_stack *) current_stack();
-       irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+       irqstk = __this_cpu_read(hardirq_stack_ptr);
 
        /*
         * this is where we switch to the IRQ stack. However, if we are

@@ -107,7 +109,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
        int node = cpu_to_node(cpu);
        struct page *ph, *ps;
 
-       if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+       if (per_cpu(hardirq_stack_ptr, cpu))
                return 0;
 
        ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);

@@ -119,8 +121,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
                return -ENOMEM;
        }
 
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
-       per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
+       per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+       per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
        return 0;
 }
 
@@ -130,7 +132,7 @@ void do_softirq_own_stack(void)
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);
+       irqstk = __this_cpu_read(softirq_stack_ptr);
 
        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

@@ -26,6 +26,7 @@
 #include <asm/io_apic.h>
 #include <asm/apic.h>
 
+DEFINE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
 DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
 
 #ifdef CONFIG_VMAP_STACK

@@ -50,7 +51,7 @@ static int map_irq_stack(unsigned int cpu)
                return -ENOMEM;
 
        /* Store actual TOS to avoid adjustment in the hotpath */
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+       per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
        return 0;
 }
 #else

@@ -63,14 +64,14 @@ static int map_irq_stack(unsigned int cpu)
        void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
 
        /* Store actual TOS to avoid adjustment in the hotpath */
-       per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+       per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
        return 0;
 }
 #endif
 
 int irq_init_percpu_irqstack(unsigned int cpu)
 {
-       if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+       if (per_cpu(hardirq_stack_ptr, cpu))
                return 0;
        return map_irq_stack(cpu);
 }

@@ -614,7 +614,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-                    this_cpu_read(pcpu_hot.hardirq_stack_inuse));
+                    this_cpu_read(hardirq_stack_inuse));
 
        if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD))
                switch_fpu_prepare(prev_p, cpu);