2018-08-04 10:23:16 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2017-07-10 18:00:26 -07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 Regents of the University of California
|
|
|
|
* Copyright (C) 2017 SiFive
|
2018-08-04 10:23:16 +02:00
|
|
|
* Copyright (C) 2018 Christoph Hellwig
|
2017-07-10 18:00:26 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/irqchip.h>
|
2023-03-28 09:22:18 +05:30
|
|
|
#include <linux/irqdomain.h>
|
|
|
|
#include <linux/module.h>
|
2023-09-27 22:48:03 +00:00
|
|
|
#include <linux/scs.h>
|
2018-10-02 12:15:07 -07:00
|
|
|
#include <linux/seq_file.h>
|
2023-03-28 09:22:19 +05:30
|
|
|
#include <asm/sbi.h>
|
2023-06-13 21:30:17 -04:00
|
|
|
#include <asm/smp.h>
|
|
|
|
#include <asm/softirq_stack.h>
|
|
|
|
#include <asm/stacktrace.h>
|
2017-07-10 18:00:26 -07:00
|
|
|
|
2023-03-28 09:22:18 +05:30
|
|
|
/*
 * Provider callback returning the fwnode of the interrupt controller;
 * installed via riscv_set_intc_hwnode_fn(), consumed by
 * riscv_get_intc_hwnode().  NULL until a provider registers.
 */
static struct fwnode_handle *(*__get_intc_node)(void);
|
|
|
|
|
|
|
|
/*
 * Install the callback that riscv_get_intc_hwnode() uses to look up the
 * interrupt controller's fwnode.
 */
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
{
	__get_intc_node = fn;
}
|
|
|
|
|
|
|
|
struct fwnode_handle *riscv_get_intc_hwnode(void)
|
|
|
|
{
|
|
|
|
if (__get_intc_node)
|
|
|
|
return __get_intc_node();
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
|
|
|
|
|
2023-06-13 21:30:16 -04:00
|
|
|
#ifdef CONFIG_IRQ_STACKS
|
|
|
|
#include <asm/irq_stack.h>
|
|
|
|
|
2023-09-27 22:48:03 +00:00
|
|
|
/*
 * Per-CPU pointer to the shadow call stack used in IRQ context;
 * allocated in init_irq_scs().  The definition only exists when shadow
 * call stacks are configured in.
 */
DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
#endif
|
|
|
|
|
|
|
|
static void init_irq_scs(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
if (!scs_is_enabled())
|
|
|
|
return;
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu)
|
|
|
|
per_cpu(irq_shadow_call_stack_ptr, cpu) =
|
|
|
|
scs_alloc(cpu_to_node(cpu));
|
|
|
|
}
|
|
|
|
|
2023-06-13 21:30:16 -04:00
|
|
|
/* Per-CPU pointer to the IRQ stack; populated by init_irq_stacks(). */
DEFINE_PER_CPU(ulong *, irq_stack_ptr);
|
|
|
|
|
|
|
|
#ifdef CONFIG_VMAP_STACK
|
|
|
|
static void init_irq_stacks(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
ulong *p;
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
|
|
|
|
per_cpu(irq_stack_ptr, cpu) = p;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
/* Statically allocated per-CPU IRQ stacks, used when !CONFIG_VMAP_STACK. */
DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack);
|
|
|
|
|
|
|
|
static void init_irq_stacks(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu)
|
|
|
|
per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_VMAP_STACK */
|
2023-06-13 21:30:17 -04:00
|
|
|
|
2023-09-13 13:29:40 +08:00
|
|
|
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
|
2023-09-27 22:48:00 +00:00
|
|
|
/*
 * Trampoline matching the call_on_irq_stack() callback signature;
 * the regs argument is unused, it simply runs __do_softirq().
 */
static void ___do_softirq(struct pt_regs *regs)
{
	__do_softirq();
}
|
|
|
|
|
2023-06-13 21:30:17 -04:00
|
|
|
void do_softirq_own_stack(void)
|
|
|
|
{
|
2023-09-27 22:48:00 +00:00
|
|
|
if (on_thread_stack())
|
|
|
|
call_on_irq_stack(NULL, ___do_softirq);
|
|
|
|
else
|
2023-06-13 21:30:17 -04:00
|
|
|
__do_softirq();
|
|
|
|
}
|
2023-09-13 13:29:40 +08:00
|
|
|
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
|
2023-06-13 21:30:17 -04:00
|
|
|
|
2023-06-13 21:30:16 -04:00
|
|
|
#else
|
2023-09-27 22:48:03 +00:00
|
|
|
/* !CONFIG_IRQ_STACKS: no dedicated IRQ stacks or shadow call stacks. */
static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
|
|
|
|
#endif /* CONFIG_IRQ_STACKS */
|
|
|
|
|
2018-10-02 12:15:07 -07:00
|
|
|
/*
 * Architecture hook for /proc/interrupts: append the per-CPU IPI
 * statistics.  Always returns 0 (success).
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	show_ipi_stats(p, prec);
	return 0;
}
|
|
|
|
|
2017-07-10 18:00:26 -07:00
|
|
|
/*
 * Boot-time interrupt setup: allocate IRQ (shadow) stacks, probe the
 * interrupt controller, and initialize SBI-based IPIs.  Panics if no
 * root interrupt handler was registered by irqchip_init().
 */
void __init init_IRQ(void)
{
	/* Per-CPU shadow call stacks for IRQ context (no-op if disabled). */
	init_irq_scs();
	/* Per-CPU IRQ stacks (vmapped or static, depending on config). */
	init_irq_stacks();
	irqchip_init();
	/* irqchip_init() must have installed a root handler by now. */
	if (!handle_arch_irq)
		panic("No interrupt controller found.");
	sbi_ipi_init();
}
|