irqchip/riscv-imsic: Separate next and previous pointers in IMSIC vector

Currently, there is only one "move" pointer in struct imsic_vector so
during vector movement the old vector points to the new vector and new
vector points to itself.

To support forced cleanup of the old vector, add separate "move_next" and
"move_prev" pointers to struct imsic_vector, where during vector movement
the "move_next" pointer of the old vector points to the new vector and the
"move_prev" pointer of the new vector points to the old vector.

Both "move_next" and "move_prev" pointers are cleared separately by
__imsic_local_sync() with a restriction that "move_prev" on the new
CPU is cleared only after the old CPU has cleared "move_next".

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250217085657.789309-8-apatel@ventanamicro.com
This commit is contained in the tree.
Author: Anup Patel, 2025-02-17 14:26:53 +05:30; committed by Thomas Gleixner.
Parent commit: 58d868b67a
Commit: 0f67911e82
3 changed files with 78 additions and 33 deletions.

View file

@@ -77,6 +77,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
struct imsic_vector *vec; struct imsic_vector *vec;
unsigned long local_id; unsigned long local_id;
/*
* Process pending local synchronization instead of waiting
* for per-CPU local timer to expire.
*/
imsic_local_sync_all(false);
chained_irq_enter(chip, desc); chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) { while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -120,7 +126,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while * Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state. * this CPU was not running so sync-up local enable/disable state.
*/ */
imsic_local_sync_all(); imsic_local_sync_all(true);
/* Enable local interrupt delivery */ /* Enable local interrupt delivery */
imsic_local_delivery(true); imsic_local_delivery(true);

View file

@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
} }
} }
static void __imsic_local_sync(struct imsic_local_priv *lpriv) static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{ {
struct imsic_local_config *mlocal; struct imsic_local_config *mlocal;
struct imsic_vector *vec, *mvec; struct imsic_vector *vec, *mvec;
bool ret = true;
int i; int i;
lockdep_assert_held(&lpriv->lock); lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,75 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i); __imsic_id_clear_enable(i);
/* /*
* If the ID was being moved to a new ID on some other CPU * Clear the previous vector pointer of the new vector only
* then we can get a MSI during the movement so check the * after the movement is complete on the old CPU.
* ID pending bit and re-trigger the new ID on other CPU
* using MMIO write.
*/ */
mvec = READ_ONCE(vec->move); mvec = READ_ONCE(vec->move_prev);
WRITE_ONCE(vec->move, NULL); if (mvec) {
if (mvec && mvec != vec) { /*
* If the old vector has not been updated then
* try again in the next sync-up call.
*/
if (READ_ONCE(mvec->move_next)) {
ret = false;
continue;
}
WRITE_ONCE(vec->move_prev, NULL);
}
/*
* If a vector was being moved to a new vector on some other
* CPU then we can get a MSI during the movement so check the
* ID pending bit and re-trigger the new ID on other CPU using
* MMIO write.
*/
mvec = READ_ONCE(vec->move_next);
if (mvec) {
if (__imsic_id_read_clear_pending(i)) { if (__imsic_id_read_clear_pending(i)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va); writel_relaxed(mvec->local_id, mlocal->msi_va);
} }
WRITE_ONCE(vec->move_next, NULL);
imsic_vector_free(&lpriv->vectors[i]); imsic_vector_free(&lpriv->vectors[i]);
} }
skip: skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1); bitmap_clear(lpriv->dirty_bitmap, i, 1);
} }
return ret;
} }
void imsic_local_sync_all(void) #ifdef CONFIG_SMP
static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
{
lockdep_assert_held(&lpriv->lock);
if (!timer_pending(&lpriv->timer)) {
lpriv->timer.expires = jiffies + 1;
add_timer_on(&lpriv->timer, smp_processor_id());
}
}
#else
static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
{
}
#endif
void imsic_local_sync_all(bool force_all)
{ {
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags; unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags); raw_spin_lock_irqsave(&lpriv->lock, flags);
if (force_all)
bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
__imsic_local_sync(lpriv); if (!__imsic_local_sync(lpriv))
__imsic_local_timer_start(lpriv);
raw_spin_unlock_irqrestore(&lpriv->lock, flags); raw_spin_unlock_irqrestore(&lpriv->lock, flags);
} }
@@ -190,12 +231,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer) static void imsic_local_timer_callback(struct timer_list *timer)
{ {
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); imsic_local_sync_all(false);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
__imsic_local_sync(lpriv);
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
} }
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +252,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/ */
if (cpu_online(cpu)) { if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) { if (cpu == smp_processor_id()) {
__imsic_local_sync(lpriv); if (__imsic_local_sync(lpriv))
return; return;
} }
if (!timer_pending(&lpriv->timer)) { __imsic_local_timer_start(lpriv);
lpriv->timer.expires = jiffies + 1;
add_timer_on(&lpriv->timer, cpu);
}
} }
} }
#else #else
@@ -278,8 +311,9 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock); raw_spin_unlock(&lpriv->lock);
} }
static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec, static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
bool new_enable, struct imsic_vector *new_move) struct imsic_vector *vec, bool is_old_vec,
bool new_enable, struct imsic_vector *move_vec)
{ {
unsigned long flags; unsigned long flags;
bool enabled; bool enabled;
@@ -289,7 +323,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */ /* Update enable and move details */
enabled = READ_ONCE(vec->enable); enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable); WRITE_ONCE(vec->enable, new_enable);
WRITE_ONCE(vec->move, new_move); if (is_old_vec)
WRITE_ONCE(vec->move_next, move_vec);
else
WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */ /* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +359,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved * interrupt on the old vector while device was being moved
* to the new vector. * to the new vector.
*/ */
enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec); enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec); imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
} }
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -386,7 +423,8 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
vec = &lpriv->vectors[local_id]; vec = &lpriv->vectors[local_id];
vec->hwirq = hwirq; vec->hwirq = hwirq;
vec->enable = false; vec->enable = false;
vec->move = NULL; vec->move_next = NULL;
vec->move_prev = NULL;
return vec; return vec;
} }

View file

@@ -23,7 +23,8 @@ struct imsic_vector {
unsigned int hwirq; unsigned int hwirq;
/* Details accessed using local lock held */ /* Details accessed using local lock held */
bool enable; bool enable;
struct imsic_vector *move; struct imsic_vector *move_next;
struct imsic_vector *move_prev;
}; };
struct imsic_local_priv { struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false); __imsic_eix_update(id, 1, false, false);
} }
void imsic_local_sync_all(void); void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable); void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec); void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,7 +88,7 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec) static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{ {
return READ_ONCE(vec->move); return READ_ONCE(vec->move_prev);
} }
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec); void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);