mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
smp: Document preemption and stop_machine() mutual exclusion
Recently while revising RCU's cpu online checks, there was some discussion around how IPIs synchronize with hotplug. Add comments explaining how preemption disable creates mutual exclusion with CPU hotplug's stop_machine mechanism. The key insight is that stop_machine() atomically updates CPU masks and flushes IPIs with interrupts disabled, and cannot proceed while any CPU (including the IPI sender) has preemption disabled. [ Apply peterz feedback. ] Cc: Andrea Righi <arighi@nvidia.com> Cc: Paul E. McKenney <paulmck@kernel.org> Cc: Frederic Weisbecker <frederic@kernel.org> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: rcu@vger.kernel.org Acked-by: Paul E. McKenney <paulmck@kernel.org> Co-developed-by: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
fc6f89dc70
commit
cf4fc66746
1 changed file with 17 additions and 9 deletions
26
kernel/smp.c
26
kernel/smp.c
|
@@ -86,13 +86,15 @@ int smpcfd_dead_cpu(unsigned int cpu)
|
|||
int smpcfd_dying_cpu(unsigned int cpu)
|
||||
{
|
||||
/*
|
||||
* The IPIs for the smp-call-function callbacks queued by other
|
||||
* CPUs might arrive late, either due to hardware latencies or
|
||||
* because this CPU disabled interrupts (inside stop-machine)
|
||||
* before the IPIs were sent. So flush out any pending callbacks
|
||||
* explicitly (without waiting for the IPIs to arrive), to
|
||||
* ensure that the outgoing CPU doesn't go offline with work
|
||||
* still pending.
|
||||
* The IPIs for the smp-call-function callbacks queued by other CPUs
|
||||
* might arrive late, either due to hardware latencies or because this
|
||||
* CPU disabled interrupts (inside stop-machine) before the IPIs were
|
||||
* sent. So flush out any pending callbacks explicitly (without waiting
|
||||
* for the IPIs to arrive), to ensure that the outgoing CPU doesn't go
|
||||
* offline with work still pending.
|
||||
*
|
||||
* This runs with interrupts disabled inside the stopper task invoked by
|
||||
* stop_machine(), ensuring mutually exclusive CPU offlining and IPI flush.
|
||||
*/
|
||||
__flush_smp_call_function_queue(false);
|
||||
irq_work_run();
|
||||
|
@@ -418,6 +420,10 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
|
|||
*/
|
||||
static int generic_exec_single(int cpu, call_single_data_t *csd)
|
||||
{
|
||||
/*
|
||||
* Preemption already disabled here so stopper cannot run on this CPU,
|
||||
* ensuring mutually exclusive CPU offlining and last IPI flush.
|
||||
*/
|
||||
if (cpu == smp_processor_id()) {
|
||||
smp_call_func_t func = csd->func;
|
||||
void *info = csd->info;
|
||||
|
@@ -638,8 +644,10 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
|
|||
int err;
|
||||
|
||||
/*
|
||||
* prevent preemption and reschedule on another processor,
|
||||
* as well as CPU removal
|
||||
* Prevent preemption and reschedule on another CPU, as well as CPU
|
||||
* removal. This prevents stopper from running on this CPU, thus
|
||||
* providing mutual exclusion of the below cpu_online() check and
|
||||
* IPI sending ensuring IPI are not missed by CPU going offline.
|
||||
*/
|
||||
this_cpu = get_cpu();
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue