stop-machine: Improve documentation
Merge tag 'stop-machine.2025.07.23a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull stop-machine documentation updates from Paul McKenney:

 - Improve kernel-doc function-header comments

 - Document preemption and stop_machine() mutual exclusion (Joel Fernandes)

* tag 'stop-machine.2025.07.23a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  smp: Document preemption and stop_machine() mutual exclusion
  stop_machine: Improve kernel-doc function-header comments
commit 909d2bb07d
2 changed files with 58 additions and 32 deletions
@@ -88,55 +88,73 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task)
 #endif	/* CONFIG_SMP */
 
 /*
- * stop_machine "Bogolock": stop the entire machine, disable
- * interrupts.  This is a very heavy lock, which is equivalent to
- * grabbing every spinlock (and more).  So the "read" side to such a
- * lock is anything which disables preemption.
+ * stop_machine "Bogolock": stop the entire machine, disable interrupts.
+ * This is a very heavy lock, which is equivalent to grabbing every raw
+ * spinlock (and more).  So the "read" side to such a lock is anything
+ * which disables preemption.
  */
 #if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 
 /**
  * stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
  *
- * Description: This causes a thread to be scheduled on every cpu,
- * each of which disables interrupts.  The result is that no one is
- * holding a spinlock or inside any other preempt-disabled region when
- * @fn() runs.
+ * Description: This causes a thread to be scheduled on every CPU, which
+ * will run with interrupts disabled.  Each CPU specified by @cpus will
+ * run @fn.  While @fn is executing, there will be no other CPUs holding
+ * a raw spinlock or running within any other type of preempt-disabled
+ * region of code.
  *
- * This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel.
+ * When @cpus specifies only a single CPU, this can be thought of as
+ * a reader-writer lock where readers disable preemption (for example,
+ * by holding a raw spinlock) and where the insanely heavy writers run
+ * @fn while also preventing any other CPU from doing any useful work.
+ * These writers can also be thought of as having implicitly grabbed every
+ * raw spinlock in the kernel.
  *
- * Protects against CPU hotplug.
+ * When @fn is a no-op, this can be thought of as an RCU implementation
+ * where readers again disable preemption and writers use stop_machine()
+ * in place of synchronize_rcu(), albeit with orders of magnitude more
+ * disruption than even that of synchronize_rcu_expedited().
+ *
+ * Although only one stop_machine() operation can proceed at a time,
+ * the possibility of blocking in cpus_read_lock() means that the caller
+ * cannot usefully rely on this serialization.
+ *
+ * Return: 0 if all invocations of @fn return zero.  Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
  */
 int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
 
 /**
  * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
  * @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
  *
- * Same as above.  Must be called from with in a cpus_read_lock() protected
- * region.  Avoids nested calls to cpus_read_lock().
+ * Same as above.  Avoids nested calls to cpus_read_lock().
+ *
+ * Context: Must be called from within a cpus_read_lock() protected region.
  */
 int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
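A hedged usage sketch for the _cpuslocked variant (run_update_cpuslocked() is invented for illustration): the caller supplies the cpus_read_lock()-protected region that the Context: line requires, and the _cpuslocked call avoids taking that lock a second time.

#include <linux/cpu.h>
#include <linux/stop_machine.h>

static int run_update_cpuslocked(cpu_stop_fn_t fn, void *data)
{
	int ret;

	cpus_read_lock();	/* the protected region required above */
	ret = stop_machine_cpuslocked(fn, data, NULL);
	cpus_read_unlock();

	return ret;
}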
 
 /**
  * stop_core_cpuslocked: - stop all threads on just one core
  * @cpu: any cpu in the targeted core
- * @fn: the function to run
- * @data: the data ptr for @fn()
+ * @fn: the function to run on each CPU in the core containing @cpu
+ * @data: the data ptr to pass to @fn()
  *
- * Same as above, but instead of every CPU, only the logical CPUs of a
- * single core are affected.
+ * Same as above, but instead of every CPU, only the logical CPUs of the
+ * single core containing @cpu are affected.
  *
  * Context: Must be called from within a cpus_read_lock() protected region.
  *
- * Return: 0 if all executions of @fn returned 0, any non zero return
- * value if any returned non zero.
+ * Return: 0 if all invocations of @fn return zero.  Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
  */
 int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
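The same pattern applies to the per-core variant, again with an invented wrapper (quiesce_core()): only the SMT siblings of the core containing @cpu are frozen, so the rest of the machine keeps running.

#include <linux/cpu.h>
#include <linux/stop_machine.h>

static int quiesce_core(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	int ret;

	cpus_read_lock();	/* required context, as documented above */
	ret = stop_core_cpuslocked(cpu, fn, data);
	cpus_read_unlock();

	return ret;
}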

kernel/smp.c (26 lines changed)
@@ -86,13 +86,15 @@ int smpcfd_dead_cpu(unsigned int cpu)
 int smpcfd_dying_cpu(unsigned int cpu)
 {
 	/*
-	 * The IPIs for the smp-call-function callbacks queued by other
-	 * CPUs might arrive late, either due to hardware latencies or
-	 * because this CPU disabled interrupts (inside stop-machine)
-	 * before the IPIs were sent.  So flush out any pending callbacks
-	 * explicitly (without waiting for the IPIs to arrive), to
-	 * ensure that the outgoing CPU doesn't go offline with work
-	 * still pending.
+	 * The IPIs for the smp-call-function callbacks queued by other CPUs
+	 * might arrive late, either due to hardware latencies or because this
+	 * CPU disabled interrupts (inside stop-machine) before the IPIs were
+	 * sent.  So flush out any pending callbacks explicitly (without waiting
+	 * for the IPIs to arrive), to ensure that the outgoing CPU doesn't go
+	 * offline with work still pending.
+	 *
+	 * This runs with interrupts disabled inside the stopper task invoked by
+	 * stop_machine(), ensuring mutually exclusive CPU offlining and IPI flush.
 	 */
 	__flush_smp_call_function_queue(false);
 	irq_work_run();
@@ -418,6 +420,10 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
  */
 static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
+	/*
+	 * Preemption already disabled here so stopper cannot run on this CPU,
+	 * ensuring mutually exclusive CPU offlining and last IPI flush.
+	 */
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
 		void *info = csd->info;
@@ -638,8 +644,10 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	int err;
 
 	/*
-	 * prevent preemption and reschedule on another processor,
-	 * as well as CPU removal
+	 * Prevent preemption and reschedule on another CPU, as well as CPU
+	 * removal.  This prevents stopper from running on this CPU, thus
+	 * providing mutual exclusion of the below cpu_online() check and
+	 * IPI sending, ensuring IPIs are not missed by a CPU going offline.
 	 */
 	this_cpu = get_cpu();
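For context on the comment above, a hedged sketch of a typical smp_call_function_single() caller (collect_sample() and struct sample are invented for illustration): the callback runs on the target CPU in interrupt-disabled IPI context, which is exactly the kind of pending work that smpcfd_dying_cpu() must flush when that CPU goes offline.

#include <linux/smp.h>

struct sample {
	unsigned int cpu;	/* filled in on the target CPU */
};

/* Runs on the target CPU in interrupt-disabled (IPI) context. */
static void collect_sample(void *info)
{
	struct sample *s = info;

	s->cpu = smp_processor_id();
}

static int sample_remote_cpu(int cpu, struct sample *s)
{
	/* wait=1: block until collect_sample() has completed on @cpu. */
	return smp_call_function_single(cpu, collect_sample, s, 1);
}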