mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-10-31 08:44:41 +00:00

commit b2f6662ac0

Invoking atomic_notifier_chain_notify() requires acquiring a spinlock_t,
which can block under CONFIG_PREEMPT_RT. Notifications for members of the
cpu_pm notification chain will be issued by the idle task, which can never
block.

Making *all* atomic_notifiers use a raw_spinlock is too big of a hammer, as
only notifications issued by the idle task are problematic.

Special-case cpu_pm_notifier_chain by kludging a raw_notifier and
raw_spinlock_t together, matching the atomic_notifier behavior with a
raw_spinlock_t.

Fixes: 70d9329857 ("notifier: Fix broken error handling pattern")
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
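
For context, a minimal sketch of how a client driver might hook into this chain. The callback and its names are hypothetical; cpu_pm_register_notifier(), the CPU_PM_* events, and the notifier_block callback signature are the ones used by the file below (registration now runs under the raw_spinlock_t introduced by this change):

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical callback: save/restore per-CPU device state around low power entry. */
static int my_cpu_pm_callback(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save context before the CPU's power domain may reset */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore context after an aborted or completed low power state */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
	.notifier_call = my_cpu_pm_callback,
};

static int __init my_driver_init(void)
{
	return cpu_pm_register_notifier(&my_cpu_pm_nb);
}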
214 lines · 6.1 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@android.com>
 */

#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
	struct raw_notifier_head chain;
	raw_spinlock_t lock;
} cpu_pm_notifier = {
	.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};

static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	/*
	 * This introduces a RCU read critical section, which could be
	 * dysfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
	 * this.
	 */
	rcu_irq_enter_irqson();
	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();
	rcu_irq_exit_irqson();

	return notifier_to_errno(ret);
}

static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
	unsigned long flags;
	int ret;

	rcu_irq_enter_irqson();
	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	rcu_irq_exit_irqson();

	return notifier_to_errno(ret);
}

/**
 * cpu_pm_register_notifier - register a driver with cpu_pm
 * @nb: notifier block to register
 *
 * Add a driver to a list of drivers that are notified about
 * CPU and CPU cluster low power entry and exit.
 *
 * This function has the same return conditions as raw_notifier_chain_register.
 */
int cpu_pm_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);

/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function has the same return conditions as raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the cpu to reset.
 *
 * Must be called on the affected CPU with interrupts disabled.  Platform is
 * responsible for ensuring that cpu_pm_enter is not called twice on the same
 * CPU before cpu_pm_exit is called. Notified drivers can include VFP
 * co-processor, interrupt controller and its PM extensions, local CPU
 * timers context save/restore which shouldn't be interrupted. Hence it
 * must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the cpu to reset.
 *
 * Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all cpus in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all cpus in the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all cpus in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_exit(void)
{
	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);

#ifdef CONFIG_PM
static int cpu_pm_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	ret = cpu_cluster_pm_enter();
	return ret;
}

static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
	.suspend = cpu_pm_suspend,
	.resume = cpu_pm_resume,
};

static int cpu_pm_init(void)
{
	register_syscore_ops(&cpu_pm_syscore_ops);
	return 0;
}
core_initcall(cpu_pm_init);
#endif
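
The kernel-doc for cpu_pm_enter()/cpu_pm_exit() above spells out a calling convention for the idle path. A rough sketch of that convention follows; enter_low_power_state() is a hypothetical stand-in for the platform-specific suspend call, and the caller is assumed to already run with interrupts disabled, as the comments require:

#include <linux/cpu_pm.h>

void enter_low_power_state(void);	/* hypothetical platform hook */

/* Hypothetical idle-path caller, interrupts disabled by the idle loop. */
static int my_platform_cpu_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;	/* a notifier vetoed; CPU_PM_ENTER_FAILED was already sent */

	enter_low_power_state();

	cpu_pm_exit();		/* let notified drivers restore state after wake-up */
	return 0;
}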