mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-18 22:14:16 +00:00
Merge branches 'pm-cpuidle', 'pm-core' and 'pm-sleep'
Merge cpuidle updates, PM core updates and one hibernation-related update for 5.17-rc1:

 - Make cpuidle use default_groups in kobj_type (Greg Kroah-Hartman).

 - Fix two comments in cpuidle code (Jason Wang, Yang Li).

 - Simplify locking in pm_runtime_put_suppliers() (Rafael Wysocki).

 - Add safety net to supplier device release in the runtime PM core code (Rafael Wysocki).

 - Capture device status before disabling runtime PM for it (Rafael Wysocki).

 - Add new macros for declaring PM operations to allow drivers to avoid guarding them with CONFIG_PM #ifdefs or __maybe_unused and update some drivers to use these macros (Paul Cercueil).

 - Allow ACPI hardware signature to be honoured during restore from hibernation (David Woodhouse).

* pm-cpuidle:
  cpuidle: use default_groups in kobj_type
  cpuidle: Fix cpuidle_remove_state_sysfs() kerneldoc comment
  cpuidle: menu: Fix typo in a comment

* pm-core:
  PM: runtime: Simplify locking in pm_runtime_put_suppliers()
  mmc: mxc: Use the new PM macros
  mmc: jz4740: Use the new PM macros
  PM: runtime: Add safety net to supplier device release
  PM: runtime: Capture device status before disabling runtime PM
  PM: core: Add new *_PM_OPS macros, deprecate old ones
  PM: core: Redefine pm_ptr() macro
  r8169: Avoid misuse of pm_ptr() macro

* pm-sleep:
  PM: hibernate: Allow ACPI hardware signature to be honoured
commit c001a52df4
17 changed files with 198 additions and 95 deletions
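As the log above notes, the new *_PM_OPS macros let a driver drop CONFIG_PM_SLEEP #ifdefs and __maybe_unused annotations around its PM callbacks. Below is a minimal sketch of the resulting driver-side pattern; the foo_* names are invented for illustration, and the jz4740/mxcmmc hunks further down do the same conversion for real drivers.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical callbacks; no __maybe_unused or #ifdef CONFIG_PM_SLEEP needed. */
static int foo_suspend(struct device *dev)
{
    return 0; /* stand-in for putting the hardware to sleep */
}

static int foo_resume(struct device *dev)
{
    return 0; /* stand-in for waking the hardware back up */
}

/*
 * Expands to a static const struct dev_pm_ops that references the callbacks
 * through pm_sleep_ptr(), so the compiler never sees them as unused.
 */
DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
    .driver = {
        .name = "foo",
        .pm = pm_sleep_ptr(&foo_pm_ops), /* NULL when CONFIG_PM_SLEEP is off */
    },
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");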
@@ -225,14 +225,23 @@
 			For broken nForce2 BIOS resulting in XT-PIC timer.
 
 	acpi_sleep=	[HW,ACPI] Sleep options
-			Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
-			          old_ordering, nonvs, sci_force_enable, nobl }
+			Format: { s3_bios, s3_mode, s3_beep, s4_hwsig,
+			          s4_nohwsig, old_ordering, nonvs,
+			          sci_force_enable, nobl }
 			See Documentation/power/video.rst for information on
 			s3_bios and s3_mode.
 			s3_beep is for debugging; it makes the PC's speaker beep
 			as soon as the kernel's real-mode entry point is called.
+			s4_hwsig causes the kernel to check the ACPI hardware
+			signature during resume from hibernation, and gracefully
+			refuse to resume if it has changed. This complies with
+			the ACPI specification but not with reality, since
+			Windows does not do this and many laptops do change it
+			on docking. So the default behaviour is to allow resume
+			and simply warn when the signature changes, unless the
+			s4_hwsig option is enabled.
 			s4_nohwsig prevents ACPI hardware signature from being
-			used during resume from hibernation.
+			used (or even warned about) during resume.
 			old_ordering causes the ACPI 1.0 ordering of the _PTS
 			control method, with respect to putting devices into
 			low power states, to be enforced (the ACPI 2.0 ordering

@@ -265,6 +265,10 @@ defined in include/linux/pm.h:
   RPM_SUSPENDED, which means that each device is initially regarded by the
   PM core as 'suspended', regardless of its real hardware status
 
+`enum rpm_status last_status;`
+  - the last runtime PM status of the device captured before disabling runtime
+    PM for it (invalid initially and when disable_depth is 0)
+
 `unsigned int runtime_auto;`
   - if set, indicates that the user space has allowed the device driver to
     power manage the device at run time via the /sys/devices/.../power/control

@@ -333,10 +337,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
 
 `int pm_runtime_resume(struct device *dev);`
   - execute the subsystem-level resume callback for the device; returns 0 on
-    success, 1 if the device's runtime PM status was already 'active' or
-    error code on failure, where -EAGAIN means it may be safe to attempt to
-    resume the device again in future, but 'power.runtime_error' should be
-    checked additionally, and -EACCES means that 'power.disable_depth' is
+    success, 1 if the device's runtime PM status is already 'active' (also if
+    'power.disable_depth' is nonzero, but the status was 'active' when it was
+    changing from 0 to 1) or error code on failure, where -EAGAIN means it may
+    be safe to attempt to resume the device again in future, but
+    'power.runtime_error' should be checked additionally, and -EACCES means
+    that the callback could not be run, because 'power.disable_depth' was
     different from 0
 
 `int pm_runtime_resume_and_get(struct device *dev);`
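The return-value rules documented above are usually consumed through pm_runtime_resume_and_get(), which combines the usage-count increment with the resume and returns either 0 or a negative error code. A hedged sketch of the common caller pattern follows; the foo_* names and the I/O helper are invented for illustration.

#include <linux/pm_runtime.h>

/* Hypothetical stand-in for the device's actual work. */
static int foo_do_io(struct device *dev)
{
    return 0;
}

static int foo_start_transfer(struct device *dev)
{
    int ret;

    /*
     * Bumps the usage count and resumes the device; on failure the
     * reference is dropped again, so no pm_runtime_put() is needed
     * in the error path.
     */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return ret;

    ret = foo_do_io(dev);

    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);
    return ret;
}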
@@ -139,8 +139,10 @@ static int __init acpi_sleep_setup(char *str)
 		if (strncmp(str, "s3_beep", 7) == 0)
 			acpi_realmode_flags |= 4;
 #ifdef CONFIG_HIBERNATION
+		if (strncmp(str, "s4_hwsig", 8) == 0)
+			acpi_check_s4_hw_signature(1);
 		if (strncmp(str, "s4_nohwsig", 10) == 0)
-			acpi_no_s4_hw_signature();
+			acpi_check_s4_hw_signature(0);
 #endif
 		if (strncmp(str, "nonvs", 5) == 0)
 			acpi_nvs_nosave();

@@ -877,11 +877,11 @@ static inline void acpi_sleep_syscore_init(void) {}
 #ifdef CONFIG_HIBERNATION
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
-static bool nosigcheck;
+static int sigcheck = -1; /* Default behaviour is just to warn */
 
-void __init acpi_no_s4_hw_signature(void)
+void __init acpi_check_s4_hw_signature(int check)
 {
-	nosigcheck = true;
+	sigcheck = check;
 }
 
 static int acpi_hibernation_begin(pm_message_t stage)

@@ -1009,12 +1009,28 @@ static void acpi_sleep_hibernate_setup(void)
 	hibernation_set_ops(old_suspend_ordering ?
 			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
 	sleep_states[ACPI_STATE_S4] = 1;
-	if (nosigcheck)
+	if (!sigcheck)
 		return;
 
 	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
-	if (facs)
+	if (facs) {
+		/*
+		 * s4_hardware_signature is the local variable which is just
+		 * used to warn about mismatch after we're attempting to
+		 * resume (in violation of the ACPI specification.)
+		 */
 		s4_hardware_signature = facs->hardware_signature;
+
+		if (sigcheck > 0) {
+			/*
+			 * If we're actually obeying the ACPI specification
+			 * then the signature is written out as part of the
+			 * swsusp header, in order to allow the boot kernel
+			 * to gracefully decline to resume.
+			 */
+			swsusp_hardware_signature = facs->hardware_signature;
+		}
+	}
 }
 #else /* !CONFIG_HIBERNATION */
 static inline void acpi_sleep_hibernate_setup(void) {}

@@ -485,8 +485,7 @@ static void device_link_release_fn(struct work_struct *work)
 	/* Ensure that all references to the link object have been dropped. */
 	device_link_synchronize_removal();
 
-	while (refcount_dec_not_one(&link->rpm_active))
-		pm_runtime_put(link->supplier);
+	pm_runtime_release_supplier(link, true);
 
 	put_device(link->consumer);
 	put_device(link->supplier);

@@ -305,19 +305,40 @@ static int rpm_get_suppliers(struct device *dev)
 	return 0;
 }
 
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ * @check_idle: Whether or not to check if the supplier device is idle.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device
+ * and if @check_idle is set, check if that device is idle (and so it can be
+ * suspended).
+ */
+void pm_runtime_release_supplier(struct device_link *link, bool check_idle)
+{
+	struct device *supplier = link->supplier;
+
+	/*
+	 * The additional power.usage_count check is a safety net in case
+	 * the rpm_active refcount becomes saturated, in which case
+	 * refcount_dec_not_one() would return true forever, but it is not
+	 * strictly necessary.
+	 */
+	while (refcount_dec_not_one(&link->rpm_active) &&
+	       atomic_read(&supplier->power.usage_count) > 0)
+		pm_runtime_put_noidle(supplier);
+
+	if (check_idle)
+		pm_request_idle(supplier);
+}
+
 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
 	struct device_link *link;
 
 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
-				device_links_read_lock_held()) {
-
-		while (refcount_dec_not_one(&link->rpm_active))
-			pm_runtime_put_noidle(link->supplier);
-
-		if (try_to_suspend)
-			pm_request_idle(link->supplier);
-	}
+				device_links_read_lock_held())
+		pm_runtime_release_supplier(link, try_to_suspend);
 }
 
 static void rpm_put_suppliers(struct device *dev)

@@ -742,13 +763,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	trace_rpm_resume_rcuidle(dev, rpmflags);
 
 repeat:
-	if (dev->power.runtime_error)
+	if (dev->power.runtime_error) {
 		retval = -EINVAL;
-	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
-	    && dev->power.runtime_status == RPM_ACTIVE)
-		retval = 1;
-	else if (dev->power.disable_depth > 0)
-		retval = -EACCES;
+	} else if (dev->power.disable_depth > 0) {
+		if (dev->power.runtime_status == RPM_ACTIVE &&
+		    dev->power.last_status == RPM_ACTIVE)
+			retval = 1;
+		else
+			retval = -EACCES;
+	}
 	if (retval)
 		goto out;
 

@@ -1410,8 +1433,10 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 	/* Update time accounting before disabling PM-runtime. */
 	update_pm_runtime_accounting(dev);
 
-	if (!dev->power.disable_depth++)
+	if (!dev->power.disable_depth++) {
 		__pm_runtime_barrier(dev);
+		dev->power.last_status = dev->power.runtime_status;
+	}
 
 out:
 	spin_unlock_irq(&dev->power.lock);

@@ -1428,23 +1453,23 @@ void pm_runtime_enable(struct device *dev)
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.disable_depth > 0) {
-		dev->power.disable_depth--;
-
-		/* About to enable runtime pm, set accounting_timestamp to now */
-		if (!dev->power.disable_depth)
-			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
-	} else {
+	if (!dev->power.disable_depth) {
 		dev_warn(dev, "Unbalanced %s!\n", __func__);
+		goto out;
 	}
 
-	WARN(!dev->power.disable_depth &&
-	     dev->power.runtime_status == RPM_SUSPENDED &&
-	     !dev->power.ignore_children &&
-	     atomic_read(&dev->power.child_count) > 0,
-	     "Enabling runtime PM for inactive device (%s) with active children\n",
-	     dev_name(dev));
+	if (--dev->power.disable_depth > 0)
+		goto out;
 
+	dev->power.last_status = RPM_INVALID;
+	dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+
+	if (dev->power.runtime_status == RPM_SUSPENDED &&
+	    !dev->power.ignore_children &&
+	    atomic_read(&dev->power.child_count) > 0)
+		dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
+
+out:
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
 EXPORT_SYMBOL_GPL(pm_runtime_enable);

@@ -1640,6 +1665,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
 void pm_runtime_init(struct device *dev)
 {
 	dev->power.runtime_status = RPM_SUSPENDED;
+	dev->power.last_status = RPM_INVALID;
 	dev->power.idle_notification = false;
 
 	dev->power.disable_depth = 1;

@@ -1722,8 +1748,6 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
 	struct device_link *link;
-	unsigned long flags;
-	bool put;
 	int idx;
 
 	idx = device_links_read_lock();

@@ -1731,11 +1755,17 @@ void pm_runtime_put_suppliers(struct device *dev)
 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 				device_links_read_lock_held())
 		if (link->supplier_preactivated) {
+			bool put;
+
 			link->supplier_preactivated = false;
-			spin_lock_irqsave(&dev->power.lock, flags);
+
+			spin_lock_irq(&dev->power.lock);
+
 			put = pm_runtime_status_suspended(dev) &&
 			      refcount_dec_not_one(&link->rpm_active);
-			spin_unlock_irqrestore(&dev->power.lock, flags);
+
+			spin_unlock_irq(&dev->power.lock);
+
 			if (put)
 				pm_runtime_put(link->supplier);
 		}

@@ -1772,9 +1802,7 @@ void pm_runtime_drop_link(struct device_link *link)
 		return;
 
 	pm_runtime_drop_link_count(link->consumer);
-
-	while (refcount_dec_not_one(&link->rpm_active))
-		pm_runtime_put(link->supplier);
+	pm_runtime_release_supplier(link, true);
 }
 
 static bool pm_runtime_need_not_resume(struct device *dev)

@@ -34,7 +34,7 @@
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
-* These these three factors are treated independently.
+* These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------

@@ -335,6 +335,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
 	&attr_default_status.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(cpuidle_state_default);
 
 struct cpuidle_state_kobj {
 	struct cpuidle_state *state;

@@ -448,7 +449,7 @@ static void cpuidle_state_sysfs_release(struct kobject *kobj)
 
 static struct kobj_type ktype_state_cpuidle = {
 	.sysfs_ops = &cpuidle_state_sysfs_ops,
-	.default_attrs = cpuidle_state_default_attrs,
+	.default_groups = cpuidle_state_default_groups,
 	.release = cpuidle_state_sysfs_release,
 };
 

@@ -505,7 +506,7 @@ error_state:
 }
 
 /**
-* cpuidle_remove_driver_sysfs - removes the cpuidle states sysfs attributes
+* cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
 * @device: the target device
 */
 static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)

@@ -591,10 +592,11 @@ static struct attribute *cpuidle_driver_default_attrs[] = {
 	&attr_driver_name.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(cpuidle_driver_default);
 
 static struct kobj_type ktype_driver_cpuidle = {
 	.sysfs_ops = &cpuidle_driver_sysfs_ops,
-	.default_attrs = cpuidle_driver_default_attrs,
+	.default_groups = cpuidle_driver_default_groups,
 	.release = cpuidle_driver_sysfs_release,
 };
 
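The default_attrs to default_groups conversion above follows the generic kobject pattern: a NULL-terminated <name>_attrs[] array plus ATTRIBUTE_GROUPS(<name>) produces a matching <name>_groups[] array that kobj_type can point at. A hedged sketch with an invented example_* name (a real kobj_type would also provide a release method):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
    return sysfs_emit(buf, "42\n");
}

static struct kobj_attribute example_value_attr = __ATTR_RO(value);

static struct attribute *example_attrs[] = {
    &example_value_attr.attr,
    NULL
};
/* Emits example_group and example_groups[] from example_attrs[]. */
ATTRIBUTE_GROUPS(example);

static struct kobj_type example_ktype = {
    .sysfs_ops      = &kobj_sysfs_ops,
    .default_groups = example_groups,
};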
@@ -1103,17 +1103,17 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int __maybe_unused jz4740_mmc_suspend(struct device *dev)
+static int jz4740_mmc_suspend(struct device *dev)
 {
 	return pinctrl_pm_select_sleep_state(dev);
 }
 
-static int __maybe_unused jz4740_mmc_resume(struct device *dev)
+static int jz4740_mmc_resume(struct device *dev)
 {
 	return pinctrl_select_default_state(dev);
 }
 
-static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
+DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
 	jz4740_mmc_resume);
 
 static struct platform_driver jz4740_mmc_driver = {

@@ -1123,7 +1123,7 @@ static struct platform_driver jz4740_mmc_driver = {
 		.name = "jz4740-mmc",
 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
-		.pm = pm_ptr(&jz4740_mmc_pm_ops),
+		.pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
 	},
 };
 

@@ -1183,7 +1183,6 @@ static int mxcmci_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int mxcmci_suspend(struct device *dev)
 {
 	struct mmc_host *mmc = dev_get_drvdata(dev);

@@ -1210,9 +1209,8 @@ static int mxcmci_resume(struct device *dev)
 
 	return ret;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
+DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
 
 static struct platform_driver mxcmci_driver = {
 	.probe = mxcmci_probe,

@@ -1220,7 +1218,7 @@ static struct platform_driver mxcmci_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
-		.pm = &mxcmci_pm_ops,
+		.pm = pm_sleep_ptr(&mxcmci_pm_ops),
 		.of_match_table = mxcmci_of_match,
 	}
 };

@@ -5441,7 +5441,9 @@ static struct pci_driver rtl8169_pci_driver = {
 	.probe = rtl_init_one,
 	.remove = rtl_remove_one,
 	.shutdown = rtl_shutdown,
-	.driver.pm = pm_ptr(&rtl8169_pm_ops),
+#ifdef CONFIG_PM
+	.driver.pm = &rtl8169_pm_ops,
+#endif
 };
 
 module_pci_driver(rtl8169_pci_driver);

@@ -506,7 +506,7 @@ acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+void __init acpi_check_s4_hw_signature(int check);
 #endif
 
 #ifdef CONFIG_PM_SLEEP

@@ -300,47 +300,59 @@ struct dev_pm_ops {
 	int (*runtime_idle)(struct device *dev);
 };
 
+#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+	.suspend = pm_sleep_ptr(suspend_fn), \
+	.resume = pm_sleep_ptr(resume_fn), \
+	.freeze = pm_sleep_ptr(suspend_fn), \
+	.thaw = pm_sleep_ptr(resume_fn), \
+	.poweroff = pm_sleep_ptr(suspend_fn), \
+	.restore = pm_sleep_ptr(resume_fn),
+
+#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+	.suspend_late = pm_sleep_ptr(suspend_fn), \
+	.resume_early = pm_sleep_ptr(resume_fn), \
+	.freeze_late = pm_sleep_ptr(suspend_fn), \
+	.thaw_early = pm_sleep_ptr(resume_fn), \
+	.poweroff_late = pm_sleep_ptr(suspend_fn), \
+	.restore_early = pm_sleep_ptr(resume_fn),
+
+#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+	.suspend_noirq = pm_sleep_ptr(suspend_fn), \
+	.resume_noirq = pm_sleep_ptr(resume_fn), \
+	.freeze_noirq = pm_sleep_ptr(suspend_fn), \
+	.thaw_noirq = pm_sleep_ptr(resume_fn), \
+	.poweroff_noirq = pm_sleep_ptr(suspend_fn), \
+	.restore_noirq = pm_sleep_ptr(resume_fn),
+
+#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+	.runtime_suspend = suspend_fn, \
+	.runtime_resume = resume_fn, \
+	.runtime_idle = idle_fn,
+
 #ifdef CONFIG_PM_SLEEP
 #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
-	.suspend = suspend_fn, \
-	.resume = resume_fn, \
-	.freeze = suspend_fn, \
-	.thaw = resume_fn, \
-	.poweroff = suspend_fn, \
-	.restore = resume_fn,
+	SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #else
 #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
-	.suspend_late = suspend_fn, \
-	.resume_early = resume_fn, \
-	.freeze_late = suspend_fn, \
-	.thaw_early = resume_fn, \
-	.poweroff_late = suspend_fn, \
-	.restore_early = resume_fn,
+	LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #else
 #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #endif
 
 #ifdef CONFIG_PM_SLEEP
 #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
-	.suspend_noirq = suspend_fn, \
-	.resume_noirq = resume_fn, \
-	.freeze_noirq = suspend_fn, \
-	.thaw_noirq = resume_fn, \
-	.poweroff_noirq = suspend_fn, \
-	.restore_noirq = resume_fn,
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #else
 #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
 #endif
 
 #ifdef CONFIG_PM
 #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
-	.runtime_suspend = suspend_fn, \
-	.runtime_resume = resume_fn, \
-	.runtime_idle = idle_fn,
+	RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
 #else
 #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
 #endif

@@ -349,9 +361,9 @@ struct dev_pm_ops {
 * Use this if you want to use the same suspend and resume callbacks for suspend
 * to RAM and hibernation.
 */
-#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops __maybe_unused name = { \
-	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+static const struct dev_pm_ops name = { \
+	SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
 }
 
 /*

@@ -367,17 +379,27 @@ const struct dev_pm_ops __maybe_unused name = { \
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
+#define DEFINE_UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+static const struct dev_pm_ops name = { \
+	SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+	RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+}
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops __maybe_unused name = { \
+	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
+/* Deprecated. Use DEFINE_UNIVERSAL_DEV_PM_OPS() instead. */
 #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
 const struct dev_pm_ops __maybe_unused name = { \
 	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
 	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
 }
 
-#ifdef CONFIG_PM
-#define pm_ptr(_ptr) (_ptr)
-#else
-#define pm_ptr(_ptr) NULL
-#endif
+#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
+#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
 
 /*
 * PM_EVENT_ messages
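With pm_ptr() and pm_sleep_ptr() redefined through PTR_IF() as shown above, the ops pointer and its callbacks stay syntactically referenced even when CONFIG_PM is off, so the compiler silently drops them instead of warning. A hedged sketch of DEFINE_UNIVERSAL_DEV_PM_OPS() in a hypothetical driver that shares one callback pair between runtime PM and system sleep (the bar_* names are invented):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int bar_power_down(struct device *dev)
{
    return 0; /* stand-in for gating clocks / dropping power */
}

static int bar_power_up(struct device *dev)
{
    return 0; /* stand-in for restoring clocks / power */
}

/* Same callbacks wired into both the runtime PM and system sleep slots. */
DEFINE_UNIVERSAL_DEV_PM_OPS(bar_pm_ops, bar_power_down, bar_power_up, NULL);

static struct platform_driver bar_driver = {
    .driver = {
        .name = "bar",
        .pm = pm_ptr(&bar_pm_ops), /* NULL when CONFIG_PM is disabled */
    },
};
module_platform_driver(bar_driver);

MODULE_LICENSE("GPL");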
@@ -499,6 +521,7 @@ const struct dev_pm_ops __maybe_unused name = { \
 */
 
 enum rpm_status {
+	RPM_INVALID = -1,
 	RPM_ACTIVE = 0,
 	RPM_RESUMING,
 	RPM_SUSPENDED,

@@ -612,6 +635,7 @@ struct dev_pm_info {
 	unsigned int links_count;
 	enum rpm_request request;
 	enum rpm_status runtime_status;
+	enum rpm_status last_status;
 	int runtime_error;
 	int autosuspend_delay;
 	u64 last_busy;

@@ -58,6 +58,7 @@ extern void pm_runtime_get_suppliers(struct device *dev);
 extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
 extern void pm_runtime_drop_link(struct device_link *link);
+extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle);
 
 extern int devm_pm_runtime_enable(struct device *dev);
 

@@ -283,6 +284,8 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {}
 static inline void pm_runtime_put_suppliers(struct device *dev) {}
 static inline void pm_runtime_new_link(struct device *dev) {}
 static inline void pm_runtime_drop_link(struct device_link *link) {}
+static inline void pm_runtime_release_supplier(struct device_link *link,
+					       bool check_idle) {}
 
 #endif /* !CONFIG_PM */
 

@@ -446,6 +446,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
 
+extern u32 swsusp_hardware_signature;
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);

@@ -170,6 +170,7 @@ extern int swsusp_swap_in_use(void);
 #define SF_PLATFORM_MODE	1
 #define SF_NOCOMPRESS_MODE	2
 #define SF_CRC32_MODE		4
+#define SF_HW_SIG		8
 
 /* kernel/power/hibernate.c */
 extern int swsusp_check(void);

@@ -36,6 +36,8 @@
 
 #define HIBERNATE_SIG	"S1SUSPEND"
 
+u32 swsusp_hardware_signature;
+
 /*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they

@@ -104,7 +106,8 @@ struct swap_map_handle {
 
 struct swsusp_header {
 	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
-		      sizeof(u32)];
+		      sizeof(u32) - sizeof(u32)];
+	u32 hw_sig;
 	u32 crc32;
 	sector_t image;
 	unsigned int flags;	/* Flags to pass to the "boot" kernel */

@@ -312,7 +315,6 @@ static int hib_wait_io(struct hib_bio_batch *hb)
 /*
 * Saving part
 */
-
 static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 {
 	int error;

@@ -324,6 +326,10 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 		swsusp_header->image = handle->first_sector;
+		if (swsusp_hardware_signature) {
+			swsusp_header->hw_sig = swsusp_hardware_signature;
+			flags |= SF_HW_SIG;
+		}
 		swsusp_header->flags = flags;
 		if (flags & SF_CRC32_MODE)
 			swsusp_header->crc32 = handle->crc32;

@@ -1537,6 +1543,12 @@ int swsusp_check(void)
 		} else {
 			error = -EINVAL;
 		}
+		if (!error && swsusp_header->flags & SF_HW_SIG &&
+		    swsusp_header->hw_sig != swsusp_hardware_signature) {
+			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
+				swsusp_header->hw_sig, swsusp_hardware_signature);
+			error = -EINVAL;
+		}
 
 put:
 	if (error)