driver core: Add device_link_test() for testing device link flags
To avoid coding mistakes like the one fixed by commit 3860cbe239 ("PM: sleep: Fix bit masking operation"), introduce device_link_test() for testing device link flags and use it where applicable.

No intentional functional impact.
Signed-off-by: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/2793309.mvXUDI8C0e@rjwysocki.net
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fb506e31b3
commit b29929b819

4 changed files with 45 additions and 41 deletions
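For context, here is a minimal standalone sketch of the kind of slip an open-coded flag test invites (typing '|' where '&' was meant makes the condition unconditionally true) and of how the new helper reads at a call site. The struct and flag values below are pared-down stand-ins for illustration, not the kernel definitions; only the helper body matches the one added by this commit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values for illustration only. */
#define DL_FLAG_PM_RUNTIME		(1u << 2)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	(1u << 4)

/* Pared-down stand-in for the kernel's struct device_link. */
struct device_link {
	uint32_t flags;
};

/* Same body as the helper this commit adds to include/linux/device.h. */
static inline bool device_link_test(const struct device_link *link, uint32_t flags)
{
	return !!(link->flags & flags);
}

int main(void)
{
	struct device_link link = { .flags = DL_FLAG_AUTOREMOVE_SUPPLIER };

	if (link.flags | DL_FLAG_PM_RUNTIME)	/* typo: '|' is always non-zero here */
		printf("open-coded test with '|': looks set (wrong)\n");

	if (link.flags & DL_FLAG_PM_RUNTIME)	/* intended test */
		printf("open-coded test with '&': set\n");
	else
		printf("open-coded test with '&': not set\n");

	/* The helper keeps the masking in one audited place. */
	printf("device_link_test(PM_RUNTIME) = %d\n",
	       device_link_test(&link, DL_FLAG_PM_RUNTIME));
	return 0;
}

Compiled with any C99 compiler, the first branch fires even though DL_FLAG_PM_RUNTIME is not set, which is exactly the mistake the helper makes hard to reproduce at call sites. The full diff follows.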
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -460,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
 	struct device_link *link = to_devlink(dev);
 	const char *output;
 
-	if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+	if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
 		output = "supplier unbind";
-	else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+	else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
 		output = "consumer unbind";
 	else
 		output = "never";
@@ -476,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
 {
 	struct device_link *link = to_devlink(dev);
 
-	return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
 }
 static DEVICE_ATTR_RO(runtime_pm);
 
@@ -485,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
 {
 	struct device_link *link = to_devlink(dev);
 
-	return sysfs_emit(buf, "%d\n",
-			  !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+	return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 }
 static DEVICE_ATTR_RO(sync_state_only);
 
@@ -792,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
 		if (link->consumer != consumer)
 			continue;
 
-		if (link->flags & DL_FLAG_INFERRED &&
+		if (device_link_test(link, DL_FLAG_INFERRED) &&
 		    !(flags & DL_FLAG_INFERRED))
 			link->flags &= ~DL_FLAG_INFERRED;
 
 		if (flags & DL_FLAG_PM_RUNTIME) {
-			if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+			if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 				pm_runtime_new_link(consumer);
 				link->flags |= DL_FLAG_PM_RUNTIME;
 			}
@@ -807,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
 
 		if (flags & DL_FLAG_STATELESS) {
 			kref_get(&link->kref);
-			if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
-			    !(link->flags & DL_FLAG_STATELESS)) {
+			if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+			    !device_link_test(link, DL_FLAG_STATELESS)) {
 				link->flags |= DL_FLAG_STATELESS;
 				goto reorder;
 			} else {
@@ -823,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
 		 * update the existing link to stay around longer.
 		 */
 		if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
-			if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+			if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
 				link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
 				link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
 			}
@@ -831,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
 			link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
 					 DL_FLAG_AUTOREMOVE_SUPPLIER);
 		}
-		if (!(link->flags & DL_FLAG_MANAGED)) {
+		if (!device_link_test(link, DL_FLAG_MANAGED)) {
 			kref_get(&link->kref);
 			link->flags |= DL_FLAG_MANAGED;
 			device_link_init_status(link, consumer, supplier);
 		}
-		if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
 		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
 			link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
 			goto reorder;
@@ -940,7 +939,7 @@ static void __device_link_del(struct kref *kref)
 
 static void device_link_put_kref(struct device_link *link)
 {
-	if (link->flags & DL_FLAG_STATELESS)
+	if (device_link_test(link, DL_FLAG_STATELESS))
 		kref_put(&link->kref, __device_link_del);
 	else if (!device_is_registered(link->consumer))
 		__device_link_del(&link->kref);
@@ -1004,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
 		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 		} else {
-			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
 		}
 	}
@@ -1072,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status != DL_STATE_AVAILABLE &&
-		    !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+		    !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
 
 			if (dev_is_best_effort(dev) &&
-			    link->flags & DL_FLAG_INFERRED &&
+			    device_link_test(link, DL_FLAG_INFERRED) &&
 			    !link->supplier->can_match) {
 				ret = -EAGAIN;
 				continue;
@@ -1128,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
 		return;
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 		if (link->status != DL_STATE_ACTIVE)
 			return;
@@ -1268,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status != DL_STATE_AVAILABLE) {
@@ -1329,7 +1328,7 @@ void device_links_driver_bound(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		/*
@@ -1345,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
 		WARN_ON(link->status != DL_STATE_DORMANT);
 		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 
-		if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+		if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
 			driver_deferred_probe_add(link->consumer);
 	}
 
@@ -1357,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
 	list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
 		struct device *supplier;
 
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		supplier = link->supplier;
-		if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+		if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
 			/*
 			 * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
 			 * other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1369,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
 			 */
 			device_link_drop_managed(link);
 		} else if (dev_is_best_effort(dev) &&
-			   link->flags & DL_FLAG_INFERRED &&
+			   device_link_test(link, DL_FLAG_INFERRED) &&
 			   link->status != DL_STATE_CONSUMER_PROBE &&
 			   !link->supplier->can_match) {
 			/*
@@ -1421,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
 	struct device_link *link, *ln;
 
 	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
-		if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+		if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
 			device_link_drop_managed(link);
 			continue;
 		}
@@ -1436,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
 		if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 		} else {
-			WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+			WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
 			WRITE_ONCE(link->status, DL_STATE_DORMANT);
 		}
 	}
@@ -1461,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		/*
@@ -1498,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
-		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+		WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
 		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
 
 		/*
@@ -1510,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
 		 * has moved to DL_STATE_SUPPLIER_UNBIND.
 		 */
 		if (link->status == DL_STATE_SUPPLIER_UNBIND &&
-		    link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+		    device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
 			device_link_drop_managed(link);
 
 		WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1544,7 +1543,7 @@ bool device_links_busy(struct device *dev)
 	device_links_write_lock();
 
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
-		if (!(link->flags & DL_FLAG_MANAGED))
+		if (!device_link_test(link, DL_FLAG_MANAGED))
 			continue;
 
 		if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1586,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
 	list_for_each_entry(link, &dev->links.consumers, s_node) {
 		enum device_link_state status;
 
-		if (!(link->flags & DL_FLAG_MANAGED) ||
-		    link->flags & DL_FLAG_SYNC_STATE_ONLY)
+		if (!device_link_test(link, DL_FLAG_MANAGED) ||
+		    device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
 			continue;
 
 		status = link->status;
@@ -1743,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
 
 static void fw_devlink_relax_link(struct device_link *link)
 {
-	if (!(link->flags & DL_FLAG_INFERRED))
+	if (!device_link_test(link, DL_FLAG_INFERRED))
 		return;
 
 	if (device_link_flag_is_sync_state_only(link->flags))
@@ -1779,7 +1778,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
 	struct device_link *link = to_devlink(dev);
 	struct device *sup = link->supplier;
 
-	if (!(link->flags & DL_FLAG_MANAGED) ||
+	if (!device_link_test(link, DL_FLAG_MANAGED) ||
 	    link->status == DL_STATE_ACTIVE || sup->state_synced ||
 	    !dev_has_sync_state(sup))
 		return 0;
@@ -2063,7 +2062,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
 		 * such due to a cycle.
 		 */
 		if (device_link_flag_is_sync_state_only(dev_link->flags) &&
-		    !(dev_link->flags & DL_FLAG_CYCLE))
+		    !device_link_test(dev_link, DL_FLAG_CYCLE))
 			continue;
 
 		if (__fw_devlink_relax_cycles(con_handle,
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1998,7 +1998,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
 	idx = device_links_read_lock();
 
 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
-		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
			continue;
 
 		if (!dev_pm_smart_suspend(link->supplier) &&
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -290,7 +290,7 @@ static int rpm_get_suppliers(struct device *dev)
 				device_links_read_lock_held()) {
 		int retval;
 
-		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 			continue;
 
 		retval = pm_runtime_get_sync(link->supplier);
@@ -1879,7 +1879,7 @@ void pm_runtime_get_suppliers(struct device *dev)
 
 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
 				device_links_read_lock_held())
-		if (link->flags & DL_FLAG_PM_RUNTIME) {
+		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
 			link->supplier_preactivated = true;
 			pm_runtime_get_sync(link->supplier);
 		}
@@ -1933,7 +1933,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
  */
 void pm_runtime_drop_link(struct device_link *link)
 {
-	if (!(link->flags & DL_FLAG_PM_RUNTIME))
+	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
 		return;
 
 	pm_runtime_drop_link_count(link->consumer);
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1162,6 +1162,11 @@ void device_links_supplier_sync_state_pause(void);
 void device_links_supplier_sync_state_resume(void);
 void device_link_wait_removal(void);
 
+static inline bool device_link_test(const struct device_link *link, u32 flags)
+{
+	return !!(link->flags & flags);
+}
+
 /* Create alias, so I can be autoloaded. */
 #define MODULE_ALIAS_CHARDEV(major,minor) \
 	MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
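One note on the helper's semantics, following only from the body shown in the hunk above: it masks link->flags with the whole flags argument and collapses the result with !!, so a mask with several bits set answers "is any of these set", not "are all of them set". A small standalone sketch (stand-in flag values, not the kernel headers) makes that concrete:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values for illustration only. */
#define DL_FLAG_AUTOREMOVE_CONSUMER	(1u << 1)
#define DL_FLAG_AUTOREMOVE_SUPPLIER	(1u << 4)

struct device_link {
	uint32_t flags;	/* pared-down stand-in for the kernel struct */
};

/* Same body as the helper added above. */
static inline bool device_link_test(const struct device_link *link, uint32_t flags)
{
	return !!(link->flags & flags);
}

int main(void)
{
	/* Only the supplier-side auto-remove bit is set. */
	struct device_link link = { .flags = DL_FLAG_AUTOREMOVE_SUPPLIER };

	/* Multi-bit mask: true because at least one requested bit is set. */
	printf("any auto-remove bit set: %d\n",
	       device_link_test(&link, DL_FLAG_AUTOREMOVE_CONSUMER |
				       DL_FLAG_AUTOREMOVE_SUPPLIER));

	/* Single-bit mask, as the call sites converted by this commit use it. */
	printf("consumer auto-remove: %d\n",
	       device_link_test(&link, DL_FLAG_AUTOREMOVE_CONSUMER));
	return 0;
}

Every call site converted in this commit passes a single flag, so the any-vs-all distinction does not change behavior here, consistent with the "no intentional functional impact" note in the commit message.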