// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of device match structures to search in
 * @dev: the of device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
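
/*
 * Typical use (driver and compatible names here are hypothetical): a
 * platform driver declares a match table and calls of_match_device()
 * from probe to recover the matched entry:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo", .data = &foo_config },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct of_device_id *match;
 *
 *		match = of_match_device(foo_of_match, &pdev->dev);
 *		if (!match)
 *			return -ENODEV;
 *		... use match->data, the per-compatible driver data ...
 *	}
 */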

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *of_node = dev->of_node;
	struct of_phandle_iterator it;
	int rc, i = 0;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (!of_property_present(of_node, "memory-region"))
		of_node = np;

	of_for_each_phandle(&it, rc, of_node, "memory-region", NULL, 0) {
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(it.node, "restricted-dma-pool") &&
		    of_device_is_available(it.node)) {
			if (of_reserved_mem_device_init_by_idx(dev, of_node, i))
				dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
			of_node_put(it.node);
			break;
		}
		i++;
	}
}
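
/*
 * A device opts in to a restricted DMA pool via a "memory-region" phandle
 * to a reserved-memory node compatible with "restricted-dma-pool". A
 * minimal device tree sketch (addresses and labels are made up):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		pool: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	dev: device@40000000 {
 *		memory-region = <&pool>;
 *	};
 */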

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev: Device to apply DMA configuration
 * @np: Pointer to OF node having DMA configuration
 * @force_dma: Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id: Optional const pointer value input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 mask, end = 0;
	bool coherent, set_map = false;
	int ret;

	if (dev->dma_range_map) {
		dev_dbg(dev, "dma_range_map already set\n");
		goto skip_map;
	}

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		/* Determine the overall bounds of all DMA regions */
		end = dma_range_map_max(map);
		set_map = true;
	}
skip_map:
	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!end && dev->coherent_dma_mask)
		end = dev->coherent_dma_mask;
	else if (!end)
		end = (1ULL << 32) - 1;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
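	 *
	 * The mask is rounded up to cover a whole number of address bits
	 * (ilog2(end) + 1): a DMA mask describes which address bits the
	 * device can drive, so an end address of e.g. 0xfeffffff still
	 * needs a full 32-bit mask; rounding down would leave the top of
	 * the range unreachable.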
	 */
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (set_map) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	ret = of_iommu_configure(dev, np, id);
	if (ret == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (set_map)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}
	/* Take all other IOMMU errors to mean we'll just carry on without it */
	dev_dbg(dev, "device is%sbehind an iommu\n",
		!ret ? " " : " not ");

	arch_setup_dma_ops(dev, coherent);

	if (ret)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
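
/*
 * The range map above is built from the parent bus's "dma-ranges". As a
 * rough device tree sketch (values are illustrative only):
 *
 *	soc {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		dma-ranges = <0x00000000 0xc0000000 0x40000000>;
 *
 *		dev: device@40000000 { ... };
 *	};
 *
 * this maps the child's DMA address 0x0 to CPU address 0xc0000000 for
 * 1 GiB, giving dev->dma_range_map one entry and a bus_dma_limit of
 * 0x3fffffff.
 */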

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
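
/*
 * Shorthand for the common case (names hypothetical): rather than walking
 * the match table by hand as in the of_match_device() example above, a
 * probe routine can fetch the matched entry's data directly:
 *
 *	const struct foo_config *cfg;
 *
 *	cfg = of_device_get_match_data(&pdev->dev);
 *	if (!cfg)
 *		return -ENODEV;
 */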

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev: Calling device
 * @str: Modalias string
 * @len: Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl;

	if (!dev || !dev->of_node || dev->of_node_reused)
		return -ENODEV;

	sl = of_modalias(dev->of_node, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
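
/*
 * For a sense of the output (node and compatible made up): a node named
 * "foo" with no device_type and a single compatible of "acme,foo"
 * produces roughly
 *
 *	of:NfooT(null)Cacme,foo
 *
 * with a trailing newline appended; the len - 2 above reserves room for
 * that newline plus the NUL terminator.
 */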

/**
 * of_device_uevent - Display OF related uevent information
 * @dev: Device to display the uevent information for
 * @env: Kernel object's userspace event reference to fill up
 */
void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
EXPORT_SYMBOL_GPL(of_device_uevent);
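
/*
 * Example of the variables added (path and strings made up): for a node
 * /soc/serial@10000000 with compatible "acme,uart", the uevent gains
 *
 *	OF_NAME=serial
 *	OF_FULLNAME=/soc/serial@10000000
 *	OF_COMPATIBLE_0=acme,uart
 *	OF_COMPATIBLE_N=1
 *
 * plus an OF_ALIAS_<n> entry for any matching alias, and OF_TYPE if the
 * node has a device_type property.
 */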

int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node) || dev->of_node_reused)
		return -ENODEV;

	/* Devicetree modalias is tricky, we add it in 2 steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;
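
	/*
	 * Step two: of_modalias() writes the payload in place, starting at
	 * the '\0' terminator that add_uevent_var() just stored (env->buflen
	 * counts it, hence buflen - 1), so "MODALIAS=" and its value end up
	 * as a single variable.
	 */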
	sl = of_modalias(dev->of_node, &env->buf[env->buflen-1],
			 sizeof(env->buf) - env->buflen);
	if (sl < 0)
		return sl;
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);

/**
 * of_device_make_bus_id - Use the device node data to assign a unique name
 * @dev: pointer to device structure that is linked to a device tree node
 *
 * This routine will first try using the translated bus address to
 * derive a unique name. If it cannot, then it will prepend names from
 * parent nodes until a unique name can be derived.
 */
void of_device_make_bus_id(struct device *dev)
{
	struct device_node *node = dev->of_node;
	const __be32 *reg;
	u64 addr;
	u32 mask;

	/* Construct the name, using parent nodes if necessary to ensure uniqueness */
	while (node->parent) {
		/*
		 * If the address can be translated, then that is as much
		 * uniqueness as we need. Make it the first component and return
		 */
		reg = of_get_property(node, "reg", NULL);
		if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
			if (!of_property_read_u32(node, "mask", &mask))
				dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn",
					     addr, ffs(mask) - 1, node, dev_name(dev));
			else
				dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
					     addr, node, dev_name(dev));
			return;
		}

		/* format arguments only used if dev_name() resolves to NULL */
		dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
			     kbasename(node->full_name), dev_name(dev));
		node = node->parent;
	}
}
EXPORT_SYMBOL_GPL(of_device_make_bus_id);
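
/*
 * Naming examples (node names made up): a node /soc/serial@10000000 whose
 * "reg" address translates yields a device name like "10000000.serial";
 * if no address on the path translates, node names are chained instead,
 * ending up as something like "soc:serial@10000000".
 */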