
Clean up the existing export namespace code along the same lines as
commit 33def8498f ("treewide: Convert macro and uses of __section(foo)
to __section("foo")") and for the same reason: it is not desirable for
the namespace argument to be a macro expansion itself.
Scripted using

  git grep -l -e MODULE_IMPORT_NS -e EXPORT_SYMBOL_NS | while read file;
  do
    awk -i inplace '
      /^#define EXPORT_SYMBOL_NS/ {
        gsub(/__stringify\(ns\)/, "ns");
        print;
        next;
      }
      /^#define MODULE_IMPORT_NS/ {
        gsub(/__stringify\(ns\)/, "ns");
        print;
        next;
      }
      /MODULE_IMPORT_NS/ {
        $0 = gensub(/MODULE_IMPORT_NS\(([^)]*)\)/, "MODULE_IMPORT_NS(\"\\1\")", "g");
      }
      /EXPORT_SYMBOL_NS/ {
        if ($0 ~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+),/) {
          if ($0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/ &&
              $0 !~ /(EXPORT_SYMBOL_NS[^(]*)\(\)/ &&
              $0 !~ /^my/) {
            getline line;
            gsub(/[[:space:]]*\\$/, "");
            gsub(/[[:space:]]/, "", line);
            $0 = $0 " " line;
          }
          $0 = gensub(/(EXPORT_SYMBOL_NS[^(]*)\(([^,]+), ([^)]+)\)/,
                      "\\1(\\2, \"\\3\")", "g");
        }
      }
      { print }' $file;
  done
Requested-by: Masahiro Yamada <masahiroy@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://mail.google.com/mail/u/2/#inbox/FMfcgzQXKWgMmjdFwwdsfgxzKpVHWPlc
Acked-by: Greg KH <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
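
For illustration (the exported symbol name below is hypothetical; the
namespace is the one imported by the file shown further down), the
script above rewrites uses such as

  MODULE_IMPORT_NS(DMA_BUF);
  EXPORT_SYMBOL_NS_GPL(some_symbol, DMA_BUF);

into the string-literal form

  MODULE_IMPORT_NS("DMA_BUF");
  EXPORT_SYMBOL_NS_GPL(some_symbol, "DMA_BUF");

while the MODULE_IMPORT_NS/EXPORT_SYMBOL_NS macro definitions
themselves replace __stringify(ns) with a plain ns argument.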
// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

#include <linux/dma-buf.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>

#include "vde.h"

MODULE_IMPORT_NS("DMA_BUF");

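/*
 * Each dma-buf handed to the decoder is tracked by a cache entry holding the
 * attachment, its sg_table, an optional IOVA mapping into the VDE IOMMU
 * domain and a reference count, so repeated use of the same buffer avoids a
 * fresh attach/map cycle. Unreferenced entries are torn down by delayed work.
 */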
struct tegra_vde_cache_entry {
        enum dma_data_direction dma_dir;
        struct dma_buf_attachment *a;
        struct delayed_work dwork;
        struct tegra_vde *vde;
        struct list_head list;
        struct sg_table *sgt;
        struct iova *iova;
        unsigned int refcnt;
};

static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
{
        struct dma_buf *dmabuf = entry->a->dmabuf;

        WARN_ON_ONCE(entry->refcnt);

        if (entry->vde->domain)
                tegra_vde_iommu_unmap(entry->vde, entry->iova);

        dma_buf_unmap_attachment_unlocked(entry->a, entry->sgt, entry->dma_dir);
        dma_buf_detach(dmabuf, entry->a);
        dma_buf_put(dmabuf);

        list_del(&entry->list);
        kfree(entry);
}

static void tegra_vde_delayed_unmap(struct work_struct *work)
{
        struct tegra_vde_cache_entry *entry;
        struct tegra_vde *vde;

        entry = container_of(work, struct tegra_vde_cache_entry,
                             dwork.work);
        vde = entry->vde;

        mutex_lock(&vde->map_lock);
        tegra_vde_release_entry(entry);
        mutex_unlock(&vde->map_lock);
}

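/*
 * Map a dma-buf for the decoder, reusing a cached attachment when the buffer
 * was mapped before and its delayed unmap could still be cancelled; otherwise
 * attach and map it anew and add a refcounted entry to the cache.
 */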
int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
                               struct dma_buf *dmabuf,
                               enum dma_data_direction dma_dir,
                               struct dma_buf_attachment **ap,
                               dma_addr_t *addrp)
{
        struct dma_buf_attachment *attachment;
        struct tegra_vde_cache_entry *entry;
        struct device *dev = vde->dev;
        struct sg_table *sgt;
        struct iova *iova;
        int err;

        mutex_lock(&vde->map_lock);

        list_for_each_entry(entry, &vde->map_list, list) {
                if (entry->a->dmabuf != dmabuf)
                        continue;

                if (!cancel_delayed_work(&entry->dwork))
                        continue;

                if (entry->dma_dir != dma_dir)
                        entry->dma_dir = DMA_BIDIRECTIONAL;

                dma_buf_put(dmabuf);

                if (vde->domain)
                        *addrp = iova_dma_addr(&vde->iova, entry->iova);
                else
                        *addrp = sg_dma_address(entry->sgt->sgl);

                goto ref;
        }

        attachment = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attachment)) {
                dev_err(dev, "Failed to attach dmabuf\n");
                err = PTR_ERR(attachment);
                goto err_unlock;
        }

        sgt = dma_buf_map_attachment_unlocked(attachment, dma_dir);
        if (IS_ERR(sgt)) {
                dev_err(dev, "Failed to get dmabufs sg_table\n");
                err = PTR_ERR(sgt);
                goto err_detach;
        }

        if (!vde->domain && sgt->nents > 1) {
                dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
                err = -EINVAL;
                goto err_unmap;
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                err = -ENOMEM;
                goto err_unmap;
        }

        if (vde->domain) {
                err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
                if (err)
                        goto err_free;

                *addrp = iova_dma_addr(&vde->iova, iova);
        } else {
                *addrp = sg_dma_address(sgt->sgl);
                iova = NULL;
        }

        INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
        list_add(&entry->list, &vde->map_list);

        entry->dma_dir = dma_dir;
        entry->iova = iova;
        entry->vde = vde;
        entry->sgt = sgt;
        entry->a = attachment;
ref:
        entry->refcnt++;

        *ap = entry->a;

        mutex_unlock(&vde->map_lock);

        return 0;

err_free:
        kfree(entry);
err_unmap:
        dma_buf_unmap_attachment_unlocked(attachment, sgt, dma_dir);
err_detach:
        dma_buf_detach(dmabuf, attachment);
err_unlock:
        mutex_unlock(&vde->map_lock);

        return err;
}

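/*
 * Drop one reference to a cached mapping. When the last reference goes away,
 * either release the entry immediately or keep it cached for up to five
 * seconds (via delayed work) in case the buffer is mapped again shortly.
 */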
void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
                                  struct dma_buf_attachment *a,
                                  bool release)
{
        struct tegra_vde_cache_entry *entry;

        mutex_lock(&vde->map_lock);

        list_for_each_entry(entry, &vde->map_list, list) {
                if (entry->a != a)
                        continue;

                WARN_ON_ONCE(!entry->refcnt);

                if (--entry->refcnt == 0) {
                        if (release)
                                tegra_vde_release_entry(entry);
                        else
                                schedule_delayed_work(&entry->dwork, 5 * HZ);
                }
                break;
        }

        mutex_unlock(&vde->map_lock);
}

void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
{
        struct tegra_vde_cache_entry *entry, *tmp;

        mutex_lock(&vde->map_lock);

        list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
                if (entry->refcnt)
                        continue;

                if (!cancel_delayed_work(&entry->dwork))
                        continue;

                tegra_vde_release_entry(entry);
        }

        mutex_unlock(&vde->map_lock);
}

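/*
 * Tear down all cached mappings, looping until the cache list is empty;
 * entries whose delayed unmap is already running are waited for by dropping
 * the lock and rescheduling before retrying.
 */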
void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
{
        struct tegra_vde_cache_entry *entry, *tmp;

        mutex_lock(&vde->map_lock);

        while (!list_empty(&vde->map_list)) {
                list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
                        if (!cancel_delayed_work(&entry->dwork))
                                continue;

                        tegra_vde_release_entry(entry);
                }

                mutex_unlock(&vde->map_lock);
                schedule();
                mutex_lock(&vde->map_lock);
        }

        mutex_unlock(&vde->map_lock);
}