mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
media: videobuf2: add begin/end cpu_access callbacks to dma-sg
Provide begin_cpu_access() and end_cpu_access() dma_buf_ops callbacks for cache synchronisation on exported buffers. V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers. dma-sg allocates memory using the page allocator directly, so there is no memory consistency guarantee. Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org> Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl> Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
This commit is contained in:
parent
d5adf1b0c2
commit
d4db5eb57c
1 changed file with 30 additions and 0 deletions
|
@ -120,6 +120,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
|
||||||
buf->num_pages = size >> PAGE_SHIFT;
|
buf->num_pages = size >> PAGE_SHIFT;
|
||||||
buf->dma_sgt = &buf->sg_table;
|
buf->dma_sgt = &buf->sg_table;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NOTE: dma-sg allocates memory using the page allocator directly, so
|
||||||
|
* there is no memory consistency guarantee, hence dma-sg ignores DMA
|
||||||
|
* attributes passed from the upper layer. That means that
|
||||||
|
* V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
|
||||||
|
*/
|
||||||
buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
|
buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
|
||||||
GFP_KERNEL | __GFP_ZERO);
|
GFP_KERNEL | __GFP_ZERO);
|
||||||
if (!buf->pages)
|
if (!buf->pages)
|
||||||
|
@ -469,6 +475,28 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
|
||||||
vb2_dma_sg_put(dbuf->priv);
|
vb2_dma_sg_put(dbuf->priv);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
|
||||||
|
enum dma_data_direction direction)
|
||||||
|
{
|
||||||
|
struct vb2_dma_sg_buf *buf = dbuf->priv;
|
||||||
|
struct sg_table *sgt = buf->dma_sgt;
|
||||||
|
|
||||||
|
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int
|
||||||
|
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
|
||||||
|
enum dma_data_direction direction)
|
||||||
|
{
|
||||||
|
struct vb2_dma_sg_buf *buf = dbuf->priv;
|
||||||
|
struct sg_table *sgt = buf->dma_sgt;
|
||||||
|
|
||||||
|
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
|
static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
|
||||||
{
|
{
|
||||||
struct vb2_dma_sg_buf *buf = dbuf->priv;
|
struct vb2_dma_sg_buf *buf = dbuf->priv;
|
||||||
|
@ -487,6 +515,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
|
||||||
.detach = vb2_dma_sg_dmabuf_ops_detach,
|
.detach = vb2_dma_sg_dmabuf_ops_detach,
|
||||||
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
|
.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
|
||||||
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
|
.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
|
||||||
|
.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
|
||||||
|
.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
|
||||||
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
|
.vmap = vb2_dma_sg_dmabuf_ops_vmap,
|
||||||
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
|
.mmap = vb2_dma_sg_dmabuf_ops_mmap,
|
||||||
.release = vb2_dma_sg_dmabuf_ops_release,
|
.release = vb2_dma_sg_dmabuf_ops_release,
|
||||||
|
|
Loading…
Add table
Reference in a new issue