From 5b3e3606ab06fc8a8158d01bee07b8aa1c6068e9 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 5 Sep 2019 12:41:33 +0900 Subject: [PATCH 01/61] dmaengine: uniphier-mdmac: use devm_platform_ioremap_resource() Replace the chain of platform_get_resource() and devm_ioremap_resource() with devm_platform_ioremap_resource(). This allows to remove the local variable for (struct resource *), and have one function call less. Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20190905034133.29514-1-yamada.masahiro@socionext.com Signed-off-by: Vinod Koul --- drivers/dma/uniphier-mdmac.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c index fde54687856b..21b8f1131d55 100644 --- a/drivers/dma/uniphier-mdmac.c +++ b/drivers/dma/uniphier-mdmac.c @@ -382,7 +382,6 @@ static int uniphier_mdmac_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct uniphier_mdmac_device *mdev; struct dma_device *ddev; - struct resource *res; int nr_chans, ret, i; nr_chans = platform_irq_count(pdev); @@ -398,8 +397,7 @@ static int uniphier_mdmac_probe(struct platform_device *pdev) if (!mdev) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mdev->reg_base = devm_ioremap_resource(dev, res); + mdev->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mdev->reg_base)) return PTR_ERR(mdev->reg_base); From 6735ab500b8915182127a9030f04683b8f8b1431 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Thu, 5 Sep 2019 14:02:49 +0800 Subject: [PATCH 02/61] dmaengine: ti: edma: remove unused code drivers/dma/ti/edma.c: In function edma_probe: drivers/dma/ti/edma.c:2252:11: warning: variable off set but not used [-Wunused-but-set-variable] 'off' is not used now, so remove it. Signed-off-by: YueHaibing Acked-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20190905060249.23928-1-yuehaibing@huawei.com Signed-off-by: Vinod Koul --- drivers/dma/ti/edma.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index ba7c4f07fcd6..54fd981e3db5 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2249,10 +2249,8 @@ static int edma_probe(struct platform_device *pdev) { struct edma_soc_info *info = pdev->dev.platform_data; s8 (*queue_priority_mapping)[2]; - int i, off; const s16 (*rsv_slots)[2]; - const s16 (*xbar_chans)[2]; - int irq; + int i, irq; char *irq_name; struct resource *mem; struct device_node *node = pdev->dev.of_node; @@ -2349,14 +2347,6 @@ static int edma_probe(struct platform_device *pdev) edma_write_slot(ecc, i, &dummy_paramset); } - /* Clear the xbar mapped channels in unused list */ - xbar_chans = info->xbar_chans; - if (xbar_chans) { - for (i = 0; xbar_chans[i][1] != -1; i++) { - off = xbar_chans[i][1]; - } - } - irq = platform_get_irq_byname(pdev, "edma3_ccint"); if (irq < 0 && node) irq = irq_of_parse_and_map(node, 0); From 2df4a02a9cebaac054c1acd9b6841dd99526500d Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 9 Sep 2019 15:34:50 +0900 Subject: [PATCH 03/61] dmaengine: rcar-dmac: Use of_data values instead of a macro Since we will have changed memory mapping of the DMAC in the future, this patch uses of_data values instead of a macro to calculate each channel's base offset. 
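As an illustration of the benefit (this snippet is not part of the patch; the name, compatible string and values below are made up): a future SoC with a different channel layout would then only need its own match data and table entry,

	static const struct rcar_dmac_of_data hypothetical_dmac_data = {
		.chan_offset_base	= 0x0,
		.chan_offset_stride	= 0x1000,
	};

	{ .compatible = "renesas,dmac-hypothetical", .data = &hypothetical_dmac_data },

rather than a change to every user of the RCAR_DMAC_CHAN_OFFSET() macro.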
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/1568010892-17606-3-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 3993ab65c62c..74996a04e8df 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -210,12 +210,20 @@ struct rcar_dmac { #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) +/* + * struct rcar_dmac_of_data - This driver's OF data + * @chan_offset_base: DMAC channels base offset + * @chan_offset_stride: DMAC channels offset stride + */ +struct rcar_dmac_of_data { + u32 chan_offset_base; + u32 chan_offset_stride; +}; + /* ----------------------------------------------------------------------------- * Registers */ -#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i)) - #define RCAR_DMAISTA 0x0020 #define RCAR_DMASEC 0x0030 #define RCAR_DMAOR 0x0060 @@ -1726,6 +1734,7 @@ static const struct dev_pm_ops rcar_dmac_pm = { static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, struct rcar_dmac_chan *rchan, + const struct rcar_dmac_of_data *data, unsigned int index) { struct platform_device *pdev = to_platform_device(dmac->dev); @@ -1735,7 +1744,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, int ret; rchan->index = index; - rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index); + rchan->iomem = dmac->iomem + data->chan_offset_base + + data->chan_offset_stride * index; rchan->mid_rid = -EINVAL; spin_lock_init(&rchan->lock); @@ -1813,10 +1823,15 @@ static int rcar_dmac_probe(struct platform_device *pdev) DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; struct dma_device *engine; struct rcar_dmac *dmac; + const struct rcar_dmac_of_data *data; struct resource *mem; unsigned int i; int ret; + data = of_device_get_match_data(&pdev->dev); + if (!data) + return -EINVAL; + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); if (!dmac) return -ENOMEM; @@ -1901,7 +1916,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) if (!(dmac->channels_mask & BIT(i))) continue; - ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); + ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i); if (ret < 0) goto error; } @@ -1948,8 +1963,16 @@ static void rcar_dmac_shutdown(struct platform_device *pdev) rcar_dmac_stop_all_chan(dmac); } +static const struct rcar_dmac_of_data rcar_dmac_data = { + .chan_offset_base = 0x8000, + .chan_offset_stride = 0x80, +}; + static const struct of_device_id rcar_dmac_of_ids[] = { - { .compatible = "renesas,rcar-dmac", }, + { + .compatible = "renesas,rcar-dmac", + .data = &rcar_dmac_data, + }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); From d832c481bff39a28eb7e9b28485b749cab6b29b3 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 9 Sep 2019 15:34:51 +0900 Subject: [PATCH 04/61] dmaengine: rcar-dmac: Use devm_platform_ioremap_resource() This patch uses devm_platform_ioremap_resource() instead of using platform_get_resource() and devm_ioremap_resource() together to simplify. 
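For reference, the wrapper is essentially the two dropped calls folded into one helper; a simplified sketch of what it does (see drivers/base/platform.c for the real definition):

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		/* look up the MEM resource, then ioremap it via devres */
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}

Because devm_ioremap_resource() returns an ERR_PTR() for a missing or invalid resource, callers keep the same IS_ERR()/PTR_ERR() handling as before.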
Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/1568010892-17606-4-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 74996a04e8df..542786de69a9 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1824,7 +1824,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) struct dma_device *engine; struct rcar_dmac *dmac; const struct rcar_dmac_of_data *data; - struct resource *mem; unsigned int i; int ret; @@ -1863,8 +1862,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) return -ENOMEM; /* Request resources. */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); + dmac->iomem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dmac->iomem)) return PTR_ERR(dmac->iomem); From fcf8adb787073ba6c673c1bb9900a059c7f6676f Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 9 Sep 2019 15:34:52 +0900 Subject: [PATCH 05/61] dmaengine: rcar-dmac: Add dma-channel-mask property support This patch adds dma-channel-mask property support not to reserve some DMA channels for some reasons. (for example: a heterogeneous CPU uses it.) Signed-off-by: Yoshihiro Shimoda Reviewed-by: Simon Horman Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/1568010892-17606-5-git-send-email-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 542786de69a9..f06016d38a05 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -203,7 +203,7 @@ struct rcar_dmac { unsigned int n_channels; struct rcar_dmac_chan *channels; - unsigned int channels_mask; + u32 channels_mask; DECLARE_BITMAP(modules, 256); }; @@ -1810,7 +1810,15 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) return -EINVAL; } + /* + * If the driver is unable to read dma-channel-mask property, + * the driver assumes that it can use all channels. + */ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); + of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); + + /* If the property has out-of-channel mask, this driver clears it */ + dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); return 0; } From fbd1d637f6d1a5b0d2b487299111b06fb0b3c3fd Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 10:37:31 +0200 Subject: [PATCH 06/61] dmaengine: at_xdmac: Use devm_platform_ioremap_resource() in at_xdmac_probe() Simplify this function implementation by using a known wrapper function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Acked-by: Ludovic Desroches Link: https://lore.kernel.org/r/377247f3-b53a-a9d9-66c7-4b8515de3809@web.de Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b58ac720d9a1..f71c9f77d405 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1957,21 +1957,16 @@ static int atmel_xdmac_resume(struct device *dev) static int at_xdmac_probe(struct platform_device *pdev) { - struct resource *res; struct at_xdmac *atxdmac; int irq, size, nr_channels, i, ret; void __iomem *base; u32 reg; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); From 1148ac673f7463313e57ac7a407ddd89e798ea91 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 11:18:27 +0200 Subject: [PATCH 07/61] dmaengine: jz4780: Use devm_platform_ioremap_resource() in jz4780_dma_probe() Simplify this function implementation a bit by using a known wrapper function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/5dd19f28-349a-4957-ea3a-6aebbd7c97e2@web.de Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index cafb1cc065bb..f42b3ef8e036 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -858,13 +858,7 @@ static int jz4780_dma_probe(struct platform_device *pdev) jzdma->soc_data = soc_data; platform_set_drvdata(pdev, jzdma); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get I/O memory\n"); - return -EINVAL; - } - - jzdma->chn_base = devm_ioremap_resource(dev, res); + jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(jzdma->chn_base)) return PTR_ERR(jzdma->chn_base); From 3d4d6c27f65cbcfc8293cc08eae257e1d90aef48 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 11:36:18 +0200 Subject: [PATCH 08/61] dmaengine: k3dma: Use devm_platform_ioremap_resource() in k3_dma_probe() Simplify this function implementation by using a known wrapper function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/aaed7862-49bb-e368-3e7b-5cc2c3d915b1@web.de Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 4b36c8810517..adecea51814f 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -835,13 +835,8 @@ static int k3_dma_probe(struct platform_device *op) const struct k3dma_soc_data *soc_data; struct k3_dma_dev *d; const struct of_device_id *of_id; - struct resource *iores; int i, ret, irq = 0; - iores = platform_get_resource(op, IORESOURCE_MEM, 0); - if (!iores) - return -EINVAL; - d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; @@ -850,7 +845,7 @@ static int k3_dma_probe(struct platform_device *op) if (!soc_data) return -EINVAL; - d->base = devm_ioremap_resource(&op->dev, iores); + d->base = devm_platform_ioremap_resource(op, 0); if (IS_ERR(d->base)) return PTR_ERR(d->base); From 9d68427d0f4f6c008a26a0a021ee37994549cbc1 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 12:52:25 +0200 Subject: [PATCH 09/61] dmaengine: mediatek: Use devm_platform_ioremap_resource() in mtk_cqdma_probe() Simplify this function implementation a bit by using a known wrapper function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/c7e3bbae-44fa-9019-18ee-c6cdfd7c2a14@web.de Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-cqdma.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 723b11c190b3..6bf838e63be1 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -819,15 +819,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&cqdma->pc[i]->queue); spin_lock_init(&cqdma->pc[i]->lock); refcount_set(&cqdma->pc[i]->refcnt, 0); - - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - if (!res) { - dev_err(&pdev->dev, "No mem resource for %s\n", - dev_name(&pdev->dev)); - return -EINVAL; - } - - cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res); + cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(cqdma->pc[i]->base)) return PTR_ERR(cqdma->pc[i]->base); From a7dc0e6c1ec9f2f244f3cea3172bfe1521dabcfd Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 13:07:41 +0200 Subject: [PATCH 10/61] dmaengine: mediatek: Use devm_platform_ioremap_resource() in mtk_uart_apdma_probe() Simplify this function implementation by using a known wrapper function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/366e776c-8760-eeb7-c248-7380c9f4fd34@web.de Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-uart-apdma.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index f40051d6aecb..c20e6bd4e298 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -475,7 +475,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mtk_uart_apdmadev *mtkd; int bit_mask = 32, rc; - struct resource *res; struct mtk_chan *c; unsigned int i; @@ -532,13 +531,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev) goto err_no_dma; } - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - if (!res) { - rc = -ENODEV; - goto err_no_dma; - } - - c->base = devm_ioremap_resource(&pdev->dev, res); + c->base = devm_platform_ioremap_resource(pdev, i); if (IS_ERR(c->base)) { rc = PTR_ERR(c->base); goto err_no_dma; From ecb4d34fafec4fea6fe218f25cc390775a609efd Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 13:23:54 +0200 Subject: [PATCH 11/61] dmaengine: owl: Use devm_platform_ioremap_resource() in owl_dma_probe() Simplify this function implementation by using a known wrapper function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Link: https://lore.kernel.org/r/d36b6a6c-2e3d-8d68-6ddc-969a377ca3b2@web.de Signed-off-by: Vinod Koul --- drivers/dma/owl-dma.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 90bbcef99ef8..023f951189a7 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -1045,18 +1045,13 @@ static int owl_dma_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct owl_dma *od; - struct resource *res; int ret, i, nr_channels, nr_requests; od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; - - od->base = devm_ioremap_resource(&pdev->dev, res); + od->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(od->base)) return PTR_ERR(od->base); From 833b48242686606670edaa72a465c0faa44d2916 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 22 Sep 2019 14:32:12 +0200 Subject: [PATCH 12/61] dmaengine: zx: Use devm_platform_ioremap_resource() in zx_dma_probe() Simplify this function implementation by using a known wrapper function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Acked-by: Shawn Guo Link: https://lore.kernel.org/r/85de79fa-1ca5-a1e5-0296-9e8a2066f134@web.de Signed-off-by: Vinod Koul --- drivers/dma/zx_dma.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c index 9f4436f7c914..6b457e683e70 100644 --- a/drivers/dma/zx_dma.c +++ b/drivers/dma/zx_dma.c @@ -754,18 +754,13 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, static int zx_dma_probe(struct platform_device *op) { struct zx_dma_dev *d; - struct resource *iores; int i, ret = 0; - iores = platform_get_resource(op, IORESOURCE_MEM, 0); - if (!iores) - return -EINVAL; - d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; - d->base = devm_ioremap_resource(&op->dev, iores); + d->base = devm_platform_ioremap_resource(op, 0); if (IS_ERR(d->base)) return PTR_ERR(d->base); From f27c22736d133baff0ab3fdc7b015d998267d817 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 24 Sep 2019 11:51:16 +0300 Subject: [PATCH 13/61] dmaengine: dw: platform: Mark 'hclk' clock optional On some platforms the clock can be fixed rate, always running one and there is no need to do anything with it. In order to support those platforms, switch to use optional clock. Fixes: f8d9ddbc2851 ("dmaengine: dw: platform: Enable iDMA 32-bit on Intel Elkhart Lake") Depends-on: 60b8f0ddf1a9 ("clk: Add (devm_)clk_get_optional() functions") Signed-off-by: Andy Shevchenko Acked-by: Viresh Kumar Link: https://lore.kernel.org/r/20190924085116.83683-1-andriy.shevchenko@linux.intel.com Signed-off-by: Vinod Koul --- drivers/dma/dw/platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index c90c798e5ec3..0585d749d935 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev) data->chip = chip; - chip->clk = devm_clk_get(chip->dev, "hclk"); + chip->clk = devm_clk_get_optional(chip->dev, "hclk"); if (IS_ERR(chip->clk)) return PTR_ERR(chip->clk); err = clk_prepare_enable(chip->clk); From bc3ecbe09ab19f766699c056f3e7175bf7e96c64 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Sep 2019 17:37:26 +0100 Subject: [PATCH 14/61] dmaengine: iop-adma: make array 'handler' static const, makes object smaller Don't populate the array 'handler' on the stack but instead make it static const. Makes the object code smaller by 80 bytes. 
Before: text data bss dec hex filename 38225 9084 64 47373 b90d drivers/dma/iop-adma.o After: text data bss dec hex filename 38081 9148 64 47293 b8bd drivers/dma/iop-adma.o (gcc version 9.2.1, amd64) Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20190905163726.19690-1-colin.king@canonical.com Signed-off-by: Vinod Koul --- drivers/dma/iop-adma.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index a3f942a6a946..4dc5478fc156 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1359,9 +1359,11 @@ static int iop_adma_probe(struct platform_device *pdev) iop_adma_device_clear_err_status(iop_chan); for (i = 0; i < 3; i++) { - irq_handler_t handler[] = { iop_adma_eot_handler, - iop_adma_eoc_handler, - iop_adma_err_handler }; + static const irq_handler_t handler[] = { + iop_adma_eot_handler, + iop_adma_eoc_handler, + iop_adma_err_handler + }; int irq = platform_get_irq(pdev, i); if (irq < 0) { ret = -ENXIO; From a8bd47542863947e2433db35558477caf0d89995 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 26 Sep 2019 16:20:59 +0530 Subject: [PATCH 15/61] dmaengine: xilinx_dma: use devm_platform_ioremap_resource() Replace the chain of platform_get_resource() and devm_ioremap_resource() with devm_platform_ioremap_resource(). It simplifies the flow and there is no functional change. Fixes below cocinelle warning- WARNING: Use devm_platform_ioremap_resource for xdev -> regs Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1569495060-18117-4-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index e7dc3c4dc8e0..fb87d0a62b26 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2604,7 +2604,6 @@ static int xilinx_dma_probe(struct platform_device *pdev) struct device_node *node = pdev->dev.of_node; struct xilinx_dma_device *xdev; struct device_node *child, *np = pdev->dev.of_node; - struct resource *io; u32 num_frames, addr_width, len_width; int i, err; @@ -2630,8 +2629,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) return err; /* Request and map I/O memory */ - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - xdev->regs = devm_ioremap_resource(&pdev->dev, io); + xdev->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(xdev->regs)) return PTR_ERR(xdev->regs); From 944879ba4c852ca68b555d84559f228c3430e443 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 26 Sep 2019 16:21:00 +0530 Subject: [PATCH 16/61] dmaengine: xilinx_dma: Remove clk_get error message for probe defer In dma probe, the driver checks for devm_clk_get return and print error message in the failing case. However for -EPROBE_DEFER this message is confusing so avoid it. 
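The resulting pattern, in generic form (the clock name and variables here are only illustrative):

	clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		/* stay quiet on deferral; the driver will simply probe again */
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get clock (%d)\n", err);
		return err;
	}

so a deferred probe is propagated silently while real failures are still logged.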
Signed-off-by: Radhey Shyam Pandey Signed-off-by: Michal Simek Link: https://lore.kernel.org/r/1569495060-18117-5-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index fb87d0a62b26..94cdc410977b 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -2186,7 +2186,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", + err); return err; } @@ -2251,14 +2253,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", + err); return err; } *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); if (IS_ERR(*dev_clk)) { err = PTR_ERR(*dev_clk); - dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", + err); return err; } @@ -2291,7 +2297,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); - dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); + if (err != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", + err); return err; } @@ -2313,7 +2321,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, err = clk_prepare_enable(*axi_clk); if (err) { - dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); + dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", + err); return err; } From f228a4a24492b9b147ce4efdd384e064d659e38a Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 27 Sep 2019 11:29:43 +0800 Subject: [PATCH 17/61] dmaengine: sprd: Change to use devm_platform_ioremap_resource() Use the new helper that wraps the calls to platform_get_resource() and devm_ioremap_resource() together, which can simpify the code. 
Signed-off-by: Baolin Wang Link: https://lore.kernel.org/r/1af3efdac3b217203cace090c8947386854c0144.1569554639.git.baolin.wang@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 525dc7338fe3..ae104ccab7d5 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1057,7 +1057,6 @@ static int sprd_dma_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct sprd_dma_dev *sdev; struct sprd_dma_chn *dma_chn; - struct resource *res; u32 chn_count; int ret, i; @@ -1103,8 +1102,7 @@ static int sprd_dma_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "no interrupts for the dma controller\n"); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sdev->glb_base = devm_ioremap_resource(&pdev->dev, res); + sdev->glb_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sdev->glb_base)) return PTR_ERR(sdev->glb_base); From 9d2bbbc21772f133fd693bf10f38982e17f6549f Mon Sep 17 00:00:00 2001 From: Biju Das Date: Fri, 27 Sep 2019 11:37:09 +0100 Subject: [PATCH 18/61] dt-bindings: dmaengine: rcar-dmac: Document R8A774B1 bindings Renesas RZ/G2N (R8A774B1) SoC also has the R-Car gen2/3 compatible DMA controllers, therefore document RZ/G2N specific bindings. Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Acked-by: Rob Herring Link: https://lore.kernel.org/r/1569580629-55677-1-git-send-email-biju.das@bp.renesas.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index 5a512c5ea76a..5551e929fd99 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -21,6 +21,7 @@ Required Properties: - "renesas,dmac-r8a7745" (RZ/G1E) - "renesas,dmac-r8a77470" (RZ/G1C) - "renesas,dmac-r8a774a1" (RZ/G2M) + - "renesas,dmac-r8a774b1" (RZ/G2N) - "renesas,dmac-r8a774c0" (RZ/G2E) - "renesas,dmac-r8a7790" (R-Car H2) - "renesas,dmac-r8a7791" (R-Car M2-W) From f2835adf8afb2cea248dd10d6eb0444c34b3b51b Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Mon, 30 Sep 2019 02:04:39 +0000 Subject: [PATCH 19/61] dmaengine: fsl-dpaa2-qdma: Add the DPDMAI(Data Path DMA Interface) support The MC(Management Complex) exports the DPDMAI(Data Path DMA Interface) object as an interface to operate the DPAA2(Data Path Acceleration Architecture 2) qDMA Engine. The DPDMAI enables sending frame-based requests to qDMA and receiving back confirmation response on transaction completion, utilizing the DPAA2 QBMan(Queue Manager and Buffer Manager hardware) infrastructure. DPDMAI object provides up to two priorities for processing qDMA requests. The following list summarizes the DPDMAI main features and capabilities: 1. Supports up to two scheduling priorities for processing service requests. - Each DPDMAI transmit queue is mapped to one of two service priorities, allowing further prioritization in hardware between requests from different DPDMAI objects. 2. Supports up to two receive queues for incoming transaction completion confirmations. - Each DPDMAI receive queue is mapped to one of two receive priorities, allowing further prioritization between other interfaces when associating the DPDMAI receive queues to DPIO or DPCON(Data Path Concentrator) objects. 3. 
Supports different scheduling options for processing received packets: - Queues can be configured either in 'parked' mode (default), or attached to a DPIO object, or attached to DPCON object. 4. Allows interaction with one or more DPIO objects for dequeueing/enqueueing frame descriptors(FD) and for acquiring/releasing buffers. 5. Supports enable, disable, and reset operations. Add dpdmai to support some platforms with dpaa2 qdma engine. Signed-off-by: Peng Ma Link: https://lore.kernel.org/r/20190930020440.7754-1-peng.ma@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-dpaa2-qdma/dpdmai.c | 366 ++++++++++++++++++++++++++++ drivers/dma/fsl-dpaa2-qdma/dpdmai.h | 177 ++++++++++++++ 2 files changed, 543 insertions(+) create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpdmai.c create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpdmai.h diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c new file mode 100644 index 000000000000..fbc2b2f39bec --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2019 NXP + +#include +#include +#include +#include "dpdmai.h" + +struct dpdmai_rsp_get_attributes { + __le32 id; + u8 num_of_priorities; + u8 pad0[3]; + __le16 major; + __le16 minor; +}; + +struct dpdmai_cmd_queue { + __le32 dest_id; + u8 priority; + u8 queue; + u8 dest_type; + u8 pad; + __le64 user_ctx; + union { + __le32 options; + __le32 fqid; + }; +}; + +struct dpdmai_rsp_get_tx_queue { + __le64 pad; + __le32 fqid; +}; + +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) + +/* cmd, param, offset, width, type, arg_name */ +#define DPDMAI_CMD_CREATE(_cmd, _cfg) \ +do { \ + typeof(_cmd) (cmd) = (_cmd); \ + typeof(_cfg) (cfg) = (_cfg); \ + MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\ + MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\ +} while (0) + +static inline u64 mc_enc(int lsoffset, int width, u64 val) +{ + return (val & MAKE_UMASK64(width)) << lsoffset; +} + +/** + * dpdmai_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmai_id: DPDMAI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmai_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, + int dpdmai_id, u16 *token) +{ + struct fsl_mc_command cmd = { 0 }; + __le64 *cmd_dpdmai_id; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, + cmd_flags, 0); + + cmd_dpdmai_id = cmd.params; + *cmd_dpdmai_id = cpu_to_le32(dpdmai_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmai_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_create() - Create the DPDMAI object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @token: Returned token; use in subsequent API calls + * + * Create the DPDMAI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent calls to + * this specific object. For objects that are created using the + * DPL file, call dpdmai_open() function to get an authentication + * token first. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, + const struct dpdmai_cfg *cfg, u16 *token) +{ + struct fsl_mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, + cmd_flags, 0); + DPDMAI_CMD_CREATE(cmd, cfg); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, + cmd_flags, token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_attributes() - Retrieve DPDMAI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpdmai_attr *attr) +{ + struct dpdmai_rsp_get_attributes *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, + cmd_flags, token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->version.major = le16_to_cpu(rsp_params->major); + attr->version.minor = le16_to_cpu(rsp_params->minor); + attr->num_of_priorities = rsp_params->num_of_priorities; + + return 0; +} + +/** + * dpdmai_set_rx_queue() - Set Rx queue configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @cfg: Rx queue configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 priority, const struct dpdmai_rx_queue_cfg *cfg) +{ + struct dpdmai_cmd_queue *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, + cmd_flags, token); + + cmd_params = (struct dpdmai_cmd_queue *)cmd.params; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->priority = cfg->dest_cfg.priority; + cmd_params->queue = priority; + cmd_params->dest_type = cfg->dest_cfg.dest_type; + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); + cmd_params->options = cpu_to_le32(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @attr: Returned Rx queue attributes + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 priority, struct dpdmai_rx_queue_attr *attr) +{ + struct dpdmai_cmd_queue *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, + cmd_flags, token); + + cmd_params = (struct dpdmai_cmd_queue *)cmd.params; + cmd_params->queue = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); + attr->dest_cfg.priority = cmd_params->priority; + attr->dest_cfg.dest_type = cmd_params->dest_type; + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); + attr->fqid = le32_to_cpu(cmd_params->fqid); + + return 0; +} + +/** + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMAI object + * @priority: Select the queue relative to number of + * priorities configured at DPDMAI creation + * @fqid: Returned Tx queue + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, u8 priority, u32 *fqid) +{ + struct dpdmai_rsp_get_tx_queue *rsp_params; + struct dpdmai_cmd_queue *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, + cmd_flags, token); + + cmd_params = (struct dpdmai_cmd_queue *)cmd.params; + cmd_params->queue = priority; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; + *fqid = le32_to_cpu(rsp_params->fqid); + + return 0; +} diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h new file mode 100644 index 000000000000..6d785093da8e --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2019 NXP */ + +#ifndef __FSL_DPDMAI_H +#define __FSL_DPDMAI_H + +/* DPDMAI Version */ +#define DPDMAI_VER_MAJOR 2 +#define DPDMAI_VER_MINOR 2 + +#define DPDMAI_CMD_BASE_VERSION 0 +#define DPDMAI_CMD_ID_OFFSET 4 + +#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \ + DPDMAI_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800) +#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E) +#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E) + +#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002) +#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003) +#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMDID_FORMAT(0x004) +#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005) +#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006) + +#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010) +#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011) +#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012) +#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013) +#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014) +#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015) +#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016) +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017) + +#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0) +#define 
DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1) +#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2) + +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ + +#define MAKE_UMASK64(_width) \ + ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1)) + +/* Data Path DMA Interface API + * Contains initialization APIs and runtime control APIs for DPDMAI + */ + +/** + * Maximum number of Tx/Rx priorities per DPDMAI object + */ +#define DPDMAI_PRIO_NUM 2 + +/* DPDMAI queue modification options */ + +/** + * Select to modify the user's context associated with the queue + */ +#define DPDMAI_QUEUE_OPT_USER_CTX 0x1 + +/** + * Select to modify the queue's destination + */ +#define DPDMAI_QUEUE_OPT_DEST 0x2 + +/** + * struct dpdmai_cfg - Structure representing DPDMAI configuration + * @priorities: Priorities for the DMA hardware processing; valid priorities are + * configured with values 1-8; the entry following last valid entry + * should be configured with 0 + */ +struct dpdmai_cfg { + u8 priorities[DPDMAI_PRIO_NUM]; +}; + +/** + * struct dpdmai_attr - Structure representing DPDMAI attributes + * @id: DPDMAI object ID + * @version: DPDMAI version + * @num_of_priorities: number of priorities + */ +struct dpdmai_attr { + int id; + /** + * struct version - DPDMAI version + * @major: DPDMAI major version + * @minor: DPDMAI minor version + */ + struct { + u16 major; + u16 minor; + } version; + u8 num_of_priorities; +}; + +/** + * enum dpdmai_dest - DPDMAI destination types + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode + * and does not generate FQDAN notifications; user is expected to dequeue + * from the queue based on polling or other user-defined method + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON object; + * user is expected to dequeue from the DPCON channel + */ +enum dpdmai_dest { + DPDMAI_DEST_NONE = 0, + DPDMAI_DEST_DPIO = 1, + DPDMAI_DEST_DPCON = 2 +}; + +/** + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPDMAI_DEST_NONE' option + */ +struct dpdmai_dest_cfg { + enum dpdmai_dest dest_type; + int dest_id; + u8 priority; +}; + +/** + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration + * @options: Flags representing the suggested modifications to the queue; + * Use any combination of 'DPDMAI_QUEUE_OPT_' flags + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame; + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options' + * @dest_cfg: Queue destination parameters; + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' + */ +struct dpdmai_rx_queue_cfg { + struct dpdmai_dest_cfg dest_cfg; + u32 options; + u64 user_ctx; + +}; + +/** + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues + * @user_ctx: User context value provided in the frame descriptor of each + * dequeued frame + * 
@dest_cfg: Queue destination configuration + * @fqid: Virtual FQID value to be used for dequeue operations + */ +struct dpdmai_rx_queue_attr { + struct dpdmai_dest_cfg dest_cfg; + u64 user_ctx; + u32 fqid; +}; + +int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, + int dpdmai_id, u16 *token); +int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, + const struct dpdmai_cfg *cfg, u16 *token); +int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, struct dpdmai_attr *attr); +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 priority, const struct dpdmai_rx_queue_cfg *cfg); +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u8 priority, struct dpdmai_rx_queue_attr *attr); +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, + u16 token, u8 priority, u32 *fqid); + +#endif /* __FSL_DPDMAI_H */ From 7fdf9b05c73b79c4d9a85b5a9905efa10ee482a6 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Mon, 30 Sep 2019 02:04:40 +0000 Subject: [PATCH 20/61] dmaengine: fsl-dpaa2-qdma: Add NXP dpaa2 qDMA controller driver for Layerscape SoCs DPPA2(Data Path Acceleration Architecture 2) qDMA supports virtualized channel by allowing DMA jobs to be enqueued into different work queues. Core can initiate a DMA transaction by preparing a frame descriptor(FD) for each DMA job and enqueuing this job through a hardware portal. DPAA2 components can also prepare a FD and enqueue a DMA job through a hardware portal. The qDMA prefetches DMA jobs through DPAA2 hardware portal. It then schedules and dispatches to internal DMA hardware engines, which generate read and write requests. Both qDMA source data and destination data can be either contiguous or non-contiguous using one or more scatter/gather tables. The qDMA supports global bandwidth flow control where all DMA transactions are stalled if the bandwidth threshold has been reached. Also supported are transaction based read throttling. Add NXP dppa2 qDMA to support some of Layerscape SoCs. such as: LS1088A, LS208xA, LX2, etc. 
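For context (not part of this patch): the driver exposes memcpy-capable channels through the dmaengine framework, so a generic client would drive it with the usual dmaengine API, roughly as in this hypothetical helper:

	/* Illustrative client sketch; dst_dma/src_dma/len are assumed to be
	 * valid DMA addresses and transfer size obtained elsewhere.
	 */
	static int example_memcpy(dma_addr_t dst_dma, dma_addr_t src_dma, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* ... wait for the completion callback or poll the cookie ... */

		dma_release_channel(chan);
		return 0;
	}
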
Signed-off-by: Peng Ma Link: https://lore.kernel.org/r/20190930020440.7754-2-peng.ma@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 + drivers/dma/Makefile | 1 + drivers/dma/fsl-dpaa2-qdma/Kconfig | 9 + drivers/dma/fsl-dpaa2-qdma/Makefile | 3 + drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 825 ++++++++++++++++++++++++ drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h | 153 +++++ 6 files changed, 993 insertions(+) create mode 100644 drivers/dma/fsl-dpaa2-qdma/Kconfig create mode 100644 drivers/dma/fsl-dpaa2-qdma/Makefile create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c create mode 100644 drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 7af874b69ffb..f5decdc24181 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -669,6 +669,8 @@ source "drivers/dma/sh/Kconfig" source "drivers/dma/ti/Kconfig" +source "drivers/dma/fsl-dpaa2-qdma/Kconfig" + # clients comment "DMA Clients" depends on DMA_ENGINE diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index f5ce8665e944..b3d48f928328 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o obj-$(CONFIG_ZX_DMA) += zx_dma.o obj-$(CONFIG_ST_FDMA) += st_fdma.o +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/ obj-y += mediatek/ obj-y += qcom/ diff --git a/drivers/dma/fsl-dpaa2-qdma/Kconfig b/drivers/dma/fsl-dpaa2-qdma/Kconfig new file mode 100644 index 000000000000..258ed6be934d --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig @@ -0,0 +1,9 @@ +menuconfig FSL_DPAA2_QDMA + tristate "NXP DPAA2 QDMA" + depends on ARM64 + depends on FSL_MC_BUS && FSL_MC_DPIO + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + NXP Data Path Acceleration Architecture 2 QDMA driver, + using the NXP MC bus driver. 
diff --git a/drivers/dma/fsl-dpaa2-qdma/Makefile b/drivers/dma/fsl-dpaa2-qdma/Makefile new file mode 100644 index 000000000000..c1d0226f2bd7 --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for the NXP DPAA2 qDMA controllers +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c new file mode 100644 index 000000000000..c70a7965f140 --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright 2019 NXP + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../virt-dma.h" +#include "dpdmai.h" +#include "dpaa2-qdma.h" + +static bool smmu_disable = true; + +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan); +} + +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dpaa2_qdma_comp, vdesc); +} + +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma; + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev; + + dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev, + sizeof(struct dpaa2_fd), + sizeof(struct dpaa2_fd), 0); + if (!dpaa2_chan->fd_pool) + goto err; + + dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev, + sizeof(struct dpaa2_fl_entry), + sizeof(struct dpaa2_fl_entry), 0); + if (!dpaa2_chan->fl_pool) + goto err_fd; + + dpaa2_chan->sdd_pool = + dma_pool_create("sdd_pool", dev, + sizeof(struct dpaa2_qdma_sd_d), + sizeof(struct dpaa2_qdma_sd_d), 0); + if (!dpaa2_chan->sdd_pool) + goto err_fl; + + return dpaa2_qdma->desc_allocated++; +err_fl: + dma_pool_destroy(dpaa2_chan->fl_pool); +err_fd: + dma_pool_destroy(dpaa2_chan->fd_pool); +err: + return -ENOMEM; +} + +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan) +{ + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma; + unsigned long flags; + + LIST_HEAD(head); + + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags); + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head); + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags); + + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head); + + dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used); + dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free); + + dma_pool_destroy(dpaa2_chan->fd_pool); + dma_pool_destroy(dpaa2_chan->fl_pool); + dma_pool_destroy(dpaa2_chan->sdd_pool); + dpaa2_qdma->desc_allocated--; +} + +/* + * Request a command descriptor for enqueue. 
+ */ +static struct dpaa2_qdma_comp * +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan) +{ + struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv; + struct device *dev = &qdma_priv->dpdmai_dev->dev; + struct dpaa2_qdma_comp *comp_temp = NULL; + unsigned long flags; + + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); + if (list_empty(&dpaa2_chan->comp_free)) { + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); + comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT); + if (!comp_temp) + goto err; + comp_temp->fd_virt_addr = + dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT, + &comp_temp->fd_bus_addr); + if (!comp_temp->fd_virt_addr) + goto err_comp; + + comp_temp->fl_virt_addr = + dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT, + &comp_temp->fl_bus_addr); + if (!comp_temp->fl_virt_addr) + goto err_fd_virt; + + comp_temp->desc_virt_addr = + dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT, + &comp_temp->desc_bus_addr); + if (!comp_temp->desc_virt_addr) + goto err_fl_virt; + + comp_temp->qchan = dpaa2_chan; + return comp_temp; + } + + comp_temp = list_first_entry(&dpaa2_chan->comp_free, + struct dpaa2_qdma_comp, list); + list_del(&comp_temp->list); + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); + + comp_temp->qchan = dpaa2_chan; + + return comp_temp; + +err_fl_virt: + dma_pool_free(dpaa2_chan->fl_pool, + comp_temp->fl_virt_addr, + comp_temp->fl_bus_addr); +err_fd_virt: + dma_pool_free(dpaa2_chan->fd_pool, + comp_temp->fd_virt_addr, + comp_temp->fd_bus_addr); +err_comp: + kfree(comp_temp); +err: + dev_err(dev, "Failed to request descriptor\n"); + return NULL; +} + +static void +dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp) +{ + struct dpaa2_fd *fd; + + fd = dpaa2_comp->fd_virt_addr; + memset(fd, 0, sizeof(struct dpaa2_fd)); + + /* fd populated */ + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr); + + /* + * Bypass memory translation, Frame list format, short length disable + * we need to disable BMT if fsl-mc use iova addr + */ + if (smmu_disable) + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE); + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE); + + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX); +} + +/* first frame list for descriptor buffer */ +static void +dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list, + struct dpaa2_qdma_comp *dpaa2_comp, + bool wrt_changed) +{ + struct dpaa2_qdma_sd_d *sdd; + + sdd = dpaa2_comp->desc_virt_addr; + memset(sdd, 0, 2 * (sizeof(*sdd))); + + /* source descriptor CMD */ + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); + sdd++; + + /* dest descriptor CMD */ + if (wrt_changed) + sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT); + else + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); + + memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); + + /* first frame list to source descriptor */ + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr); + dpaa2_fl_set_len(f_list, 0x20); + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG); + + /* bypass memory translation */ + if (smmu_disable) + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); +} + +/* source and destination frame list */ +static void +dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list, + dma_addr_t dst, dma_addr_t src, + size_t len, uint8_t fmt) +{ + /* source frame list to source buffer */ + memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); + + dpaa2_fl_set_addr(f_list, src); + dpaa2_fl_set_len(f_list, len); + + /* single buffer frame or scatter gather frame */ + 
dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); + + /* bypass memory translation */ + if (smmu_disable) + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); + + f_list++; + + /* destination frame list to destination buffer */ + memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); + + dpaa2_fl_set_addr(f_list, dst); + dpaa2_fl_set_len(f_list, len); + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); + /* single buffer frame or scatter gather frame */ + dpaa2_fl_set_final(f_list, QDMA_FL_F); + /* bypass memory translation */ + if (smmu_disable) + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); +} + +static struct dma_async_tx_descriptor +*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, + dma_addr_t src, size_t len, ulong flags) +{ + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); + struct dpaa2_qdma_engine *dpaa2_qdma; + struct dpaa2_qdma_comp *dpaa2_comp; + struct dpaa2_fl_entry *f_list; + bool wrt_changed; + + dpaa2_qdma = dpaa2_chan->qdma; + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan); + if (!dpaa2_comp) + return NULL; + + wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup; + + /* populate Frame descriptor */ + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp); + + f_list = dpaa2_comp->fl_virt_addr; + + /* first frame list for descriptor buffer (logn format) */ + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed); + + f_list++; + + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF); + + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags); +} + +static void dpaa2_qdma_issue_pending(struct dma_chan *chan) +{ + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); + struct dpaa2_qdma_comp *dpaa2_comp; + struct virt_dma_desc *vdesc; + struct dpaa2_fd *fd; + unsigned long flags; + int err; + + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); + spin_lock(&dpaa2_chan->vchan.lock); + if (vchan_issue_pending(&dpaa2_chan->vchan)) { + vdesc = vchan_next_desc(&dpaa2_chan->vchan); + if (!vdesc) + goto err_enqueue; + dpaa2_comp = to_fsl_qdma_comp(vdesc); + + fd = dpaa2_comp->fd_virt_addr; + + list_del(&vdesc->node); + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used); + + err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd); + if (err) { + list_del(&dpaa2_comp->list); + list_add_tail(&dpaa2_comp->list, + &dpaa2_chan->comp_free); + } + } +err_enqueue: + spin_unlock(&dpaa2_chan->vchan.lock); + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); +} + +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) +{ + struct dpaa2_qdma_priv_per_prio *ppriv; + struct device *dev = &ls_dev->dev; + struct dpaa2_qdma_priv *priv; + u8 prio_def = DPDMAI_PRIO_NUM; + int err = -EINVAL; + int i; + + priv = dev_get_drvdata(dev); + + priv->dev = dev; + priv->dpqdma_id = ls_dev->obj_desc.id; + + /* Get the handle for the DPDMAI this interface is associate with */ + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle); + if (err) { + dev_err(dev, "dpdmai_open() failed\n"); + return err; + } + + dev_dbg(dev, "Opened dpdmai object successfully\n"); + + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, + &priv->dpdmai_attr); + if (err) { + dev_err(dev, "dpdmai_get_attributes() failed\n"); + goto exit; + } + + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) { + dev_err(dev, "DPDMAI major version mismatch\n" + "Found %u.%u, supported version is %u.%u\n", + priv->dpdmai_attr.version.major, + priv->dpdmai_attr.version.minor, + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR); + 
goto exit; + } + + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) { + dev_err(dev, "DPDMAI minor version mismatch\n" + "Found %u.%u, supported version is %u.%u\n", + priv->dpdmai_attr.version.major, + priv->dpdmai_attr.version.minor, + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR); + goto exit; + } + + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def); + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL); + if (!ppriv) { + err = -ENOMEM; + goto exit; + } + priv->ppriv = ppriv; + + for (i = 0; i < priv->num_pairs; i++) { + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, + i, &priv->rx_queue_attr[i]); + if (err) { + dev_err(dev, "dpdmai_get_rx_queue() failed\n"); + goto exit; + } + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; + + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, + i, &priv->tx_fqid[i]); + if (err) { + dev_err(dev, "dpdmai_get_tx_queue() failed\n"); + goto exit; + } + ppriv->req_fqid = priv->tx_fqid[i]; + ppriv->prio = i; + ppriv->priv = priv; + ppriv++; + } + + return 0; +exit: + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); + return err; +} + +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx) +{ + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx, + struct dpaa2_qdma_priv_per_prio, nctx); + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp; + struct dpaa2_qdma_priv *priv = ppriv->priv; + u32 n_chans = priv->dpaa2_qdma->n_chans; + struct dpaa2_qdma_chan *qchan; + const struct dpaa2_fd *fd_eq; + const struct dpaa2_fd *fd; + struct dpaa2_dq *dq; + int is_last = 0; + int found; + u8 status; + int err; + int i; + + do { + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, + ppriv->store); + } while (err); + + while (!is_last) { + do { + dq = dpaa2_io_store_next(ppriv->store, &is_last); + } while (!is_last && !dq); + if (!dq) { + dev_err(priv->dev, "FQID returned no valid frames!\n"); + continue; + } + + /* obtain FD and process the error */ + fd = dpaa2_dq_fd(dq); + + status = dpaa2_fd_get_ctrl(fd) & 0xff; + if (status) + dev_err(priv->dev, "FD error occurred\n"); + found = 0; + for (i = 0; i < n_chans; i++) { + qchan = &priv->dpaa2_qdma->chans[i]; + spin_lock(&qchan->queue_lock); + if (list_empty(&qchan->comp_used)) { + spin_unlock(&qchan->queue_lock); + continue; + } + list_for_each_entry_safe(dpaa2_comp, _comp_tmp, + &qchan->comp_used, list) { + fd_eq = dpaa2_comp->fd_virt_addr; + + if (le64_to_cpu(fd_eq->simple.addr) == + le64_to_cpu(fd->simple.addr)) { + spin_lock(&qchan->vchan.lock); + vchan_cookie_complete(& + dpaa2_comp->vdesc); + spin_unlock(&qchan->vchan.lock); + found = 1; + break; + } + } + spin_unlock(&qchan->queue_lock); + if (found) + break; + } + } + + dpaa2_io_service_rearm(NULL, ctx); +} + +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv) +{ + struct dpaa2_qdma_priv_per_prio *ppriv; + struct device *dev = priv->dev; + int err = -EINVAL; + int i, num; + + num = priv->num_pairs; + ppriv = priv->ppriv; + for (i = 0; i < num; i++) { + ppriv->nctx.is_cdan = 0; + ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU; + ppriv->nctx.id = ppriv->rsp_fqid; + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb; + err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev); + if (err) { + dev_err(dev, "Notification register failed\n"); + goto err_service; + } + + ppriv->store = + dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev); + if (!ppriv->store) { + dev_err(dev, "dpaa2_io_store_create() failed\n"); + goto err_store; + } + + ppriv++; + } + return 0; + +err_store: + 
dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); +err_service: + ppriv--; + while (ppriv >= priv->ppriv) { + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); + dpaa2_io_store_destroy(ppriv->store); + ppriv--; + } + return err; +} + +static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv) +{ + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; + int i; + + for (i = 0; i < priv->num_pairs; i++) { + dpaa2_io_store_destroy(ppriv->store); + ppriv++; + } +} + +static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv) +{ + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; + struct device *dev = priv->dev; + int i; + + for (i = 0; i < priv->num_pairs; i++) { + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); + ppriv++; + } +} + +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) +{ + struct dpdmai_rx_queue_cfg rx_queue_cfg; + struct dpaa2_qdma_priv_per_prio *ppriv; + struct device *dev = priv->dev; + struct fsl_mc_device *ls_dev; + int i, num; + int err; + + ls_dev = to_fsl_mc_device(dev); + num = priv->num_pairs; + ppriv = priv->ppriv; + for (i = 0; i < num; i++) { + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | + DPDMAI_QUEUE_OPT_DEST; + rx_queue_cfg.user_ctx = ppriv->nctx.qman64; + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO; + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; + rx_queue_cfg.dest_cfg.priority = ppriv->prio; + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, + rx_queue_cfg.dest_cfg.priority, + &rx_queue_cfg); + if (err) { + dev_err(dev, "dpdmai_set_rx_queue() failed\n"); + return err; + } + + ppriv++; + } + + return 0; +} + +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv) +{ + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; + struct device *dev = priv->dev; + struct fsl_mc_device *ls_dev; + int err = 0; + int i; + + ls_dev = to_fsl_mc_device(dev); + + for (i = 0; i < priv->num_pairs; i++) { + ppriv->nctx.qman64 = 0; + ppriv->nctx.dpio_id = 0; + ppriv++; + } + + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle); + if (err) + dev_err(dev, "dpdmai_reset() failed\n"); + + return err; +} + +static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan, + struct list_head *head) +{ + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp; + unsigned long flags; + + list_for_each_entry_safe(comp_tmp, _comp_tmp, + head, list) { + spin_lock_irqsave(&qchan->queue_lock, flags); + list_del(&comp_tmp->list); + spin_unlock_irqrestore(&qchan->queue_lock, flags); + dma_pool_free(qchan->fd_pool, + comp_tmp->fd_virt_addr, + comp_tmp->fd_bus_addr); + dma_pool_free(qchan->fl_pool, + comp_tmp->fl_virt_addr, + comp_tmp->fl_bus_addr); + dma_pool_free(qchan->sdd_pool, + comp_tmp->desc_virt_addr, + comp_tmp->desc_bus_addr); + kfree(comp_tmp); + } +} + +static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma) +{ + struct dpaa2_qdma_chan *qchan; + int num, i; + + num = dpaa2_qdma->n_chans; + for (i = 0; i < num; i++) { + qchan = &dpaa2_qdma->chans[i]; + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used); + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free); + dma_pool_destroy(qchan->fd_pool); + dma_pool_destroy(qchan->fl_pool); + dma_pool_destroy(qchan->sdd_pool); + } +} + +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct dpaa2_qdma_comp *dpaa2_comp; + struct dpaa2_qdma_chan *qchan; + unsigned long flags; + + dpaa2_comp = to_fsl_qdma_comp(vdesc); + qchan = dpaa2_comp->qchan; + spin_lock_irqsave(&qchan->queue_lock, flags); + list_del(&dpaa2_comp->list); + 
list_add_tail(&dpaa2_comp->list, &qchan->comp_free); + spin_unlock_irqrestore(&qchan->queue_lock, flags); +} + +static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma) +{ + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv; + struct dpaa2_qdma_chan *dpaa2_chan; + int num = priv->num_pairs; + int i; + + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels); + for (i = 0; i < dpaa2_qdma->n_chans; i++) { + dpaa2_chan = &dpaa2_qdma->chans[i]; + dpaa2_chan->qdma = dpaa2_qdma; + dpaa2_chan->fqid = priv->tx_fqid[i % num]; + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); + spin_lock_init(&dpaa2_chan->queue_lock); + INIT_LIST_HEAD(&dpaa2_chan->comp_used); + INIT_LIST_HEAD(&dpaa2_chan->comp_free); + } + return 0; +} + +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev) +{ + struct device *dev = &dpdmai_dev->dev; + struct dpaa2_qdma_engine *dpaa2_qdma; + struct dpaa2_qdma_priv *priv; + int err; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + dev_set_drvdata(dev, priv); + priv->dpdmai_dev = dpdmai_dev; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + if (priv->iommu_domain) + smmu_disable = false; + + /* obtain a MC portal */ + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "MC portal allocation failed\n"); + goto err_mcportal; + } + + /* DPDMAI initialization */ + err = dpaa2_qdma_setup(dpdmai_dev); + if (err) { + dev_err(dev, "dpaa2_dpdmai_setup() failed\n"); + goto err_dpdmai_setup; + } + + /* DPIO */ + err = dpaa2_qdma_dpio_setup(priv); + if (err) { + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n"); + goto err_dpio_setup; + } + + /* DPDMAI binding to DPIO */ + err = dpaa2_dpdmai_bind(priv); + if (err) { + dev_err(dev, "dpaa2_dpdmai_bind() failed\n"); + goto err_bind; + } + + /* DPDMAI enable */ + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle); + if (err) { + dev_err(dev, "dpdmai_enable() faile\n"); + goto err_enable; + } + + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL); + if (!dpaa2_qdma) { + err = -ENOMEM; + goto err_eng; + } + + priv->dpaa2_qdma = dpaa2_qdma; + dpaa2_qdma->priv = priv; + + dpaa2_qdma->desc_allocated = 0; + dpaa2_qdma->n_chans = NUM_CH; + + dpaa2_dpdmai_init_channels(dpaa2_qdma); + + if (soc_device_match(soc_fixup_tuning)) + dpaa2_qdma->qdma_wrtype_fixup = true; + else + dpaa2_qdma->qdma_wrtype_fixup = false; + + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask); + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask); + + dpaa2_qdma->dma_dev.dev = dev; + dpaa2_qdma->dma_dev.device_alloc_chan_resources = + dpaa2_qdma_alloc_chan_resources; + dpaa2_qdma->dma_dev.device_free_chan_resources = + dpaa2_qdma_free_chan_resources; + dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status; + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy; + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending; + + err = dma_async_device_register(&dpaa2_qdma->dma_dev); + if (err) { + dev_err(dev, "Can't register NXP QDMA engine.\n"); + goto err_dpaa2_qdma; + } + + return 0; + +err_dpaa2_qdma: + kfree(dpaa2_qdma); +err_eng: + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle); +err_enable: + dpaa2_dpdmai_dpio_unbind(priv); +err_bind: + dpaa2_dpmai_store_free(priv); + dpaa2_dpdmai_dpio_free(priv); +err_dpio_setup: + kfree(priv->ppriv); + dpdmai_close(priv->mc_io, 0, 
dpdmai_dev->mc_handle); +err_dpdmai_setup: + fsl_mc_portal_free(priv->mc_io); +err_mcportal: + kfree(priv); + dev_set_drvdata(dev, NULL); + return err; +} + +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev) +{ + struct dpaa2_qdma_engine *dpaa2_qdma; + struct dpaa2_qdma_priv *priv; + struct device *dev; + + dev = &ls_dev->dev; + priv = dev_get_drvdata(dev); + dpaa2_qdma = priv->dpaa2_qdma; + + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); + dpaa2_dpdmai_dpio_unbind(priv); + dpaa2_dpmai_store_free(priv); + dpaa2_dpdmai_dpio_free(priv); + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); + fsl_mc_portal_free(priv->mc_io); + dev_set_drvdata(dev, NULL); + dpaa2_dpdmai_free_channels(dpaa2_qdma); + + dma_async_device_unregister(&dpaa2_qdma->dma_dev); + kfree(priv); + kfree(dpaa2_qdma); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpdmai", + }, + { .vendor = 0x0 } +}; + +static struct fsl_mc_driver dpaa2_qdma_driver = { + .driver = { + .name = "dpaa2-qdma", + .owner = THIS_MODULE, + }, + .probe = dpaa2_qdma_probe, + .remove = dpaa2_qdma_remove, + .match_id_table = dpaa2_qdma_id_table +}; + +static int __init dpaa2_qdma_driver_init(void) +{ + return fsl_mc_driver_register(&(dpaa2_qdma_driver)); +} +late_initcall(dpaa2_qdma_driver_init); + +static void __exit fsl_qdma_exit(void) +{ + fsl_mc_driver_unregister(&(dpaa2_qdma_driver)); +} +module_exit(fsl_qdma_exit); + +MODULE_ALIAS("platform:fsl-dpaa2-qdma"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver"); diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h new file mode 100644 index 000000000000..7d571849c569 --- /dev/null +++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2019 NXP */ + +#ifndef __DPAA2_QDMA_H +#define __DPAA2_QDMA_H + +#define DPAA2_QDMA_STORE_SIZE 16 +#define NUM_CH 8 + +struct dpaa2_qdma_sd_d { + u32 rsv:32; + union { + struct { + u32 ssd:12; /* souce stride distance */ + u32 sss:12; /* souce stride size */ + u32 rsv1:8; + } sdf; + struct { + u32 dsd:12; /* Destination stride distance */ + u32 dss:12; /* Destination stride size */ + u32 rsv2:8; + } ddf; + } df; + u32 rbpcmd; /* Route-by-port command */ + u32 cmd; +} __attribute__((__packed__)); + +/* Source descriptor command read transaction type for RBP=0: */ +/* coherent copy of cacheable memory */ +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28) +/* Destination descriptor command write transaction type for RBP=0: */ +/* coherent copy of cacheable memory */ +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28) +#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28) + +#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */ +#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */ +#define QMAN_FD_BMT_DISABLE (0) /* bypass memory translation */ +#define QMAN_FD_SL_DISABLE (0) /* short lengthe disabled */ +#define QMAN_FD_SL_ENABLE BIT(14) /* short lengthe enabled */ + +#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */ +#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */ + +#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */ +#define QDMA_FD_LONG_FORMAT (0) /* long format */ +#define QDMA_SER_DISABLE (8) /* no notification */ +#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */ +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */ +#define QDMA_SER_BOTH (3 << 
8) /* soruce and dest notification */ +#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */ + +#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */ +#define QMAN_FD_VA_DISABLE (0)/* Address used is a real address */ +/* Flow Context: 49bit physical address */ +#define QMAN_FD_CBMT_ENABLE BIT(15) +#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64bit virtual address */ +#define QMAN_FD_SC_DISABLE (0) /* stashing control */ + +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */ +#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */ +#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */ +#define QDMA_FL_BMT_DISABLE (0x0) /* enable bypass memory translation */ +#define QDMA_FL_SL_LONG (0x0)/* long length */ +#define QDMA_FL_SL_SHORT (0x1) /* short length */ +#define QDMA_FL_F (0x1)/* last frame list bit */ + +/*Description of Frame list table structure*/ +struct dpaa2_qdma_chan { + struct dpaa2_qdma_engine *qdma; + struct virt_dma_chan vchan; + struct virt_dma_desc vdesc; + enum dma_status status; + u32 fqid; + + /* spinlock used by dpaa2 qdma driver */ + spinlock_t queue_lock; + struct dma_pool *fd_pool; + struct dma_pool *fl_pool; + struct dma_pool *sdd_pool; + + struct list_head comp_used; + struct list_head comp_free; + +}; + +struct dpaa2_qdma_comp { + dma_addr_t fd_bus_addr; + dma_addr_t fl_bus_addr; + dma_addr_t desc_bus_addr; + struct dpaa2_fd *fd_virt_addr; + struct dpaa2_fl_entry *fl_virt_addr; + struct dpaa2_qdma_sd_d *desc_virt_addr; + struct dpaa2_qdma_chan *qchan; + struct virt_dma_desc vdesc; + struct list_head list; +}; + +struct dpaa2_qdma_engine { + struct dma_device dma_dev; + u32 n_chans; + struct dpaa2_qdma_chan chans[NUM_CH]; + int qdma_wrtype_fixup; + int desc_allocated; + + struct dpaa2_qdma_priv *priv; +}; + +/* + * dpaa2_qdma_priv - driver private data + */ +struct dpaa2_qdma_priv { + int dpqdma_id; + + struct iommu_domain *iommu_domain; + struct dpdmai_attr dpdmai_attr; + struct device *dev; + struct fsl_mc_io *mc_io; + struct fsl_mc_device *dpdmai_dev; + u8 num_pairs; + + struct dpaa2_qdma_engine *dpaa2_qdma; + struct dpaa2_qdma_priv_per_prio *ppriv; + + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; + u32 tx_fqid[DPDMAI_PRIO_NUM]; +}; + +struct dpaa2_qdma_priv_per_prio { + int req_fqid; + int rsp_fqid; + int prio; + + struct dpaa2_io_store *store; + struct dpaa2_io_notification_ctx nctx; + + struct dpaa2_qdma_priv *priv; +}; + +static struct soc_device_attribute soc_fixup_tuning[] = { + { .family = "QorIQ LX2160A"}, + { }, +}; + +/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */ +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \ + sizeof(struct dpaa2_fl_entry) * 3 + \ + sizeof(struct dpaa2_qdma_sd_d) * 2) + +static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma); +static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan, + struct list_head *head); +#endif /* __DPAA2_QDMA_H */ From df781c0ceebae321fb7f7e96773ea451805e14b9 Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Mon, 14 Oct 2019 22:33:50 -0500 Subject: [PATCH 21/61] dt-bindings: milbeaut-m10v-hdmac: Add Socionext Milbeaut HDMAC bindings Document the devicetree bindings for Socionext Milbeaut HDMAC controller. Controller has upto 8 floating channels, that need a predefined slave-id to work from a set of slaves. 
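For context, a minimal illustrative sketch of how a peripheral driver could consume one of these slave-id based channels through the generic dmaengine client API; the "tx" request name, slave ID 22, the FIFO address and the foo_* names are assumptions made for the example, not part of this binding:

/* Consumer DT fragment (illustrative): dmas = <&hdmac1 22>; dma-names = "tx"; */
#include <linux/dmaengine.h>

static int foo_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			phys_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,		/* assumed peripheral FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* xlate picks a free channel, records slave-id 22 */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -ENOMEM;
		goto err;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;	/* channel kept for subsequent transfers */

err:
	dma_release_channel(chan);
	return ret;
}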
Reviewed-by: Rob Herring Signed-off-by: Jassi Brar Link: https://lore.kernel.org/r/20191015033350.14866-1-jassisinghbrar@gmail.com Signed-off-by: Vinod Koul --- .../bindings/dma/milbeaut-m10v-hdmac.txt | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt new file mode 100644 index 000000000000..1f0875bd5abc --- /dev/null +++ b/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt @@ -0,0 +1,32 @@ +* Milbeaut AHB DMA Controller + +Milbeaut AHB DMA controller has transfer capability below. + - device to memory transfer + - memory to device transfer + +Required property: +- compatible: Should be "socionext,milbeaut-m10v-hdmac" +- reg: Should contain DMA registers location and length. +- interrupts: Should contain all of the per-channel DMA interrupts. + Number of channels is configurable - 2, 4 or 8, so + the number of interrupts specified should be {2,4,8}. +- #dma-cells: Should be 1. Specify the ID of the slave. +- clocks: Phandle to the clock used by the HDMAC module. + + +Example: + + hdmac1: dma-controller@1e110000 { + compatible = "socionext,milbeaut-m10v-hdmac"; + reg = <0x1e110000 0x10000>; + interrupts = <0 132 4>, + <0 133 4>, + <0 134 4>, + <0 135 4>, + <0 136 4>, + <0 137 4>, + <0 138 4>, + <0 139 4>; + #dma-cells = <1>; + clocks = <&dummy_clk>; + }; From 6c3214e698e49da9b694cdda86332527260cf119 Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Mon, 14 Oct 2019 22:33:59 -0500 Subject: [PATCH 22/61] dmaengine: milbeaut-hdmac: Add HDMAC driver for Milbeaut platforms Driver for Socionext Milbeaut HDMAC controller. The controller has upto 8 floating channels, that need a predefined slave-id to work from a set of slaves. Signed-off-by: Jassi Brar Link: https://lore.kernel.org/r/20191015033359.14925-1-jassisinghbrar@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 10 + drivers/dma/Makefile | 1 + drivers/dma/milbeaut-hdmac.c | 581 +++++++++++++++++++++++++++++++++++ 3 files changed, 592 insertions(+) create mode 100644 drivers/dma/milbeaut-hdmac.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f5decdc24181..a0ab82243b92 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -342,6 +342,16 @@ config MCF_EDMA minimal intervention from a host processor. This module can be found on Freescale ColdFire mcf5441x SoCs. +config MILBEAUT_HDMAC + tristate "Milbeaut AHB DMA support" + depends on ARCH_MILBEAUT || COMPILE_TEST + depends on OF + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Say yes here to support the Socionext Milbeaut + HDMAC device. 
+ config MMP_PDMA bool "MMP PDMA support" depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b3d48f928328..55b65ec7575a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o +obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c new file mode 100644 index 000000000000..2bb33535ab9e --- /dev/null +++ b/drivers/dma/milbeaut-hdmac.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Linaro Ltd. +// Copyright (C) 2019 Socionext Inc. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +#define MLB_HDMAC_DMACR 0x0 /* global */ +#define MLB_HDMAC_DE BIT(31) +#define MLB_HDMAC_DS BIT(30) +#define MLB_HDMAC_PR BIT(28) +#define MLB_HDMAC_DH GENMASK(27, 24) + +#define MLB_HDMAC_CH_STRIDE 0x10 + +#define MLB_HDMAC_DMACA 0x0 /* channel */ +#define MLB_HDMAC_EB BIT(31) +#define MLB_HDMAC_PB BIT(30) +#define MLB_HDMAC_ST BIT(29) +#define MLB_HDMAC_IS GENMASK(28, 24) +#define MLB_HDMAC_BT GENMASK(23, 20) +#define MLB_HDMAC_BC GENMASK(19, 16) +#define MLB_HDMAC_TC GENMASK(15, 0) +#define MLB_HDMAC_DMACB 0x4 +#define MLB_HDMAC_TT GENMASK(31, 30) +#define MLB_HDMAC_MS GENMASK(29, 28) +#define MLB_HDMAC_TW GENMASK(27, 26) +#define MLB_HDMAC_FS BIT(25) +#define MLB_HDMAC_FD BIT(24) +#define MLB_HDMAC_RC BIT(23) +#define MLB_HDMAC_RS BIT(22) +#define MLB_HDMAC_RD BIT(21) +#define MLB_HDMAC_EI BIT(20) +#define MLB_HDMAC_CI BIT(19) +#define HDMAC_PAUSE 0x7 +#define MLB_HDMAC_SS GENMASK(18, 16) +#define MLB_HDMAC_SP GENMASK(15, 12) +#define MLB_HDMAC_DP GENMASK(11, 8) +#define MLB_HDMAC_DMACSA 0x8 +#define MLB_HDMAC_DMACDA 0xc + +#define MLB_HDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +struct milbeaut_hdmac_desc { + struct virt_dma_desc vd; + struct scatterlist *sgl; + unsigned int sg_len; + unsigned int sg_cur; + enum dma_transfer_direction dir; +}; + +struct milbeaut_hdmac_chan { + struct virt_dma_chan vc; + struct milbeaut_hdmac_device *mdev; + struct milbeaut_hdmac_desc *md; + void __iomem *reg_ch_base; + unsigned int slave_id; + struct dma_slave_config cfg; +}; + +struct milbeaut_hdmac_device { + struct dma_device ddev; + struct clk *clk; + void __iomem *reg_base; + struct milbeaut_hdmac_chan channels[0]; +}; + +static struct milbeaut_hdmac_chan * +to_milbeaut_hdmac_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct milbeaut_hdmac_chan, vc); +} + +static struct milbeaut_hdmac_desc * +to_milbeaut_hdmac_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct milbeaut_hdmac_desc, vd); +} + +/* mc->vc.lock must be held by caller */ +static struct milbeaut_hdmac_desc * +milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc) +{ + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&mc->vc); + if (!vd) { + mc->md = NULL; + return NULL; + } + + list_del(&vd->node); + + mc->md = to_milbeaut_hdmac_desc(vd); + + return mc->md; +} + +/* mc->vc.lock must be held by caller */ +static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc, + struct 
milbeaut_hdmac_desc *md) +{ + struct scatterlist *sg; + u32 cb, ca, src_addr, dest_addr, len; + u32 width, burst; + + sg = &md->sgl[md->sg_cur]; + len = sg_dma_len(sg); + + cb = MLB_HDMAC_CI | MLB_HDMAC_EI; + if (md->dir == DMA_MEM_TO_DEV) { + cb |= MLB_HDMAC_FD; + width = mc->cfg.dst_addr_width; + burst = mc->cfg.dst_maxburst; + src_addr = sg_dma_address(sg); + dest_addr = mc->cfg.dst_addr; + } else { + cb |= MLB_HDMAC_FS; + width = mc->cfg.src_addr_width; + burst = mc->cfg.src_maxburst; + src_addr = mc->cfg.src_addr; + dest_addr = sg_dma_address(sg); + } + cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1)); + cb |= FIELD_PREP(MLB_HDMAC_MS, 2); + + writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR); + writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA); + writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA); + writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB); + + ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id); + if (burst == 16) + ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf); + else if (burst == 8) + ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd); + else if (burst == 4) + ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb); + burst *= width; + ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1)); + writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA); + ca |= MLB_HDMAC_EB; + writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA); +} + +/* mc->vc.lock must be held by caller */ +static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc) +{ + struct milbeaut_hdmac_desc *md; + + md = milbeaut_hdmac_next_desc(mc); + if (md) + milbeaut_chan_start(mc, md); +} + +static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id) +{ + struct milbeaut_hdmac_chan *mc = dev_id; + struct milbeaut_hdmac_desc *md; + u32 val; + + spin_lock(&mc->vc.lock); + + /* Ack and Disable irqs */ + val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB); + val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE)); + writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB); + val &= ~MLB_HDMAC_EI; + val &= ~MLB_HDMAC_CI; + writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB); + + md = mc->md; + if (!md) + goto out; + + md->sg_cur++; + + if (md->sg_cur >= md->sg_len) { + vchan_cookie_complete(&md->vd); + md = milbeaut_hdmac_next_desc(mc); + if (!md) + goto out; + } + + milbeaut_chan_start(mc, md); + +out: + spin_unlock(&mc->vc.lock); + return IRQ_HANDLED; +} + +static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan) +{ + vchan_free_chan_resources(to_virt_chan(chan)); +} + +static int +milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc); + + spin_lock(&mc->vc.lock); + mc->cfg = *cfg; + spin_unlock(&mc->vc.lock); + + return 0; +} + +static int milbeaut_hdmac_chan_pause(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc); + u32 val; + + spin_lock(&mc->vc.lock); + val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA); + val |= MLB_HDMAC_PB; + writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA); + spin_unlock(&mc->vc.lock); + + return 0; +} + +static int milbeaut_hdmac_chan_resume(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc); + u32 val; + + spin_lock(&mc->vc.lock); + val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA); + val &= ~MLB_HDMAC_PB; + writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA); + 
spin_unlock(&mc->vc.lock); + + return 0; +} + +static struct dma_async_tx_descriptor * +milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_desc *md; + int i; + + if (!is_slave_direction(direction)) + return NULL; + + md = kzalloc(sizeof(*md), GFP_NOWAIT); + if (!md) + return NULL; + + md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT); + if (!md->sgl) { + kfree(md); + return NULL; + } + + for (i = 0; i < sg_len; i++) + md->sgl[i] = sgl[i]; + + md->sg_len = sg_len; + md->dir = direction; + + return vchan_tx_prep(vc, &md->vd, flags); +} + +static int milbeaut_hdmac_terminate_all(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc); + unsigned long flags; + u32 val; + + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + + val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA); + val &= ~MLB_HDMAC_EB; /* disable the channel */ + writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA); + + if (mc->md) { + vchan_terminate_vdesc(&mc->md->vd); + mc->md = NULL; + } + + vchan_get_all_descriptors(vc, &head); + + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); + + return 0; +} + +static void milbeaut_hdmac_synchronize(struct dma_chan *chan) +{ + vchan_synchronize(to_virt_chan(chan)); +} + +static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct virt_dma_chan *vc; + struct virt_dma_desc *vd; + struct milbeaut_hdmac_chan *mc; + struct milbeaut_hdmac_desc *md = NULL; + enum dma_status stat; + unsigned long flags; + int i; + + stat = dma_cookie_status(chan, cookie, txstate); + /* Return immediately if we do not need to compute the residue. 
*/ + if (stat == DMA_COMPLETE || !txstate) + return stat; + + vc = to_virt_chan(chan); + + spin_lock_irqsave(&vc->lock, flags); + + mc = to_milbeaut_hdmac_chan(vc); + + /* residue from the on-flight chunk */ + if (mc->md && mc->md->vd.tx.cookie == cookie) { + struct scatterlist *sg; + u32 done; + + md = mc->md; + sg = &md->sgl[md->sg_cur]; + + if (md->dir == DMA_DEV_TO_MEM) + done = readl_relaxed(mc->reg_ch_base + + MLB_HDMAC_DMACDA); + else + done = readl_relaxed(mc->reg_ch_base + + MLB_HDMAC_DMACSA); + done -= sg_dma_address(sg); + + txstate->residue = -done; + } + + if (!md) { + vd = vchan_find_desc(vc, cookie); + if (vd) + md = to_milbeaut_hdmac_desc(vd); + } + + if (md) { + /* residue from the queued chunks */ + for (i = md->sg_cur; i < md->sg_len; i++) + txstate->residue += sg_dma_len(&md->sgl[i]); + } + + spin_unlock_irqrestore(&vc->lock, flags); + + return stat; +} + +static void milbeaut_hdmac_issue_pending(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc); + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + + if (vchan_issue_pending(vc) && !mc->md) + milbeaut_hdmac_start(mc); + + spin_unlock_irqrestore(&vc->lock, flags); +} + +static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd) +{ + struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd); + + kfree(md->sgl); + kfree(md); +} + +static struct dma_chan * +milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma) +{ + struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data; + struct milbeaut_hdmac_chan *mc; + struct virt_dma_chan *vc; + struct dma_chan *chan; + + if (dma_spec->args_count != 1) + return NULL; + + chan = dma_get_any_slave_channel(&mdev->ddev); + if (!chan) + return NULL; + + vc = to_virt_chan(chan); + mc = to_milbeaut_hdmac_chan(vc); + mc->slave_id = dma_spec->args[0]; + + return chan; +} + +static int milbeaut_hdmac_chan_init(struct platform_device *pdev, + struct milbeaut_hdmac_device *mdev, + int chan_id) +{ + struct device *dev = &pdev->dev; + struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id]; + char *irq_name; + int irq, ret; + + irq = platform_get_irq(pdev, chan_id); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", + chan_id); + return irq; + } + + irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d", + chan_id); + if (!irq_name) + return -ENOMEM; + + ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt, + IRQF_SHARED, irq_name, mc); + if (ret) + return ret; + + mc->mdev = mdev; + mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1); + mc->vc.desc_free = milbeaut_hdmac_desc_free; + vchan_init(&mc->vc, &mdev->ddev); + + return 0; +} + +static int milbeaut_hdmac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct milbeaut_hdmac_device *mdev; + struct dma_device *ddev; + int nr_chans, ret, i; + + nr_chans = platform_irq_count(pdev); + if (nr_chans < 0) + return nr_chans; + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans), + GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + mdev->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mdev->reg_base)) + return PTR_ERR(mdev->reg_base); + + mdev->clk = devm_clk_get(dev, NULL); + if (IS_ERR(mdev->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(mdev->clk); + } + + ret = clk_prepare_enable(mdev->clk); + if (ret) + return ret; + 
+ ddev = &mdev->ddev; + ddev->dev = dev; + dma_cap_set(DMA_SLAVE, ddev->cap_mask); + dma_cap_set(DMA_PRIVATE, ddev->cap_mask); + ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS; + ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS; + ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources; + ddev->device_config = milbeaut_hdmac_chan_config; + ddev->device_pause = milbeaut_hdmac_chan_pause; + ddev->device_resume = milbeaut_hdmac_chan_resume; + ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg; + ddev->device_terminate_all = milbeaut_hdmac_terminate_all; + ddev->device_synchronize = milbeaut_hdmac_synchronize; + ddev->device_tx_status = milbeaut_hdmac_tx_status; + ddev->device_issue_pending = milbeaut_hdmac_issue_pending; + INIT_LIST_HEAD(&ddev->channels); + + for (i = 0; i < nr_chans; i++) { + ret = milbeaut_hdmac_chan_init(pdev, mdev, i); + if (ret) + goto disable_clk; + } + + ret = dma_async_device_register(ddev); + if (ret) + goto disable_clk; + + ret = of_dma_controller_register(dev->of_node, + milbeaut_hdmac_xlate, mdev); + if (ret) + goto unregister_dmac; + + platform_set_drvdata(pdev, mdev); + + return 0; + +unregister_dmac: + dma_async_device_unregister(ddev); +disable_clk: + clk_disable_unprepare(mdev->clk); + + return ret; +} + +static int milbeaut_hdmac_remove(struct platform_device *pdev) +{ + struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev); + struct dma_chan *chan; + int ret; + + /* + * Before reaching here, almost all descriptors have been freed by the + * ->device_free_chan_resources() hook. However, each channel might + * be still holding one descriptor that was on-flight at that moment. + * Terminate it to make sure this hardware is no longer running. Then, + * free the channel resources once again to avoid memory leak. + */ + list_for_each_entry(chan, &mdev->ddev.channels, device_node) { + ret = dmaengine_terminate_sync(chan); + if (ret) + return ret; + milbeaut_hdmac_free_chan_resources(chan); + } + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&mdev->ddev); + clk_disable_unprepare(mdev->clk); + + return 0; +} + +static const struct of_device_id milbeaut_hdmac_match[] = { + { .compatible = "socionext,milbeaut-m10v-hdmac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match); + +static struct platform_driver milbeaut_hdmac_driver = { + .probe = milbeaut_hdmac_probe, + .remove = milbeaut_hdmac_remove, + .driver = { + .name = "milbeaut-m10v-hdmac", + .of_match_table = milbeaut_hdmac_match, + }, +}; +module_platform_driver(milbeaut_hdmac_driver); + +MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver"); +MODULE_LICENSE("GPL v2"); From 3708f89b33cc2aef5e121221704c1f833ca712c4 Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Mon, 14 Oct 2019 22:31:57 -0500 Subject: [PATCH 23/61] dt-bindings: milbeaut-m10v-xdmac: Add Socionext Milbeaut XDMAC bindings Document the devicetree bindings for Socionext Milbeaut XDMAC controller. Controller only supports Mem->Mem transfers. Number of physical channels are determined by the number of irqs registered. 
Reviewed-by: Rob Herring Signed-off-by: Jassi Brar Link: https://lore.kernel.org/r/20191015033157.14656-1-jassisinghbrar@gmail.com Signed-off-by: Vinod Koul --- .../bindings/dma/milbeaut-m10v-xdmac.txt | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt new file mode 100644 index 000000000000..305791804062 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt @@ -0,0 +1,24 @@ +* Milbeaut AXI DMA Controller + +Milbeaut AXI DMA controller has only memory to memory transfer capability. + +* DMA controller + +Required property: +- compatible: Should be "socionext,milbeaut-m10v-xdmac" +- reg: Should contain DMA registers location and length. +- interrupts: Should contain all of the per-channel DMA interrupts. + Number of channels is configurable - 2, 4 or 8, so + the number of interrupts specified should be {2,4,8}. +- #dma-cells: Should be 1. + +Example: + xdmac0: dma-controller@1c250000 { + compatible = "socionext,milbeaut-m10v-xdmac"; + reg = <0x1c250000 0x1000>; + interrupts = <0 17 0x4>, + <0 18 0x4>, + <0 19 0x4>, + <0 20 0x4>; + #dma-cells = <1>; + }; From a6e9be055d47fecdb090c2fb6cd2fdeaf820353c Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Mon, 14 Oct 2019 22:32:19 -0500 Subject: [PATCH 24/61] dmaengine: milbeaut-xdmac: Add XDMAC driver for Milbeaut platforms Driver for Socionext Milbeaut XDMAC controller. The controller only supports Mem-To-Mem transfers over upto 8 configurable channels. Signed-off-by: Jassi Brar Link: https://lore.kernel.org/r/20191015033219.14713-1-jassisinghbrar@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 10 + drivers/dma/Makefile | 1 + drivers/dma/milbeaut-xdmac.c | 418 +++++++++++++++++++++++++++++++++++ 3 files changed, 429 insertions(+) create mode 100644 drivers/dma/milbeaut-xdmac.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a0ab82243b92..59390e80b26c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -352,6 +352,16 @@ config MILBEAUT_HDMAC Say yes here to support the Socionext Milbeaut HDMAC device. +config MILBEAUT_XDMAC + tristate "Milbeaut AXI DMA support" + depends on ARCH_MILBEAUT || COMPILE_TEST + depends on OF + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Say yes here to support the Socionext Milbeaut + XDMAC device. + config MMP_PDMA bool "MMP PDMA support" depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 55b65ec7575a..3fdea8a1cc86 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o +obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MOXART_DMA) += moxart-dma.o diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c new file mode 100644 index 000000000000..3d5b1926a58d --- /dev/null +++ b/drivers/dma/milbeaut-xdmac.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Linaro Ltd. +// Copyright (C) 2019 Socionext Inc. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +/* global register */ +#define M10V_XDACS 0x00 + +/* channel local register */ +#define M10V_XDTBC 0x10 +#define M10V_XDSSA 0x14 +#define M10V_XDDSA 0x18 +#define M10V_XDSAC 0x1C +#define M10V_XDDAC 0x20 +#define M10V_XDDCC 0x24 +#define M10V_XDDES 0x28 +#define M10V_XDDPC 0x2C +#define M10V_XDDSD 0x30 + +#define M10V_XDACS_XE BIT(28) + +#define M10V_DEFBS 0x3 +#define M10V_DEFBL 0xf + +#define M10V_XDSAC_SBS GENMASK(17, 16) +#define M10V_XDSAC_SBL GENMASK(11, 8) + +#define M10V_XDDAC_DBS GENMASK(17, 16) +#define M10V_XDDAC_DBL GENMASK(11, 8) + +#define M10V_XDDES_CE BIT(28) +#define M10V_XDDES_SE BIT(24) +#define M10V_XDDES_SA BIT(15) +#define M10V_XDDES_TF GENMASK(23, 20) +#define M10V_XDDES_EI BIT(1) +#define M10V_XDDES_TI BIT(0) + +#define M10V_XDDSD_IS_MASK GENMASK(3, 0) +#define M10V_XDDSD_IS_NORMAL 0x8 + +#define MLB_XDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +struct milbeaut_xdmac_desc { + struct virt_dma_desc vd; + size_t len; + dma_addr_t src; + dma_addr_t dst; +}; + +struct milbeaut_xdmac_chan { + struct virt_dma_chan vc; + struct milbeaut_xdmac_desc *md; + void __iomem *reg_ch_base; +}; + +struct milbeaut_xdmac_device { + struct dma_device ddev; + void __iomem *reg_base; + struct milbeaut_xdmac_chan channels[0]; +}; + +static struct milbeaut_xdmac_chan * +to_milbeaut_xdmac_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct milbeaut_xdmac_chan, vc); +} + +static struct milbeaut_xdmac_desc * +to_milbeaut_xdmac_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct milbeaut_xdmac_desc, vd); +} + +/* mc->vc.lock must be held by caller */ +static struct milbeaut_xdmac_desc * +milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc) +{ + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&mc->vc); + if (!vd) { + mc->md = NULL; + return NULL; + } + + list_del(&vd->node); + + mc->md = to_milbeaut_xdmac_desc(vd); + + return mc->md; +} + +/* mc->vc.lock must be held by caller */ +static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc, + struct milbeaut_xdmac_desc *md) +{ + u32 val; + + /* Setup the channel */ + val = md->len - 1; + writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC); + + val = md->src; + writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA); + + val = md->dst; + writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA); + + val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC); + val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL); + val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) | + FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL); + writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC); + + val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC); + val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL); + val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) | + FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL); + writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC); + + /* Start the channel */ + val = readl_relaxed(mc->reg_ch_base + M10V_XDDES); + val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF | + M10V_XDDES_EI | M10V_XDDES_TI); + val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) | + FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) | + FIELD_PREP(M10V_XDDES_TI, 1); + writel_relaxed(val, mc->reg_ch_base + M10V_XDDES); +} + +/* mc->vc.lock must be held by caller */ +static void milbeaut_xdmac_start(struct 
milbeaut_xdmac_chan *mc) +{ + struct milbeaut_xdmac_desc *md; + + md = milbeaut_xdmac_next_desc(mc); + if (md) + milbeaut_chan_start(mc, md); +} + +static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id) +{ + struct milbeaut_xdmac_chan *mc = dev_id; + struct milbeaut_xdmac_desc *md; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&mc->vc.lock, flags); + + /* Ack and Stop */ + val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0); + writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD); + + md = mc->md; + if (!md) + goto out; + + vchan_cookie_complete(&md->vd); + + milbeaut_xdmac_start(mc); +out: + spin_unlock_irqrestore(&mc->vc.lock, flags); + return IRQ_HANDLED; +} + +static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan) +{ + vchan_free_chan_resources(to_virt_chan(chan)); +} + +static struct dma_async_tx_descriptor * +milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_xdmac_desc *md; + + md = kzalloc(sizeof(*md), GFP_NOWAIT); + if (!md) + return NULL; + + md->len = len; + md->src = src; + md->dst = dst; + + return vchan_tx_prep(vc, &md->vd, flags); +} + +static int milbeaut_xdmac_terminate_all(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc); + unsigned long flags; + u32 val; + + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + + /* Halt the channel */ + val = readl(mc->reg_ch_base + M10V_XDDES); + val &= ~M10V_XDDES_CE; + val |= FIELD_PREP(M10V_XDDES_CE, 0); + writel(val, mc->reg_ch_base + M10V_XDDES); + + if (mc->md) { + vchan_terminate_vdesc(&mc->md->vd); + mc->md = NULL; + } + + vchan_get_all_descriptors(vc, &head); + + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); + + return 0; +} + +static void milbeaut_xdmac_synchronize(struct dma_chan *chan) +{ + vchan_synchronize(to_virt_chan(chan)); +} + +static void milbeaut_xdmac_issue_pending(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc); + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + + if (vchan_issue_pending(vc) && !mc->md) + milbeaut_xdmac_start(mc); + + spin_unlock_irqrestore(&vc->lock, flags); +} + +static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_milbeaut_xdmac_desc(vd)); +} + +static int milbeaut_xdmac_chan_init(struct platform_device *pdev, + struct milbeaut_xdmac_device *mdev, + int chan_id) +{ + struct device *dev = &pdev->dev; + struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id]; + char *irq_name; + int irq, ret; + + irq = platform_get_irq(pdev, chan_id); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", + chan_id); + return irq; + } + + irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d", + chan_id); + if (!irq_name) + return -ENOMEM; + + ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt, + IRQF_SHARED, irq_name, mc); + if (ret) + return ret; + + mc->reg_ch_base = mdev->reg_base + chan_id * 0x30; + + mc->vc.desc_free = milbeaut_xdmac_desc_free; + vchan_init(&mc->vc, &mdev->ddev); + + return 0; +} + +static void enable_xdmac(struct milbeaut_xdmac_device *mdev) +{ + unsigned int val; + + val = readl(mdev->reg_base + M10V_XDACS); + val |= M10V_XDACS_XE; + writel(val, mdev->reg_base + M10V_XDACS); +} + +static void disable_xdmac(struct milbeaut_xdmac_device 
*mdev) +{ + unsigned int val; + + val = readl(mdev->reg_base + M10V_XDACS); + val &= ~M10V_XDACS_XE; + writel(val, mdev->reg_base + M10V_XDACS); +} + +static int milbeaut_xdmac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct milbeaut_xdmac_device *mdev; + struct dma_device *ddev; + int nr_chans, ret, i; + + nr_chans = platform_irq_count(pdev); + if (nr_chans < 0) + return nr_chans; + + mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans), + GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + mdev->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mdev->reg_base)) + return PTR_ERR(mdev->reg_base); + + ddev = &mdev->ddev; + ddev->dev = dev; + dma_cap_set(DMA_MEMCPY, ddev->cap_mask); + ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS; + ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS; + ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources; + ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy; + ddev->device_terminate_all = milbeaut_xdmac_terminate_all; + ddev->device_synchronize = milbeaut_xdmac_synchronize; + ddev->device_tx_status = dma_cookie_status; + ddev->device_issue_pending = milbeaut_xdmac_issue_pending; + INIT_LIST_HEAD(&ddev->channels); + + for (i = 0; i < nr_chans; i++) { + ret = milbeaut_xdmac_chan_init(pdev, mdev, i); + if (ret) + return ret; + } + + enable_xdmac(mdev); + + ret = dma_async_device_register(ddev); + if (ret) + return ret; + + ret = of_dma_controller_register(dev->of_node, + of_dma_simple_xlate, mdev); + if (ret) + goto unregister_dmac; + + platform_set_drvdata(pdev, mdev); + + return 0; + +unregister_dmac: + dma_async_device_unregister(ddev); + return ret; +} + +static int milbeaut_xdmac_remove(struct platform_device *pdev) +{ + struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev); + struct dma_chan *chan; + int ret; + + /* + * Before reaching here, almost all descriptors have been freed by the + * ->device_free_chan_resources() hook. However, each channel might + * be still holding one descriptor that was on-flight at that moment. + * Terminate it to make sure this hardware is no longer running. Then, + * free the channel resources once again to avoid memory leak. + */ + list_for_each_entry(chan, &mdev->ddev.channels, device_node) { + ret = dmaengine_terminate_sync(chan); + if (ret) + return ret; + milbeaut_xdmac_free_chan_resources(chan); + } + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&mdev->ddev); + + disable_xdmac(mdev); + + return 0; +} + +static const struct of_device_id milbeaut_xdmac_match[] = { + { .compatible = "socionext,milbeaut-m10v-xdmac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match); + +static struct platform_driver milbeaut_xdmac_driver = { + .probe = milbeaut_xdmac_probe, + .remove = milbeaut_xdmac_remove, + .driver = { + .name = "milbeaut-m10v-xdmac", + .of_match_table = milbeaut_xdmac_match, + }, +}; +module_platform_driver(milbeaut_xdmac_driver); + +MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver"); +MODULE_LICENSE("GPL v2"); From 005a017926ffe648b38b6462748a74c850a31e62 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 15 Oct 2019 20:18:18 +0530 Subject: [PATCH 25/61] dmaengine: xilinx_dma: Remove desc_callback_valid check In descriptor cleanup the call to desc_callback_valid can be safely removed as both callback pointers i.e callback_result and callback are anyway checked in invoke(). There is no much benefit in having redundant checks. 
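For reference, the check being removed duplicates what the core helper already does. Paraphrased (this is a sketch of the behaviour, not a verbatim copy of drivers/dma/dmaengine.h), the invoke path looks roughly as follows, so callers gain nothing from testing validity first:

/* Paraphrased behaviour of dmaengine_desc_callback_invoke(): both
 * callback pointers are tested before being dereferenced. */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy = { .result = DMA_TRANS_NOERROR, .residue = 0 };

	if (cb->callback_result)
		cb->callback_result(cb->callback_param, result ? result : &dummy);
	else if (cb->callback)
		cb->callback(cb->callback_param);
}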
Signed-off-by: Radhey Shyam Pandey Signed-off-by: Nicholas Graumann Reviewed-by: Appana Durga Kedareswara rao Link: https://lore.kernel.org/r/1571150904-3988-2-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 94cdc410977b..3e690ec45c13 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -832,11 +832,9 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) /* Run the link descriptor callback function */ dmaengine_desc_get_callback(&desc->async_tx, &cb); - if (dmaengine_desc_callback_valid(&cb)) { - spin_unlock_irqrestore(&chan->lock, flags); - dmaengine_desc_callback_invoke(&cb, NULL); - spin_lock_irqsave(&chan->lock, flags); - } + spin_unlock_irqrestore(&chan->lock, flags); + dmaengine_desc_callback_invoke(&cb, NULL); + spin_lock_irqsave(&chan->lock, flags); /* Run any dependencies, then free the descriptor */ dma_run_dependencies(&desc->async_tx); From 0f45e75e336f85aeee0392042e022814bf033aa2 Mon Sep 17 00:00:00 2001 From: Nicholas Graumann Date: Tue, 15 Oct 2019 20:18:19 +0530 Subject: [PATCH 26/61] dmaengine: xilinx_dma: Merge get_callback and _invoke The dma api provides a single interface to get the appropriate callback and invoke it directly. Prefer using it. Signed-off-by: Nicholas Graumann Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-3-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 3e690ec45c13..87132bd8cd36 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -820,8 +820,6 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) spin_lock_irqsave(&chan->lock, flags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { - struct dmaengine_desc_callback cb; - if (desc->cyclic) { xilinx_dma_chan_handle_cyclic(chan, desc, &flags); break; @@ -831,9 +829,8 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) list_del(&desc->node); /* Run the link descriptor callback function */ - dmaengine_desc_get_callback(&desc->async_tx, &cb); spin_unlock_irqrestore(&chan->lock, flags); - dmaengine_desc_callback_invoke(&cb, NULL); + dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); spin_lock_irqsave(&chan->lock, flags); /* Run any dependencies, then free the descriptor */ From 95f68c62628085abff83a5add8db057fbf532f7f Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 15 Oct 2019 20:18:20 +0530 Subject: [PATCH 27/61] dmaengine: xilinx_dma: Remove residue from channel data There is no use of storing channel data residue field. So clean it up. In tx_status simply pass calculated residue to dma_set_residue. 
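As a quick illustration of the pattern this change converges on (the foo_* names are placeholders), the residue only needs to live long enough to be handed to the core; nothing requires caching it in the channel structure:

static enum dma_status foo_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	u32 residue;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	/* Work out the residue from hardware/descriptor state... */
	residue = foo_get_residue(dchan, cookie);	/* hypothetical helper */
	dma_set_residue(txstate, residue);		/* pass it straight through */

	return ret;
}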
Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-4-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 87132bd8cd36..41d536c32957 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -336,7 +336,6 @@ struct xilinx_dma_tx_descriptor { * @desc_pendingcount: Descriptor pending count * @ext_addr: Indicates 64 bit addressing is supported by dma channel * @desc_submitcount: Descriptor h/w submitted count - * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base * @seg_p: Physical allocated segments base * @cyclic_seg_v: Statically allocated segment base for cyclic transfers @@ -373,7 +372,6 @@ struct xilinx_dma_chan { u32 desc_pendingcount; bool ext_addr; u32 desc_submitcount; - u32 residue; struct xilinx_axidma_tx_segment *seg_v; dma_addr_t seg_p; struct xilinx_axidma_tx_segment *cyclic_seg_v; @@ -1019,8 +1017,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, } spin_unlock_irqrestore(&chan->lock, flags); - chan->residue = residue; - dma_set_residue(txstate, chan->residue); + dma_set_residue(txstate, residue); } return ret; From a575d0b4e663d818803f75de0cf4bc3c4b35b203 Mon Sep 17 00:00:00 2001 From: Nicholas Graumann Date: Tue, 15 Oct 2019 20:18:21 +0530 Subject: [PATCH 28/61] dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue Introduce a function that can calculate residues for IPs that support it: AXI DMA and CDMA. Signed-off-by: Nicholas Graumann Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-5-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 71 +++++++++++++++++++++++++-------- 1 file changed, 54 insertions(+), 17 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 41d536c32957..fe265d942ed2 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -784,6 +784,44 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) } } +/** + * xilinx_dma_get_residue - Compute residue for a given descriptor + * @chan: Driver specific dma channel + * @desc: dma transaction descriptor + * + * Return: The number of residue bytes for the descriptor. 
+ */ +static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan, + struct xilinx_dma_tx_descriptor *desc) +{ + struct xilinx_cdma_tx_segment *cdma_seg; + struct xilinx_axidma_tx_segment *axidma_seg; + struct xilinx_cdma_desc_hw *cdma_hw; + struct xilinx_axidma_desc_hw *axidma_hw; + struct list_head *entry; + u32 residue = 0; + + list_for_each(entry, &desc->segments) { + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { + cdma_seg = list_entry(entry, + struct xilinx_cdma_tx_segment, + node); + cdma_hw = &cdma_seg->hw; + residue += (cdma_hw->control - cdma_hw->status) & + chan->xdev->max_buffer_len; + } else { + axidma_seg = list_entry(entry, + struct xilinx_axidma_tx_segment, + node); + axidma_hw = &axidma_seg->hw; + residue += (axidma_hw->control - axidma_hw->status) & + chan->xdev->max_buffer_len; + } + } + + return residue; +} + /** * xilinx_dma_chan_handle_cyclic - Cyclic dma callback * @chan: Driver specific dma channel @@ -993,8 +1031,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; enum dma_status ret; unsigned long flags; u32 residue = 0; @@ -1003,22 +1039,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, if (ret == DMA_COMPLETE || !txstate) return ret; - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - spin_lock_irqsave(&chan->lock, flags); + spin_lock_irqsave(&chan->lock, flags); - desc = list_last_entry(&chan->active_list, - struct xilinx_dma_tx_descriptor, node); - if (chan->has_sg) { - list_for_each_entry(segment, &desc->segments, node) { - hw = &segment->hw; - residue += (hw->control - hw->status) & - chan->xdev->max_buffer_len; - } - } - spin_unlock_irqrestore(&chan->lock, flags); + desc = list_last_entry(&chan->active_list, + struct xilinx_dma_tx_descriptor, node); + /* + * VDMA and simple mode do not support residue reporting, so the + * residue field will always be 0. + */ + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) + residue = xilinx_dma_get_residue(chan, desc); - dma_set_residue(txstate, residue); - } + spin_unlock_irqrestore(&chan->lock, flags); + + dma_set_residue(txstate, residue); return ret; } @@ -2705,12 +2739,15 @@ static int xilinx_dma_probe(struct platform_device *pdev) xilinx_dma_prep_dma_cyclic; xdev->common.device_prep_interleaved_dma = xilinx_dma_prep_interleaved; - /* Residue calculation is supported by only AXI DMA */ + /* Residue calculation is supported by only AXI DMA and CDMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; + /* Residue calculation is supported by only AXI DMA and CDMA */ + xdev->common.residue_granularity = + DMA_RESIDUE_GRANULARITY_SEGMENT; } else { xdev->common.device_prep_interleaved_dma = xilinx_vdma_dma_prep_interleaved; From d8bae21a48dbe132788291257638f323f4ee5c94 Mon Sep 17 00:00:00 2001 From: Nicholas Graumann Date: Tue, 15 Oct 2019 20:18:22 +0530 Subject: [PATCH 29/61] dmaengine: xilinx_dma: Add callback_result support Take advantage of dmaengine_desc_get_callback_invoke which allows either a callback or callback_result to be specified. 
This can be useful when using the AXI DMA transfer unknown quantities of data where the residue contained in the result can be used to calculate the number of bytes transferred. Signed-off-by: Nicholas Graumann Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-6-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index fe265d942ed2..d81f387cfcc7 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -300,12 +300,16 @@ struct xilinx_cdma_tx_segment { * @segments: TX segments list * @node: Node in the channel descriptors list * @cyclic: Check for cyclic transfers. + * @err: Whether the descriptor has an error. + * @residue: Residue of the completed descriptor */ struct xilinx_dma_tx_descriptor { struct dma_async_tx_descriptor async_tx; struct list_head segments; struct list_head node; bool cyclic; + bool err; + u32 residue; }; /** @@ -856,6 +860,8 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) spin_lock_irqsave(&chan->lock, flags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { + struct dmaengine_result result; + if (desc->cyclic) { xilinx_dma_chan_handle_cyclic(chan, desc, &flags); break; @@ -864,9 +870,20 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) /* Remove from the list of running transactions */ list_del(&desc->node); + if (unlikely(desc->err)) { + if (chan->direction == DMA_DEV_TO_MEM) + result.result = DMA_TRANS_READ_FAILED; + else + result.result = DMA_TRANS_WRITE_FAILED; + } else { + result.result = DMA_TRANS_NOERROR; + } + + result.residue = desc->residue; + /* Run the link descriptor callback function */ spin_unlock_irqrestore(&chan->lock, flags); - dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); + dmaengine_desc_get_callback_invoke(&desc->async_tx, &result); spin_lock_irqsave(&chan->lock, flags); /* Run any dependencies, then free the descriptor */ @@ -1421,6 +1438,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) return; list_for_each_entry_safe(desc, next, &chan->active_list, node) { + if (chan->has_sg && chan->xdev->dma_config->dmatype != + XDMA_TYPE_VDMA) + desc->residue = xilinx_dma_get_residue(chan, desc); + else + desc->residue = 0; + desc->err = chan->err; + list_del(&desc->node); if (!desc->cyclic) dma_cookie_complete(&desc->async_tx); From 722b9e6d7e49b1dc429f9b65680b325c1ce9a763 Mon Sep 17 00:00:00 2001 From: Nicholas Graumann Date: Tue, 15 Oct 2019 20:18:23 +0530 Subject: [PATCH 30/61] dmaengine: xilinx_dma: Print debug message when no free tx segments The driver should not run out of tx segments in normal operation. But, if the user attempts to prepare a transaction with a large sg list, the driver may not have enough free segments to accommodate the request. Log a message at the debug level to inform the user in case they are experiencing issues. 
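For illustration only (not part of this series): a client-side sketch of how the two changes above are visible to a dmaengine consumer. The NULL return covered by the new dev_dbg() surfaces at the prep call, and the callback_result support added in the previous patch delivers the residue. Names other than the dmaengine API calls (my_dev, my_done, md->requested, md->received) are hypothetical.

  static void my_done(void *param, const struct dmaengine_result *result)
  {
          struct my_dev *md = param;

          if (result->result == DMA_TRANS_NOERROR)
                  md->received = md->requested - result->residue;
  }

  /* in the client's submit path */
  struct dma_async_tx_descriptor *desc;

  desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                 DMA_PREP_INTERRUPT);
  if (!desc)              /* e.g. the driver ran out of free tx segments */
          return -ENOMEM;

  desc->callback_result = my_done;
  desc->callback_param = md;
  dmaengine_submit(desc);
  dma_async_issue_pending(chan);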
Signed-off-by: Nicholas Graumann Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-7-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index d81f387cfcc7..93471a6199ce 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -612,6 +612,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) } spin_unlock_irqrestore(&chan->lock, flags); + if (!segment) + dev_dbg(chan->dev, "Could not find free tx segment\n"); + return segment; } From 8a631a5a0f7d4a4a24dba8587d5d9152be0871cc Mon Sep 17 00:00:00 2001 From: Nicholas Graumann Date: Tue, 15 Oct 2019 20:18:24 +0530 Subject: [PATCH 31/61] dmaengine: xilinx_dma: Clear desc_pendingcount in xilinx_dma_reset Whenever we reset the channel, we need to clear desc_pendingcount along with desc_submitcount. Otherwise when a new transaction is submitted, the irq coalesce level could be programmed to an incorrect value in the axidma case. This behavior can be observed when terminating pending transactions with xilinx_dma_terminate_all() and then submitting new transactions without releasing and requesting the channel. Signed-off-by: Nicholas Graumann Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571150904-3988-8-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 93471a6199ce..41189b6d4301 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1482,6 +1482,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan) chan->err = false; chan->idle = true; + chan->desc_pendingcount = 0; chan->desc_submitcount = 0; return err; From 53596dfa59807d6e4a30e9f157f73cfc14d7da89 Mon Sep 17 00:00:00 2001 From: Peng Ma Date: Wed, 23 Oct 2019 12:56:17 +0800 Subject: [PATCH 32/61] dmaengine: fsl-dpaa2-qdma: export the symbols The symbols were not exported leading to error: WARNING: modpost: missing MODULE_LICENSE() in drivers/dma/fsl-dpaa2-qdma/dpdmai.o see include/linux/module.h for more information GZIP arch/arm64/boot/Image.gz ERROR: "dpdmai_enable" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_set_rx_queue" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_get_tx_queue" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_get_rx_queue" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_get_attributes" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_open" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_close" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_disable" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! ERROR: "dpdmai_reset" [drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.ko] undefined! WARNING: "HYPERVISOR_platform_op" [vmlinux] is a static EXPORT_SYMBOL_GPL make[2]: *** [__modpost] Error 1 make[1]: *** [modules] Error 2 make[1]: *** Waiting for unfinished jobs.... make: *** [sub-make] Error 2 So export it. 
Signed-off-by: Peng Ma Reported-by: Anders Roxell Link: https://lore.kernel.org/r/20191023045617.22764-1-peng.ma@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-dpaa2-qdma/dpdmai.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c index fbc2b2f39bec..f8a1f663145b 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright 2019 NXP +#include #include #include #include @@ -90,6 +91,7 @@ int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, return 0; } +EXPORT_SYMBOL_GPL(dpdmai_open); /** * dpdmai_close() - Close the control session of the object @@ -113,6 +115,7 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) /* send command to mc*/ return mc_send_command(mc_io, &cmd); } +EXPORT_SYMBOL_GPL(dpdmai_close); /** * dpdmai_create() - Create the DPDMAI object @@ -177,6 +180,7 @@ int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) /* send command to mc*/ return mc_send_command(mc_io, &cmd); } +EXPORT_SYMBOL_GPL(dpdmai_enable); /** * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. @@ -197,6 +201,7 @@ int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) /* send command to mc*/ return mc_send_command(mc_io, &cmd); } +EXPORT_SYMBOL_GPL(dpdmai_disable); /** * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. @@ -217,6 +222,7 @@ int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) /* send command to mc*/ return mc_send_command(mc_io, &cmd); } +EXPORT_SYMBOL_GPL(dpdmai_reset); /** * dpdmai_get_attributes() - Retrieve DPDMAI attributes. @@ -252,6 +258,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, return 0; } +EXPORT_SYMBOL_GPL(dpdmai_get_attributes); /** * dpdmai_set_rx_queue() - Set Rx queue configuration @@ -285,6 +292,7 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } +EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue); /** * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. @@ -325,6 +333,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, return 0; } +EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue); /** * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. @@ -364,3 +373,6 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, return 0; } +EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue); + +MODULE_LICENSE("GPL v2"); From 41814c4eadf8a791b6d07114f96e7e120e59555c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 4 Oct 2019 17:08:26 +0200 Subject: [PATCH 33/61] dmaengine: fsl-qdma: Handle invalid qdma-queue0 IRQ platform_get_irq_byname() might return -errno which later would be cast to an unsigned int and used in IRQ handling code leading to usage of wrong ID and errors about wrong irq_base. 
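A minimal sketch of the required pattern, for illustration only (the local variable is hypothetical; the actual fix below checks the stored field directly):

  int irq;

  irq = platform_get_irq_byname(pdev, "qdma-queue0");
  if (irq < 0)
          return irq;     /* -ENXIO, -EPROBE_DEFER, ... */

  /*
   * Only after this check is it safe to hand the value to code that
   * treats IRQ numbers as unsigned (request_irq(), per-queue IRQ
   * lookups derived from irq_base, ...).
   */
  fsl_qdma->irq_base = irq;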
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Peng Ma Tested-by: Peng Ma Link: https://lore.kernel.org/r/20191004150826.6656-1-krzk@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/fsl-qdma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 06664fbd2d91..89792083d62c 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev) return ret; fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); + if (fsl_qdma->irq_base < 0) + return fsl_qdma->irq_base; + fsl_qdma->feature = of_property_read_bool(np, "big-endian"); INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); From 7208474d1c7af9f86dfb1f20bfa2abbe22ad2017 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 22 Oct 2019 10:16:48 -0700 Subject: [PATCH 34/61] dmaengine: fsl-dpaa2-qdma: Remove unnecessary local variables in DPDMAI_CMD_CREATE macro Clang warns: drivers/dma/fsl-dpaa2-qdma/dpdmai.c:148:25: warning: variable 'cfg' is uninitialized when used within its own initialization [-Wuninitialized] DPDMAI_CMD_CREATE(cmd, cfg); ~~~~~~~~~~~~~~~~~~~~~~~^~~~ drivers/dma/fsl-dpaa2-qdma/dpdmai.c:42:24: note: expanded from macro 'DPDMAI_CMD_CREATE' typeof(_cfg) (cfg) = (_cfg); \ ~~~ ^~~~ 1 warning generated. Looking at the preprocessed source, we can see that this is true. int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, const struct dpdmai_cfg *cfg, u16 *token) { struct fsl_mc_command cmd = { 0 }; int err; cmd.header = mc_encode_cmd_header((((0x90E) << 4) | 0), cmd_flags, 0); do { typeof(cmd)(cmd) = (cmd); typeof(cfg)(cfg) = (cfg); ((cmd).params[0] |= mc_enc((8), (8), (cfg)->priorities[0])); ((cmd).params[0] |= mc_enc((16), (8), (cfg)->priorities[1])); } while (0); I cannot see a good reason to create another version of cfg when the parameter one will work perfectly fine and cmd can just be used as is. Remove them to fix this warning. Fixes: f2835adf8afb ("dmaengine: fsl-dpaa2-qdma: Add the DPDMAI(Data Path DMA Interface) support") Link: https://github.com/ClangBuiltLinux/linux/issues/746 Signed-off-by: Nathan Chancellor Link: https://lore.kernel.org/r/20191022171648.37732-1-natechancellor@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/fsl-dpaa2-qdma/dpdmai.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c index f8a1f663145b..f8d22115154a 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -37,10 +37,8 @@ struct dpdmai_rsp_get_tx_queue { ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) /* cmd, param, offset, width, type, arg_name */ -#define DPDMAI_CMD_CREATE(_cmd, _cfg) \ +#define DPDMAI_CMD_CREATE(cmd, cfg) \ do { \ - typeof(_cmd) (cmd) = (_cmd); \ - typeof(_cfg) (cfg) = (_cfg); \ MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\ MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\ } while (0) From 4868d87c18aa47ed155f48167387a3b716b461d1 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 25 Oct 2019 10:30:54 +0300 Subject: [PATCH 35/61] dt-bindings: dmaengine: dma-common: Change dma-channel-mask to uint32-array Make the dma-channel-mask to be usable for controllers with more than 32 channels. 
Signed-off-by: Peter Ujfalusi Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20191025073056.25450-2-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/dma-common.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/dma/dma-common.yaml b/Documentation/devicetree/bindings/dma/dma-common.yaml index ed0a49a6f020..02a34ba2b49b 100644 --- a/Documentation/devicetree/bindings/dma/dma-common.yaml +++ b/Documentation/devicetree/bindings/dma/dma-common.yaml @@ -25,11 +25,18 @@ properties: Used to provide DMA controller specific information. dma-channel-mask: - $ref: /schemas/types.yaml#definitions/uint32 description: Bitmask of available DMA channels in ascending order that are not reserved by firmware and are available to the kernel. i.e. first channel corresponds to LSB. + The first item in the array is for channels 0-31, the second is for + channels 32-63, etc. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + items: + minItems: 1 + # Should be enough + maxItems: 255 dma-channels: $ref: /schemas/types.yaml#definitions/uint32 From 115b60a93ee4e29e103c4bf067aeab45c6d51725 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 25 Oct 2019 10:30:55 +0300 Subject: [PATCH 36/61] dt-bindings: dma: ti-edma: Document dma-channel-mask for EDMA Similarly to paRAM slots, channels can be used by other cores. The common dma-channel-mask property can be used for specifying the available channels. Signed-off-by: Peter Ujfalusi Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/20191025073056.25450-3-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/ti-edma.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index 4bbc94d829c8..0e1398f93aa2 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt @@ -42,6 +42,11 @@ Optional properties: - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by the driver, they are allocated to be used by for example the DSP. See example. +- dma-channel-mask: Mask of usable channels. + Single uint32 for EDMA with 32 channels, array of two uint32 for + EDMA with 64 channels. See example and + Documentation/devicetree/bindings/dma/dma-common.yaml + ------------------------------------------------------------------------------ eDMA3 Transfer Controller @@ -91,6 +96,9 @@ edma: edma@49000000 { ti,edma-memcpy-channels = <20 21>; /* The following PaRAM slots are reserved: 35-44 and 100-109 */ ti,edma-reserved-slot-ranges = <35 10>, <100 10>; + /* The following channels are reserved: 35-44 */ + dma-channel-mask = <0xffffffff /* Channel 0-31 */ + 0xffffe007>; /* Channel 32-63 */ }; edma_tptc0: tptc@49800000 { From 31f4b28f6c41f734957ea66ac84c6abb69e696f0 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 25 Oct 2019 10:30:56 +0300 Subject: [PATCH 37/61] dmaengine: ti: edma: Add support for handling reserved channels Like paRAM slots, channels could be used by other cores and in this case we need to make sure that the driver do not alter these channels. Handle the generic dma-channel-mask property to mark channels in a bitmap which can not be used by Linux and convert the legacy rsv_chans if it is provided by platform_data. 
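As a worked example of the mapping consumed here, the 64-channel EDMA node in the binding patch above reserves channels 35-44 with dma-channel-mask = <0xffffffff 0xffffe007>:

  /*
   * word 0 covers channels  0..31: all usable              -> 0xffffffff
   * word 1 covers channels 32..63: channel n is bit n - 32,
   *        so reserving 35..44 clears bits 3..12           -> 0xffffe007
   *
   * A cleared bit means the channel belongs to another core and the
   * driver must skip it, e.g. (sketch of the check added below):
   */
  if (!test_bit(echan->ch_num, ecc->channels_mask))
          return -EINVAL;         /* channel reserved, do not touch */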
Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20191025073056.25450-4-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul --- drivers/dma/ti/edma.c | 59 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 54fd981e3db5..0ecfc2e1d798 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -260,6 +260,13 @@ struct edma_cc { */ unsigned long *slot_inuse; + /* + * For tracking reserved channels used by DSP. + * If the bit is cleared, the channel is allocated to be used by DSP + * and Linux must not touch it. + */ + unsigned long *channels_mask; + struct dma_device dma_slave; struct dma_device *dma_memcpy; struct edma_chan *slave_chans; @@ -716,6 +723,12 @@ static int edma_alloc_channel(struct edma_chan *echan, struct edma_cc *ecc = echan->ecc; int channel = EDMA_CHAN_SLOT(echan->ch_num); + if (!test_bit(echan->ch_num, ecc->channels_mask)) { + dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n", + echan->ch_num); + return -EINVAL; + } + /* ensure access through shadow region 0 */ edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel), EDMA_CHANNEL_BIT(channel)); @@ -2249,7 +2262,7 @@ static int edma_probe(struct platform_device *pdev) { struct edma_soc_info *info = pdev->dev.platform_data; s8 (*queue_priority_mapping)[2]; - const s16 (*rsv_slots)[2]; + const s16 (*reserved)[2]; int i, irq; char *irq_name; struct resource *mem; @@ -2329,15 +2342,32 @@ static int edma_probe(struct platform_device *pdev) if (!ecc->slot_inuse) return -ENOMEM; + ecc->channels_mask = devm_kcalloc(dev, + BITS_TO_LONGS(ecc->num_channels), + sizeof(unsigned long), GFP_KERNEL); + if (!ecc->channels_mask) + return -ENOMEM; + + /* Mark all channels available initially */ + bitmap_fill(ecc->channels_mask, ecc->num_channels); + ecc->default_queue = info->default_queue; if (info->rsv) { /* Set the reserved slots in inuse list */ - rsv_slots = info->rsv->rsv_slots; - if (rsv_slots) { - for (i = 0; rsv_slots[i][0] != -1; i++) - bitmap_set(ecc->slot_inuse, rsv_slots[i][0], - rsv_slots[i][1]); + reserved = info->rsv->rsv_slots; + if (reserved) { + for (i = 0; reserved[i][0] != -1; i++) + bitmap_set(ecc->slot_inuse, reserved[i][0], + reserved[i][1]); + } + + /* Clear channels not usable for Linux */ + reserved = info->rsv->rsv_chans; + if (reserved) { + for (i = 0; reserved[i][0] != -1; i++) + bitmap_clear(ecc->channels_mask, reserved[i][0], + reserved[i][1]); } } @@ -2389,6 +2419,7 @@ static int edma_probe(struct platform_device *pdev) if (!ecc->legacy_mode) { int lowest_priority = 0; + unsigned int array_max; struct of_phandle_args tc_args; ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, @@ -2410,6 +2441,18 @@ static int edma_probe(struct platform_device *pdev) info->default_queue = i; } } + + /* See if we have optional dma-channel-mask array */ + array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32)); + ret = of_property_read_variable_u32_array(node, + "dma-channel-mask", + (u32 *)ecc->channels_mask, + 1, array_max); + if (ret > 0 && ret != array_max) + dev_warn(dev, "dma-channel-mask is not complete.\n"); + else if (ret == -EOVERFLOW || ret == -ENODATA) + dev_warn(dev, + "dma-channel-mask is out of range or empty\n"); } /* Event queue priority mapping */ @@ -2427,6 +2470,10 @@ static int edma_probe(struct platform_device *pdev) edma_dma_init(ecc, legacy_mode); for (i = 0; i < ecc->num_channels; i++) { + /* Do not touch reserved channels */ + if (!test_bit(i, 
ecc->channels_mask)) + continue; + /* Assign all channels to the default queue */ edma_assign_channel_eventq(&ecc->slave_chans[i], info->default_queue); From 96336cc043ba8f4999bbeb7a9a29b46fc05d2bc4 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:17 +0530 Subject: [PATCH 38/61] dt-bindings: dmaengine: xilinx_dma: Remove axidma multichannel support The AXI DMA multichannel support is deprecated in the IP and it is no longer actively supported. For multichannel support, refer to the AXI multichannel direct memory access IP product guide(PG228) and MCDMA driver(added in the subsequent commits). Inline with it remove axidma multichannel optional properties i.e xlnx,mcdma and dma-channels from the binding description. Signed-off-by: Radhey Shyam Pandey Acked-by: Rob Herring Link: https://lore.kernel.org/r/1571763622-29281-2-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt index 93b6d961dd4f..99d06f9daee6 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -42,7 +42,6 @@ Optional properties for AXI DMA: register as configured in h/w. Takes values {8...26}. If the property is missing or invalid then the default value 23 is used. This is the maximum value that is supported by all IP versions. -- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware. Optional properties for VDMA: - xlnx,flush-fsync: Tells which channel to Flush on Frame sync. It takes following values: @@ -69,8 +68,6 @@ Optional child node properties for VDMA: enabled/disabled in hardware. - xlnx,enable-vert-flip: Tells vertical flip is enabled/disabled in hardware(S2MM path). -Optional child node properties for AXI DMA: --dma-channels: Number of dma channels in child node. Example: ++++++++ From 535b4b0c050b79db6a63097599fc87a156db6b2c Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:18 +0530 Subject: [PATCH 39/61] dt-bindings: dmaengine: xilinx_dma: Fix formatting and style Trivial formatting(keep compatible string one per line, caps change etc). It doesn't modify the content of the binding. Signed-off-by: Radhey Shyam Pandey Acked-by: Rob Herring Link: https://lore.kernel.org/r/1571763622-29281-3-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/xilinx/xilinx_dma.txt | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt index 99d06f9daee6..d4ba1cbcbc27 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -12,8 +12,10 @@ Xilinx AXI CDMA engine, it does transfers between memory-mapped source address and a memory-mapped destination address. Required properties: -- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or - "xlnx,axi-cdma-1.00.a"" +- compatible: Should be one of- + "xlnx,axi-vdma-1.00.a" + "xlnx,axi-dma-1.00.a" + "xlnx,axi-cdma-1.00.a" - #dma-cells: Should be <1>, see "dmas" property below - reg: Should contain VDMA registers location and length. 
- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits). @@ -29,7 +31,7 @@ Required properties: "m_axis_mm2s_aclk", "s_axis_s2mm_aclk" For CDMA: Required elements: "s_axi_lite_aclk", "m_axi_aclk" - FOR AXIDMA: + For AXIDMA: Required elements: "s_axi_lite_aclk" Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", "m_axi_sg_aclk" From 7cb1e57544e5d11e3a2742c5acb69562d02af235 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:19 +0530 Subject: [PATCH 40/61] dt-bindings: dmaengine: xilinx_dma: Add binding for Xilinx MCDMA IP Add devicetree binding for Xilinx AXI Multichannel Direct Memory Access (AXI MCDMA) IP. The AXI MCDMA provides high-bandwidth direct memory access between memory and AXI4-Stream target peripherals. The AXI MCDMA core provides a scatter-gather interface with multiple channel support with independent configuration. Signed-off-by: Radhey Shyam Pandey Acked-by: Rob Herring Link: https://lore.kernel.org/r/1571763622-29281-4-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/xilinx/xilinx_dma.txt | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt index d4ba1cbcbc27..325aca52cd43 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt +++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt @@ -11,11 +11,16 @@ is to receive from the device. Xilinx AXI CDMA engine, it does transfers between memory-mapped source address and a memory-mapped destination address. +Xilinx AXI MCDMA engine, it does transfer between memory and AXI4 stream +target devices. It can be configured to have up to 16 independent transmit +and receive channels. + Required properties: - compatible: Should be one of- "xlnx,axi-vdma-1.00.a" "xlnx,axi-dma-1.00.a" "xlnx,axi-cdma-1.00.a" + "xlnx,axi-mcdma-1.00.a" - #dma-cells: Should be <1>, see "dmas" property below - reg: Should contain VDMA registers location and length. - xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits). @@ -31,7 +36,7 @@ Required properties: "m_axis_mm2s_aclk", "s_axis_s2mm_aclk" For CDMA: Required elements: "s_axi_lite_aclk", "m_axi_aclk" - For AXIDMA: + For AXIDMA and MCDMA: Required elements: "s_axi_lite_aclk" Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk", "m_axi_sg_aclk" @@ -39,7 +44,7 @@ Required properties: Required properties for VDMA: - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w. -Optional properties for AXI DMA: +Optional properties for AXI DMA and MCDMA: - xlnx,sg-length-width: Should be set to the width in bits of the length register as configured in h/w. Takes values {8...26}. If the property is missing or invalid then the default value 23 is used. This is the @@ -56,8 +61,8 @@ Required child node properties: For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or "xlnx,axi-vdma-s2mm-channel". For CDMA: It should be "xlnx,axi-cdma-channel". - For AXIDMA: It should be either "xlnx,axi-dma-mm2s-channel" or - "xlnx,axi-dma-s2mm-channel". + For AXIDMA and MCDMA: It should be either "xlnx,axi-dma-mm2s-channel" + or "xlnx,axi-dma-s2mm-channel". - interrupts: Should contain per channel VDMA interrupts. - xlnx,datawidth: Should contain the stream data width, take values {32,64...1024}. @@ -70,6 +75,8 @@ Optional child node properties for VDMA: enabled/disabled in hardware. 
- xlnx,enable-vert-flip: Tells vertical flip is enabled/disabled in hardware(S2MM path). +Optional child node properties for MCDMA: +- dma-channels: Number of dma channels in child node. Example: ++++++++ From bcb2dc7b6c1ed0d13f640f4a0ea713088f188b19 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:20 +0530 Subject: [PATCH 41/61] dmaengine: xilinx_dma: Remove axidma multichannel mode support The AXI DMA multichannel support is deprecated in the IP and it is no longer actively supported. For multichannel support, refer to the AXI multichannel direct memory access IP product guide(PG228) and MCDMA driver. So inline with it remove axidma multichannel support from from the driver. Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571763622-29281-5-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 155 ++------------------------------ 1 file changed, 8 insertions(+), 147 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 41189b6d4301..6d4586550f56 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -116,7 +116,7 @@ #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -170,18 +170,6 @@ #define XILINX_DMA_NUM_DESCS 255 #define XILINX_DMA_NUM_APP_WORDS 5 -/* Multi-Channel DMA Descriptor offsets*/ -#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) -#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) - -/* Multi-Channel DMA Masks/Shifts */ -#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) -#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) -#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) -#define XILINX_DMA_BD_STRIDE_SHIFT 0 -#define XILINX_DMA_BD_VSIZE_SHIFT 19 - /* AXI CDMA Specific Registers/Offsets */ #define XILINX_CDMA_REG_SRCADDR 0x18 #define XILINX_CDMA_REG_DSTADDR 0x20 @@ -218,8 +206,8 @@ struct xilinx_vdma_desc_hw { * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 * @buf_addr_msb: MSB of Buffer address @0x0C - * @mcdma_control: Control field for mcdma @0x10 - * @vsize_stride: Vsize and Stride field for mcdma @0x14 + * @reserved1: Reserved @0x10 + * @reserved2: Reserved @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 @@ -229,8 +217,8 @@ struct xilinx_axidma_desc_hw { u32 next_desc_msb; u32 buf_addr; u32 buf_addr_msb; - u32 mcdma_control; - u32 vsize_stride; + u32 reserved1; + u32 reserved2; u32 control; u32 status; u32 app[XILINX_DMA_NUM_APP_WORDS]; @@ -346,7 +334,6 @@ struct xilinx_dma_tx_descriptor { * @cyclic_seg_p: Physical allocated segments base for cyclic dma * @start_transfer: Differentiate b/w DMA IP's transfer * @stop_transfer: Differentiate b/w DMA IP's quiesce - * @tdest: TDEST value for mcdma * @has_vflip: S2MM vertical flip */ struct xilinx_dma_chan { @@ -382,7 +369,6 @@ struct xilinx_dma_chan { dma_addr_t cyclic_seg_p; void (*start_transfer)(struct xilinx_dma_chan *chan); int (*stop_transfer)(struct xilinx_dma_chan *chan); - u16 tdest; bool has_vflip; }; @@ -413,7 +399,6 @@ struct xilinx_dma_config { * @dev: Device Structure * @common: DMA device structure * @chan: Driver specific DMA channel - * @mcdma: Specifies whether Multi-Channel is present or not * 
@flush_on_fsync: Flush on frame sync * @ext_addr: Indicates 64 bit addressing is supported by dma device * @pdev: Platform device structure pointer @@ -432,7 +417,6 @@ struct xilinx_dma_device { struct device *dev; struct dma_device common; struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; - bool mcdma; u32 flush_on_fsync; bool ext_addr; struct platform_device *pdev; @@ -1344,53 +1328,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); } - if (chan->has_sg && !chan->xdev->mcdma) + if (chan->has_sg) xilinx_write(chan, XILINX_DMA_REG_CURDESC, head_desc->async_tx.phys); - if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, - head_desc->async_tx.phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_CDESC(chan->tdest), - head_desc->async_tx.phys); - } - } - } - xilinx_dma_start(chan); if (chan->err) return; /* Start the transfer */ - if (chan->has_sg && !chan->xdev->mcdma) { + if (chan->has_sg) { if (chan->cyclic) xilinx_write(chan, XILINX_DMA_REG_TAILDESC, chan->cyclic_seg_v->phys); else xilinx_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); - } else if (chan->has_sg && chan->xdev->mcdma) { - if (chan->direction == DMA_MEM_TO_DEV) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - if (!chan->tdest) { - dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, - tail_segment->phys); - } else { - dma_ctrl_write(chan, - XILINX_DMA_MCRX_TDESC(chan->tdest), - tail_segment->phys); - } - } } else { struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; @@ -2016,90 +1970,6 @@ error: return NULL; } -/** - * xilinx_dma_prep_interleaved - prepare a descriptor for a - * DMA_SLAVE transaction - * @dchan: DMA channel - * @xt: Interleaved template pointer - * @flags: transfer ack flags - * - * Return: Async transaction descriptor on success and NULL on failure - */ -static struct dma_async_tx_descriptor * -xilinx_dma_prep_interleaved(struct dma_chan *dchan, - struct dma_interleaved_template *xt, - unsigned long flags) -{ - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); - struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment; - struct xilinx_axidma_desc_hw *hw; - - if (!is_slave_direction(xt->dir)) - return NULL; - - if (!xt->numf || !xt->sgl[0].size) - return NULL; - - if (xt->frame_size != 1) - return NULL; - - /* Allocate a transaction descriptor. */ - desc = xilinx_dma_alloc_tx_descriptor(chan); - if (!desc) - return NULL; - - chan->direction = xt->dir; - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = xilinx_dma_tx_submit; - - /* Get a free segment */ - segment = xilinx_axidma_alloc_tx_segment(chan); - if (!segment) - goto error; - - hw = &segment->hw; - - /* Fill in the descriptor */ - if (xt->dir != DMA_MEM_TO_DEV) - hw->buf_addr = xt->dst_start; - else - hw->buf_addr = xt->src_start; - - hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; - hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & - XILINX_DMA_BD_VSIZE_MASK; - hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & - XILINX_DMA_BD_STRIDE_MASK; - hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; - - /* - * Insert the segment into the descriptor segments - * list. 
- */ - list_add_tail(&segment->node, &desc->segments); - - - segment = list_first_entry(&desc->segments, - struct xilinx_axidma_tx_segment, node); - desc->async_tx.phys = segment->phys; - - /* For the last DMA_MEM_TO_DEV transfer, set EOP */ - if (xt->dir == DMA_MEM_TO_DEV) { - segment->hw.control |= XILINX_DMA_BD_SOP; - segment = list_last_entry(&desc->segments, - struct xilinx_axidma_tx_segment, - node); - segment->hw.control |= XILINX_DMA_BD_EOP; - } - - return &desc->async_tx; - -error: - xilinx_dma_free_tx_descriptor(chan, desc); - return NULL; -} - /** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @dchan: Driver specific DMA Channel pointer @@ -2492,7 +2362,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; chan->id = chan_id; - chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2509,7 +2378,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; chan->id = chan_id; - chan->tdest = chan_id - xdev->nr_channels; chan->has_vflip = of_property_read_bool(node, "xlnx,enable-vert-flip"); if (chan->has_vflip) { @@ -2597,11 +2465,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, struct device_node *node) { - int ret, i, nr_channels = 1; - - ret = of_property_read_u32(node, "dma-channels", &nr_channels); - if ((ret < 0) && xdev->mcdma) - dev_warn(xdev->dev, "missing dma-channels property\n"); + int i, nr_channels = 1; for (i = 0; i < nr_channels; i++) xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); @@ -2700,7 +2564,6 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); if (!of_property_read_u32(node, "xlnx,sg-length-width", &len_width)) { if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || @@ -2765,8 +2628,6 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; xdev->common.device_prep_dma_cyclic = xilinx_dma_prep_dma_cyclic; - xdev->common.device_prep_interleaved_dma = - xilinx_dma_prep_interleaved; /* Residue calculation is supported by only AXI DMA and CDMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; From c2f6b67db2bd2c333ccd30099c9bde197fa3943d Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:21 +0530 Subject: [PATCH 42/61] dmaengine: xilinx_dma: Extend dma_config struct to store irq routine handle Extend dma_config structure to store irq routine handle. It enables runtime handler selection based on xdma_ip_type and serves as preparatory patch for adding MCDMA IP support. 
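A condensed sketch of the resulting pattern (the complete change is in the diff below): each compatible's match data carries the IRQ routine that request_irq() will use, so the MCDMA handler can be slotted in later without touching the probe path.

  struct xilinx_dma_config {
          enum xdma_ip_type dmatype;
          /* ... */
          irqreturn_t (*irq_handler)(int irq, void *data);
  };

  static const struct xilinx_dma_config axidma_config = {
          .dmatype     = XDMA_TYPE_AXIDMA,
          .irq_handler = xilinx_dma_irq_handler,
  };

  /* per-channel probe */
  err = request_irq(chan->irq, xdev->dma_config->irq_handler,
                    IRQF_SHARED, "xilinx-dma-controller", chan);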
Signed-off-by: Radhey Shyam Pandey Suggested-by: Vinod Koul Link: https://lore.kernel.org/r/1571763622-29281-6-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 6d4586550f56..25042a9fd11e 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -391,6 +391,7 @@ struct xilinx_dma_config { int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, struct clk **tx_clk, struct clk **txs_clk, struct clk **rx_clk, struct clk **rxs_clk); + irqreturn_t (*irq_handler)(int irq, void *data); }; /** @@ -2402,8 +2403,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, /* Request the interrupt */ chan->irq = irq_of_parse_and_map(node, 0); - err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, - "xilinx-dma-controller", chan); + err = request_irq(chan->irq, xdev->dma_config->irq_handler, + IRQF_SHARED, "xilinx-dma-controller", chan); if (err) { dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); return err; @@ -2497,16 +2498,19 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, static const struct xilinx_dma_config axidma_config = { .dmatype = XDMA_TYPE_AXIDMA, .clk_init = axidma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; static const struct xilinx_dma_config axicdma_config = { .dmatype = XDMA_TYPE_CDMA, .clk_init = axicdma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; static const struct xilinx_dma_config axivdma_config = { .dmatype = XDMA_TYPE_VDMA, .clk_init = axivdma_clk_init, + .irq_handler = xilinx_dma_irq_handler, }; static const struct of_device_id xilinx_dma_of_ids[] = { From 6ccd692bfb7fc44a6b4acd97874d8be78ecb5c91 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Tue, 22 Oct 2019 22:30:22 +0530 Subject: [PATCH 43/61] dmaengine: xilinx_dma: Add Xilinx AXI MCDMA Engine driver support Add support for AXI Multichannel Direct Memory Access (AXI MCDMA) core, which is a soft Xilinx IP core that provides high-bandwidth direct memory access between memory and AXI4-Stream target peripherals. The AXI MCDMA core provides scatter-gather interface with multiple independent transmit and receive channels. The driver supports device_prep_slave_sg slave transfer mode. Signed-off-by: Radhey Shyam Pandey Link: https://lore.kernel.org/r/1571763622-29281-7-git-send-email-radhey.shyam.pandey@xilinx.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 4 + drivers/dma/xilinx/xilinx_dma.c | 460 +++++++++++++++++++++++++++++++- 2 files changed, 455 insertions(+), 9 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 59390e80b26c..cc5780139d28 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -655,6 +655,10 @@ config XILINX_DMA destination address. AXI DMA engine provides high-bandwidth one dimensional direct memory access between memory and AXI4-Stream target peripherals. + AXI MCDMA engine provides high-bandwidth direct memory access + between memory and AXI4-Stream target peripherals. It provides + the scatter gather interface with multiple channels independent + configuration support. 
config XILINX_ZYNQMP_DMA tristate "Xilinx ZynqMP DMA Engine" diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 25042a9fd11e..d24d1a2f2bff 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -25,6 +25,12 @@ * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory * Access (DMA) between a memory-mapped source address and a memory-mapped * destination address. + * + * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft + * Xilinx IP that provides high-bandwidth direct memory access between + * memory and AXI4-Stream target peripherals. It provides scatter gather + * (SG) interface with multiple channels independent configuration support. + * */ #include @@ -116,7 +122,7 @@ #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) /* HW specific definitions */ -#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 +#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ @@ -179,6 +185,31 @@ #define xilinx_prep_dma_addr_t(addr) \ ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) + +/* AXI MCDMA Specific Registers/Offsets */ +#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000 +#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500 +#define XILINX_MCDMA_CHEN_OFFSET 0x0008 +#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010 +#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020 +#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028 +#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40) +#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40) + +/* AXI MCDMA Specific Masks/Shifts */ +#define XILINX_MCDMA_COALESCE_SHIFT 16 +#define XILINX_MCDMA_COALESCE_MAX 24 +#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5) +#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16) +#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0) +#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5) +#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6) +#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7) +#define XILINX_MCDMA_BD_EOP BIT(30) +#define XILINX_MCDMA_BD_SOP BIT(31) + /** * struct xilinx_vdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 @@ -224,6 +255,30 @@ struct xilinx_axidma_desc_hw { u32 app[XILINX_DMA_NUM_APP_WORDS]; } __aligned(64); +/** + * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA + * @next_desc: Next Descriptor Pointer @0x00 + * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 + * @buf_addr: Buffer address @0x08 + * @buf_addr_msb: MSB of Buffer address @0x0C + * @rsvd: Reserved field @0x10 + * @control: Control Information field @0x14 + * @status: Status field @0x18 + * @sideband_status: Status of sideband signals @0x1C + * @app: APP Fields @0x20 - 0x30 + */ +struct xilinx_aximcdma_desc_hw { + u32 next_desc; + u32 next_desc_msb; + u32 buf_addr; + u32 buf_addr_msb; + u32 rsvd; + u32 control; + u32 status; + u32 sideband_status; + u32 app[XILINX_DMA_NUM_APP_WORDS]; +} __aligned(64); + /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 @@ -270,6 +325,18 @@ struct xilinx_axidma_tx_segment { dma_addr_t phys; } __aligned(64); +/** + * struct xilinx_aximcdma_tx_segment - Descriptor segment + * @hw: Hardware descriptor + * @node: Node in the descriptor segments list + * @phys: Physical address of segment + */ +struct xilinx_aximcdma_tx_segment { + struct xilinx_aximcdma_desc_hw hw; + struct list_head node; + dma_addr_t phys; +} 
__aligned(64); + /** * struct xilinx_cdma_tx_segment - Descriptor segment * @hw: Hardware descriptor @@ -329,11 +396,13 @@ struct xilinx_dma_tx_descriptor { * @ext_addr: Indicates 64 bit addressing is supported by dma channel * @desc_submitcount: Descriptor h/w submitted count * @seg_v: Statically allocated segments base + * @seg_mv: Statically allocated segments base for MCDMA * @seg_p: Physical allocated segments base * @cyclic_seg_v: Statically allocated segment base for cyclic transfers * @cyclic_seg_p: Physical allocated segments base for cyclic dma * @start_transfer: Differentiate b/w DMA IP's transfer * @stop_transfer: Differentiate b/w DMA IP's quiesce + * @tdest: TDEST value for mcdma * @has_vflip: S2MM vertical flip */ struct xilinx_dma_chan { @@ -364,11 +433,13 @@ struct xilinx_dma_chan { bool ext_addr; u32 desc_submitcount; struct xilinx_axidma_tx_segment *seg_v; + struct xilinx_aximcdma_tx_segment *seg_mv; dma_addr_t seg_p; struct xilinx_axidma_tx_segment *cyclic_seg_v; dma_addr_t cyclic_seg_p; void (*start_transfer)(struct xilinx_dma_chan *chan); int (*stop_transfer)(struct xilinx_dma_chan *chan); + u16 tdest; bool has_vflip; }; @@ -378,12 +449,14 @@ struct xilinx_dma_chan { * @XDMA_TYPE_AXIDMA: Axi dma ip. * @XDMA_TYPE_CDMA: Axi cdma ip. * @XDMA_TYPE_VDMA: Axi vdma ip. + * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip. * */ enum xdma_ip_type { XDMA_TYPE_AXIDMA = 0, XDMA_TYPE_CDMA, XDMA_TYPE_VDMA, + XDMA_TYPE_AXIMCDMA }; struct xilinx_dma_config { @@ -412,6 +485,7 @@ struct xilinx_dma_config { * @nr_channels: Number of channels DMA device supports * @chan_id: DMA channel identifier * @max_buffer_len: Max buffer length + * @s2mm_index: S2MM channel index */ struct xilinx_dma_device { void __iomem *regs; @@ -430,6 +504,7 @@ struct xilinx_dma_device { u32 nr_channels; u32 chan_id; u32 max_buffer_len; + u32 s2mm_index; }; /* Macros */ @@ -530,6 +605,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, } } +static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan, + struct xilinx_aximcdma_desc_hw *hw, + dma_addr_t buf_addr, size_t sg_used) +{ + if (chan->ext_addr) { + hw->buf_addr = lower_32_bits(buf_addr + sg_used); + hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used); + } else { + hw->buf_addr = buf_addr + sg_used; + } +} + /* ----------------------------------------------------------------------------- * Descriptors and segments alloc and free */ @@ -603,6 +690,30 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) return segment; } +/** + * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment + * @chan: Driver specific DMA channel + * + * Return: The allocated segment on success and NULL on failure. 
+ */ +static struct xilinx_aximcdma_tx_segment * +xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan) +{ + struct xilinx_aximcdma_tx_segment *segment = NULL; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (!list_empty(&chan->free_seg_list)) { + segment = list_first_entry(&chan->free_seg_list, + struct xilinx_aximcdma_tx_segment, + node); + list_del(&segment->node); + } + spin_unlock_irqrestore(&chan->lock, flags); + + return segment; +} + static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) { u32 next_desc = hw->next_desc; @@ -614,6 +725,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) hw->next_desc_msb = next_desc_msb; } +static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw) +{ + u32 next_desc = hw->next_desc; + u32 next_desc_msb = hw->next_desc_msb; + + memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw)); + + hw->next_desc = next_desc; + hw->next_desc_msb = next_desc_msb; +} + /** * xilinx_dma_free_tx_segment - Free transaction segment * @chan: Driver specific DMA channel @@ -627,6 +749,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, list_add_tail(&segment->node, &chan->free_seg_list); } +/** + * xilinx_mcdma_free_tx_segment - Free transaction segment + * @chan: Driver specific DMA channel + * @segment: DMA transaction segment + */ +static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan, + struct xilinx_aximcdma_tx_segment * + segment) +{ + xilinx_mcdma_clean_hw_desc(&segment->hw); + + list_add_tail(&segment->node, &chan->free_seg_list); +} + /** * xilinx_cdma_free_tx_segment - Free transaction segment * @chan: Driver specific DMA channel @@ -681,6 +817,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, struct xilinx_vdma_tx_segment *segment, *next; struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; + struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next; if (!desc) return; @@ -696,12 +833,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, list_del(&cdma_segment->node); xilinx_cdma_free_tx_segment(chan, cdma_segment); } - } else { + } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { list_for_each_entry_safe(axidma_segment, axidma_next, &desc->segments, node) { list_del(&axidma_segment->node); xilinx_dma_free_tx_segment(chan, axidma_segment); } + } else { + list_for_each_entry_safe(aximcdma_segment, aximcdma_next, + &desc->segments, node) { + list_del(&aximcdma_segment->node); + xilinx_mcdma_free_tx_segment(chan, aximcdma_segment); + } } kfree(desc); @@ -770,10 +913,23 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) chan->cyclic_seg_v, chan->cyclic_seg_p); } - if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + spin_lock_irqsave(&chan->lock, flags); + INIT_LIST_HEAD(&chan->free_seg_list); + spin_unlock_irqrestore(&chan->lock, flags); + + /* Free memory that is allocated for BD */ + dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) * + XILINX_DMA_NUM_DESCS, chan->seg_mv, + chan->seg_p); + } + + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA && + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) { dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; } + } /** @@ -955,6 +1111,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) list_add_tail(&chan->seg_v[i].node, &chan->free_seg_list); } + } 
else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + /* Allocate the buffer descriptors. */ + chan->seg_mv = dma_alloc_coherent(chan->dev, + sizeof(*chan->seg_mv) * + XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); + if (!chan->seg_mv) { + dev_err(chan->dev, + "unable to allocate channel %d descriptors\n", + chan->id); + return -ENOMEM; + } + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { + chan->seg_mv[i].hw.next_desc = + lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_mv[i].hw.next_desc_msb = + upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_mv[i].phys = chan->seg_p + + sizeof(*chan->seg_v) * i; + list_add_tail(&chan->seg_mv[i].node, + &chan->free_seg_list); + } } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", chan->dev, @@ -970,7 +1150,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) } if (!chan->desc_pool && - (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { + ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) && + chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) { dev_err(chan->dev, "unable to allocate channel %d descriptor pool\n", chan->id); @@ -1367,6 +1548,76 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) chan->idle = false; } +/** + * xilinx_mcdma_start_transfer - Starts MCDMA transfer + * @chan: Driver specific channel struct pointer + */ +static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) +{ + struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; + struct xilinx_axidma_tx_segment *tail_segment; + u32 reg; + + /* + * lock has been held by calling functions, so we don't need it + * to take it here again. 
+ */ + + if (chan->err) + return; + + if (!chan->idle) + return; + + if (list_empty(&chan->pending_list)) + return; + + head_desc = list_first_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_desc = list_last_entry(&chan->pending_list, + struct xilinx_dma_tx_descriptor, node); + tail_segment = list_last_entry(&tail_desc->segments, + struct xilinx_axidma_tx_segment, node); + + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); + + if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) { + reg &= ~XILINX_MCDMA_COALESCE_MASK; + reg |= chan->desc_pendingcount << + XILINX_MCDMA_COALESCE_SHIFT; + } + + reg |= XILINX_MCDMA_IRQ_ALL_MASK; + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); + + /* Program current descriptor */ + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), + head_desc->async_tx.phys); + + /* Program channel enable register */ + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET); + reg |= BIT(chan->tdest); + dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg); + + /* Start the fetch of BDs for the channel */ + reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest)); + reg |= XILINX_MCDMA_CR_RUNSTOP_MASK; + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); + + xilinx_dma_start(chan); + + if (chan->err) + return; + + /* Start the transfer */ + xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest), + tail_segment->phys); + + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; + chan->idle = false; +} + /** * xilinx_dma_issue_pending - Issue pending transactions * @dchan: DMA channel @@ -1465,6 +1716,74 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) return 0; } +/** + * xilinx_mcdma_irq_handler - MCDMA Interrupt handler + * @irq: IRQ number + * @data: Pointer to the Xilinx MCDMA channel structure + * + * Return: IRQ_HANDLED/IRQ_NONE + */ +static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data) +{ + struct xilinx_dma_chan *chan = data; + u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id; + + if (chan->direction == DMA_DEV_TO_MEM) + ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET; + else + ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET; + + /* Read the channel id raising the interrupt*/ + chan_sermask = dma_ctrl_read(chan, ser_offset); + chan_id = ffs(chan_sermask); + + if (!chan_id) + return IRQ_NONE; + + if (chan->direction == DMA_DEV_TO_MEM) + chan_offset = chan->xdev->s2mm_index; + + chan_offset = chan_offset + (chan_id - 1); + chan = chan->xdev->chan[chan_offset]; + /* Read the status and ack the interrupts. */ + status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest)); + if (!(status & XILINX_MCDMA_IRQ_ALL_MASK)) + return IRQ_NONE; + + dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest), + status & XILINX_MCDMA_IRQ_ALL_MASK); + + if (status & XILINX_MCDMA_IRQ_ERR_MASK) { + dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n", + chan, + dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET), + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET + (chan->tdest)), + dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET + (chan->tdest))); + chan->err = true; + } + + if (status & XILINX_MCDMA_IRQ_DELAY_MASK) { + /* + * Device takes too long to do the transfer when user requires + * responsiveness. 
+ */ + dev_dbg(chan->dev, "Inter-packet latency too long\n"); + } + + if (status & XILINX_MCDMA_IRQ_IOC_MASK) { + spin_lock(&chan->lock); + xilinx_dma_complete_descriptor(chan); + chan->idle = true; + chan->start_transfer(chan); + spin_unlock(&chan->lock); + } + + tasklet_schedule(&chan->tasklet); + return IRQ_HANDLED; +} + /** * xilinx_dma_irq_handler - DMA Interrupt handler * @irq: IRQ number @@ -1971,6 +2290,104 @@ error: return NULL; } +/** + * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @dchan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: transfer ack flags + * @context: APP words of the descriptor + * + * Return: Async transaction descriptor on success and NULL on failure + */ +static struct dma_async_tx_descriptor * +xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + struct xilinx_dma_tx_descriptor *desc; + struct xilinx_aximcdma_tx_segment *segment = NULL; + u32 *app_w = (u32 *)context; + struct scatterlist *sg; + size_t copy; + size_t sg_used; + unsigned int i; + + if (!is_slave_direction(direction)) + return NULL; + + /* Allocate a transaction descriptor. */ + desc = xilinx_dma_alloc_tx_descriptor(chan); + if (!desc) + return NULL; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = xilinx_dma_tx_submit; + + /* Build transactions using information in the scatter gather list */ + for_each_sg(sgl, sg, sg_len, i) { + sg_used = 0; + + /* Loop until the entire scatterlist entry is used */ + while (sg_used < sg_dma_len(sg)) { + struct xilinx_aximcdma_desc_hw *hw; + + /* Get a free segment */ + segment = xilinx_aximcdma_alloc_tx_segment(chan); + if (!segment) + goto error; + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the hw limit + */ + copy = min_t(size_t, sg_dma_len(sg) - sg_used, + chan->xdev->max_buffer_len); + hw = &segment->hw; + + /* Fill in the descriptor */ + xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg), + sg_used); + hw->control = copy; + + if (chan->direction == DMA_MEM_TO_DEV && app_w) { + memcpy(hw->app, app_w, sizeof(u32) * + XILINX_DMA_NUM_APP_WORDS); + } + + sg_used += copy; + /* + * Insert the segment into the descriptor segments + * list. 
+ */ + list_add_tail(&segment->node, &desc->segments); + } + } + + segment = list_first_entry(&desc->segments, + struct xilinx_aximcdma_tx_segment, node); + desc->async_tx.phys = segment->phys; + + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ + if (chan->direction == DMA_MEM_TO_DEV) { + segment->hw.control |= XILINX_MCDMA_BD_SOP; + segment = list_last_entry(&desc->segments, + struct xilinx_aximcdma_tx_segment, + node); + segment->hw.control |= XILINX_MCDMA_BD_EOP; + } + + return &desc->async_tx; + +error: + xilinx_dma_free_tx_descriptor(chan, desc); + + return NULL; +} + /** * xilinx_dma_terminate_all - Halt the channel and free descriptors * @dchan: Driver specific DMA Channel pointer @@ -2363,6 +2780,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { chan->direction = DMA_MEM_TO_DEV; chan->id = chan_id; + chan->tdest = chan_id; chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { @@ -2379,6 +2797,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, "xlnx,axi-dma-s2mm-channel")) { chan->direction = DMA_DEV_TO_MEM; chan->id = chan_id; + xdev->s2mm_index = xdev->nr_channels; + chan->tdest = chan_id - xdev->nr_channels; chan->has_vflip = of_property_read_bool(node, "xlnx,enable-vert-flip"); if (chan->has_vflip) { @@ -2387,7 +2807,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, XILINX_VDMA_ENABLE_VERTICAL_FLIP; } - chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) + chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET; + else + chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; + if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; chan->config.park = 1; @@ -2402,7 +2826,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, } /* Request the interrupt */ - chan->irq = irq_of_parse_and_map(node, 0); + chan->irq = irq_of_parse_and_map(node, chan->tdest); err = request_irq(chan->irq, xdev->dma_config->irq_handler, IRQF_SHARED, "xilinx-dma-controller", chan); if (err) { @@ -2413,6 +2837,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { chan->start_transfer = xilinx_dma_start_transfer; chan->stop_transfer = xilinx_dma_stop_transfer; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + chan->start_transfer = xilinx_mcdma_start_transfer; + chan->stop_transfer = xilinx_dma_stop_transfer; } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->start_transfer = xilinx_cdma_start_transfer; chan->stop_transfer = xilinx_cdma_stop_transfer; @@ -2466,7 +2893,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, struct device_node *node) { - int i, nr_channels = 1; + int ret, i, nr_channels = 1; + + ret = of_property_read_u32(node, "dma-channels", &nr_channels); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0) + dev_warn(xdev->dev, "missing dma-channels property\n"); for (i = 0; i < nr_channels; i++) xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); @@ -2501,6 +2932,11 @@ static const struct xilinx_dma_config axidma_config = { .irq_handler = xilinx_dma_irq_handler, }; +static const struct xilinx_dma_config aximcdma_config = { + .dmatype = XDMA_TYPE_AXIMCDMA, + .clk_init = axidma_clk_init, + .irq_handler = xilinx_mcdma_irq_handler, +}; 
static const struct xilinx_dma_config axicdma_config = { .dmatype = XDMA_TYPE_CDMA, .clk_init = axicdma_clk_init, @@ -2517,6 +2953,7 @@ static const struct of_device_id xilinx_dma_of_ids[] = { { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, + { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config }, {} }; MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); @@ -2567,7 +3004,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) /* Retrieve the DMA engine properties from the device tree */ xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); - if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA || + xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { if (!of_property_read_u32(node, "xlnx,sg-length-width", &len_width)) { if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || @@ -2640,7 +3078,9 @@ static int xilinx_dma_probe(struct platform_device *pdev) xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; /* Residue calculation is supported by only AXI DMA and CDMA */ xdev->common.residue_granularity = - DMA_RESIDUE_GRANULARITY_SEGMENT; + DMA_RESIDUE_GRANULARITY_SEGMENT; + } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) { + xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg; } else { xdev->common.device_prep_interleaved_dma = xilinx_vdma_dma_prep_interleaved; @@ -2676,6 +3116,8 @@ static int xilinx_dma_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); + else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) + dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n"); else dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); From be80507d45bea0358b54d1918db144d90a2ecd78 Mon Sep 17 00:00:00 2001 From: Zhou Yanjie Date: Fri, 25 Oct 2019 01:21:09 +0800 Subject: [PATCH 44/61] dt-bindings: dmaengine: Add X1000 bindings. Add the dmaengine bindings for the X1000 Soc from Ingenic. Signed-off-by: Zhou Yanjie Reviewed-by: Rob Herring Link: https://lore.kernel.org/r/1571937670-30828-2-git-send-email-zhouyanjie@zoho.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/jz4780-dma.txt | 3 +- include/dt-bindings/dma/x1000-dma.h | 40 +++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 include/dt-bindings/dma/x1000-dma.h diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt index 636fcb26b164..ec89782d9498 100644 --- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt +++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt @@ -7,10 +7,11 @@ Required properties: * ingenic,jz4725b-dma * ingenic,jz4770-dma * ingenic,jz4780-dma + * ingenic,x1000-dma - reg: Should contain the DMA channel registers location and length, followed by the DMA controller registers location and length. - interrupts: Should contain the interrupt specifier of the DMA controller. -- clocks: Should contain a clock specifier for the JZ4780 PDMA clock. +- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock. - #dma-cells: Must be <2>. Number of integer cells in the dmas property of DMA clients (see below). 
diff --git a/include/dt-bindings/dma/x1000-dma.h b/include/dt-bindings/dma/x1000-dma.h new file mode 100644 index 000000000000..401e1656e696 --- /dev/null +++ b/include/dt-bindings/dma/x1000-dma.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * This header provides macros for X1000 DMA bindings. + * + * Copyright (c) 2019 Zhou Yanjie + */ + +#ifndef __DT_BINDINGS_DMA_X1000_DMA_H__ +#define __DT_BINDINGS_DMA_X1000_DMA_H__ + +/* + * Request type numbers for the X1000 DMA controller (written to the DRTn + * register for the channel). + */ +#define X1000_DMA_DMIC_RX 0x5 +#define X1000_DMA_I2S0_TX 0x6 +#define X1000_DMA_I2S0_RX 0x7 +#define X1000_DMA_AUTO 0x8 +#define X1000_DMA_UART2_TX 0x10 +#define X1000_DMA_UART2_RX 0x11 +#define X1000_DMA_UART1_TX 0x12 +#define X1000_DMA_UART1_RX 0x13 +#define X1000_DMA_UART0_TX 0x14 +#define X1000_DMA_UART0_RX 0x15 +#define X1000_DMA_SSI0_TX 0x16 +#define X1000_DMA_SSI0_RX 0x17 +#define X1000_DMA_MSC0_TX 0x1a +#define X1000_DMA_MSC0_RX 0x1b +#define X1000_DMA_MSC1_TX 0x1c +#define X1000_DMA_MSC1_RX 0x1d +#define X1000_DMA_PCM0_TX 0x20 +#define X1000_DMA_PCM0_RX 0x21 +#define X1000_DMA_SMB0_TX 0x24 +#define X1000_DMA_SMB0_RX 0x25 +#define X1000_DMA_SMB1_TX 0x26 +#define X1000_DMA_SMB1_RX 0x27 +#define X1000_DMA_SMB2_TX 0x28 +#define X1000_DMA_SMB2_RX 0x29 + +#endif /* __DT_BINDINGS_DMA_X1000_DMA_H__ */ From fee175e44cb34ee86e589b2c22c617d00aaac21b Mon Sep 17 00:00:00 2001 From: Zhou Yanjie Date: Fri, 25 Oct 2019 01:21:10 +0800 Subject: [PATCH 45/61] dmaengine: JZ4780: Add support for the X1000. Add support for probing the dma-jz4780 driver on the X1000 Soc. Signed-off-by: Zhou Yanjie Reviewed-by: Paul Cercueil Link: https://lore.kernel.org/r/1571937670-30828-3-git-send-email-zhouyanjie@zoho.com Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index f42b3ef8e036..848c3281b7ef 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1013,11 +1013,18 @@ static const struct jz4780_dma_soc_data jz4780_dma_soc_data = { .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA, }; +static const struct jz4780_dma_soc_data x1000_dma_soc_data = { + .nb_channels = 8, + .transfer_ord_max = 7, + .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, +}; + static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, + { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); From 9568feda4e2914bc50c5cd4fbca57210815f9dc9 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 5 Nov 2019 00:16:22 +0800 Subject: [PATCH 46/61] dmaengine: dma-jz4780: add missed clk_disable_unprepare in remove The remove misses to disable and unprepare jzdma->clk. Add a call to clk_disable_unprepare to fix it. 
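For reference, the pairing this fix restores looks roughly like the sketch below. The foo_* names are hypothetical and error paths are trimmed; the point is only that a clock enabled with clk_prepare_enable() in probe must be dropped with clk_disable_unprepare() in remove, since devm_clk_get() by itself only handles the clk_put() on teardown, not the disable/unprepare.

#include <linux/clk.h>
#include <linux/platform_device.h>

struct foo_dma {
        struct clk *clk;
        /* ... */
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dma *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        ret = clk_prepare_enable(priv->clk);    /* clock gated on here ... */
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_dma *priv = platform_get_drvdata(pdev);

        clk_disable_unprepare(priv->clk);       /* ... so it must be dropped here */
        return 0;
}
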
Signed-off-by: Chuhong Yuan Acked-by: Paul Cercueil Link: https://lore.kernel.org/r/20191104161622.11758-1-hslester96@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 848c3281b7ef..fa626acdc9b9 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -981,6 +981,7 @@ static int jz4780_dma_remove(struct platform_device *pdev) of_dma_controller_free(pdev->dev.of_node); + clk_disable_unprepare(jzdma->clk); free_irq(jzdma->irq, jzdma); for (i = 0; i < jzdma->soc_data->nb_channels; i++) From 7d4a069c5889b4f36f89841d5dea538aef88a9e1 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 6 Nov 2019 22:01:27 +0530 Subject: [PATCH 47/61] dmaengine: milbeaut-hdmac: remove redundant error log platform_get_irq() prints the error message, so caller need not do so, remove the error line in this driver for platform_get_irq() Reported-by: kbuild test robot Signed-off-by: Vinod Koul Link: https://lore.kernel.org/r/20191106163128.1980714-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/milbeaut-hdmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c index 2bb33535ab9e..8853d442430b 100644 --- a/drivers/dma/milbeaut-hdmac.c +++ b/drivers/dma/milbeaut-hdmac.c @@ -431,11 +431,8 @@ static int milbeaut_hdmac_chan_init(struct platform_device *pdev, int irq, ret; irq = platform_get_irq(pdev, chan_id); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", - chan_id); + if (irq < 0) return irq; - } irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d", chan_id); From cdc3e306236bf3a43962683b283a36095c21c202 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 6 Nov 2019 22:01:28 +0530 Subject: [PATCH 48/61] dmaengine: milbeaut-xdmac: remove redundant error log platform_get_irq() prints the error message, so caller need not do so, remove the error line in this driver for platform_get_irq() Reported-by: kbuild test robot Signed-off-by: Vinod Koul Link: https://lore.kernel.org/r/20191106163128.1980714-2-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/milbeaut-xdmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c index 3d5b1926a58d..ab3d2f395378 100644 --- a/drivers/dma/milbeaut-xdmac.c +++ b/drivers/dma/milbeaut-xdmac.c @@ -269,11 +269,8 @@ static int milbeaut_xdmac_chan_init(struct platform_device *pdev, int irq, ret; irq = platform_get_irq(pdev, chan_id); - if (irq < 0) { - dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", - chan_id); + if (irq < 0) return irq; - } irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d", chan_id); From 051f5175f226118ca220b93b87e07e515e727bcb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 12 Nov 2019 19:11:43 +0000 Subject: [PATCH 49/61] dmaengine: iop-adma: clean up an indentation issue There is a statement that is indented too deeply, remove the extraneous indentation. 
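For clarity, this is the shape the cleaned-up block ends up with: the pr_debug() statement sits at the same tab depth as the statements around it, and only its wrapped argument line is indented further to line up under the open parenthesis. A trimmed excerpt, reconstructed from the hunk below:

        list_for_each_entry(grp_iter, &iop_chan->chain, chain_node) {
                zero_sum_result |= iop_desc_get_zero_result(grp_iter);
                pr_debug("\titer%d result: %d\n",
                         grp_iter->idx, zero_sum_result);
                slot_cnt -= slots_per_op;
                /* ... */
        }
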
Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20191112191143.282814-1-colin.king@canonical.com Signed-off-by: Vinod Koul --- drivers/dma/iop-adma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 4dc5478fc156..db0e274126fb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -173,7 +173,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) &iop_chan->chain, chain_node) { zero_sum_result |= iop_desc_get_zero_result(grp_iter); - pr_debug("\titer%d result: %d\n", + pr_debug("\titer%d result: %d\n", grp_iter->idx, zero_sum_result); slot_cnt -= slots_per_op; if (slot_cnt == 0) From 1ff95243257fad07290dcbc5f7a6ad79d6e703e2 Mon Sep 17 00:00:00 2001 From: Satendra Singh Thakur Date: Sat, 9 Nov 2019 17:05:23 +0530 Subject: [PATCH 50/61] dmaengine: mediatek: hsdma_probe: fixed a memory leak when devm_request_irq fails When devm_request_irq fails, currently, the function dma_async_device_unregister gets called. This doesn't free the resources allocated by of_dma_controller_register. Therefore, we have called of_dma_controller_free for this purpose. Signed-off-by: Satendra Singh Thakur Link: https://lore.kernel.org/r/20191109113523.6067-1-sst2005@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/mediatek/mtk-hsdma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 1a2028e1c29e..4c58da742143 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); - goto err_unregister; + goto err_free; } platform_set_drvdata(pdev, hsdma); @@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return 0; +err_free: + of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd); From 5c5332a6a229acc856811bdce1c0845986e8306a Mon Sep 17 00:00:00 2001 From: Satendra Singh Thakur Date: Sat, 9 Nov 2019 17:06:09 +0530 Subject: [PATCH 51/61] dmaengine: zx: remove: removed dmam_pool_destroy In the probe method dmam_pool_create is used. Therefore, there is no need to explicitly call dmam_pool_destroy in remove method as this will be automatically taken care by devres Signed-off-by: Satendra Singh Thakur Link: https://lore.kernel.org/r/20191109113609.6159-1-sst2005@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/zx_dma.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c index 6b457e683e70..5fe2e8b9a7b8 100644 --- a/drivers/dma/zx_dma.c +++ b/drivers/dma/zx_dma.c @@ -889,7 +889,6 @@ static int zx_dma_remove(struct platform_device *op) list_del(&c->vc.chan.device_node); } clk_disable_unprepare(d->clk); - dmam_pool_destroy(d->pool); return 0; } From fa805360f4cfa34cb3575715e1c8b4f9a253b474 Mon Sep 17 00:00:00 2001 From: Green Wan Date: Thu, 7 Nov 2019 16:49:19 +0800 Subject: [PATCH 52/61] dt-bindings: dmaengine: sf-pdma: add bindins for SiFive PDMA Add DT bindings document for Platform DMA(PDMA) driver of board, HiFive Unleashed Rev A00. 
Reviewed-by: Rob Herring Reviewed-by: Pragnesh Patel Signed-off-by: Green Wan Link: https://lore.kernel.org/r/20191107084955.7580-2-green.wan@sifive.com Signed-off-by: Vinod Koul --- .../bindings/dma/sifive,fu540-c000-pdma.yaml | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml new file mode 100644 index 000000000000..2ca3ddbe1ff4 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dma/sifive,fu540-c000-pdma.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: SiFive Unleashed Rev C000 Platform DMA + +maintainers: + - Green Wan + - Palmer Debbelt + - Paul Walmsley + +description: | + Platform DMA is a DMA engine of SiFive Unleashed. It supports 4 + channels. Each channel has 2 interrupts. One is for DMA done and + the other is for DME error. + + In different SoC, DMA could be attached to different IRQ line. + DT file need to be changed to meet the difference. For technical + doc, + + https://static.dev.sifive.com/FU540-C000-v1.0.pdf + +properties: + compatible: + items: + - const: sifive,fu540-c000-pdma + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 8 + + '#dma-cells': + const: 1 + +required: + - compatible + - reg + - interrupts + - '#dma-cells' + +examples: + - | + dma@3000000 { + compatible = "sifive,fu540-c000-pdma"; + reg = <0x0 0x3000000 0x0 0x8000>; + interrupts = <23 24 25 26 27 28 29 30>; + #dma-cells = <1>; + }; + +... From 6973886ad58e6b4988813331abb76ae0b364a9c2 Mon Sep 17 00:00:00 2001 From: Green Wan Date: Thu, 7 Nov 2019 16:49:21 +0800 Subject: [PATCH 53/61] dmaengine: sf-pdma: add platform DMA support for HiFive Unleashed A00 Add PDMA driver, sf-pdma, to enable DMA engine on HiFive Unleashed Rev A00 board. - Implement dmaengine APIs, support MEM_TO_MEM async copy. - Tested by DMA Test client - Supports 4 channels DMA, each channel has 1 done and 1 err interrupt connected to platform-level interrupt controller (PLIC). - Depends on DMA_ENGINE and DMA_VIRTUAL_CHANNELS The datasheet is here: https://static.dev.sifive.com/FU540-C000-v1.0.pdf Follow the DMAengine controller doc, "./Documentation/driver-api/dmaengine/provider.rst" to implement DMA engine. And use the dma test client in doc, "./Documentation/driver-api/dmaengine/dmatest.rst", to test. Each DMA channel has separate HW regs and support done and error ISRs. 4 channels share 1 done and 1 err ISRs. There's no expander/arbitrator in DMA HW. 
------ ------ | |--< done 23 >--|ch 0| | |--< err 24 >--| | (dma0chan0) | | ------ | | ------ | |--< done 25 >--|ch 1| | |--< err 26 >--| | (dma0chan1) |PLIC| ------ | | ------ | |--< done 27 >--|ch 2| | |--< err 28 >--| | (dma0chan2) | | ------ | | ------ | |--< done 29 >--|ch 3| | |--< err 30 >--| | (dma0chan3) ------ ------ Signed-off-by: Green Wan Link: https://lore.kernel.org/r/20191107084955.7580-4-green.wan@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 + drivers/dma/Makefile | 1 + drivers/dma/sf-pdma/Kconfig | 6 + drivers/dma/sf-pdma/Makefile | 1 + drivers/dma/sf-pdma/sf-pdma.c | 621 ++++++++++++++++++++++++++++++++++ drivers/dma/sf-pdma/sf-pdma.h | 122 +++++++ 6 files changed, 753 insertions(+) create mode 100644 drivers/dma/sf-pdma/Kconfig create mode 100644 drivers/dma/sf-pdma/Makefile create mode 100644 drivers/dma/sf-pdma/sf-pdma.c create mode 100644 drivers/dma/sf-pdma/sf-pdma.h diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cc5780139d28..03dfee5c66d9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -689,6 +689,8 @@ source "drivers/dma/dw-edma/Kconfig" source "drivers/dma/hsu/Kconfig" +source "drivers/dma/sf-pdma/Kconfig" + source "drivers/dma/sh/Kconfig" source "drivers/dma/ti/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 3fdea8a1cc86..42d7e2fc64fa 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ +obj-$(CONFIG_SF_PDMA) += sf-pdma/ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_STM32_DMA) += stm32-dma.o diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig new file mode 100644 index 000000000000..f8ffa02e279f --- /dev/null +++ b/drivers/dma/sf-pdma/Kconfig @@ -0,0 +1,6 @@ +config SF_PDMA + tristate "Sifive PDMA controller driver" + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the SiFive PDMA controller. 
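The MEM_TO_MEM memcpy support selected above is exercised through the generic dmaengine client API, which is how the dmatest client mentioned in the commit message drives it. The following is a minimal, hypothetical sketch (invented names, buffers assumed to be DMA-mapped already), not code from this patch:

#include <linux/dmaengine.h>

/* Hypothetical helper: copy len bytes between two already-mapped buffers. */
static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_chan_by_mask(&mask); /* any memcpy-capable channel */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx) {
                dma_release_channel(chan);
                return -EIO;
        }

        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        /* Poll for completion; a real client would normally use a callback. */
        if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
                dma_release_channel(chan);
                return -EIO;
        }

        dma_release_channel(chan);
        return 0;
}

In this driver the prep step maps onto sf_pdma_prep_dma_memcpy(), the submit goes through the virt-dma cookie machinery, and dma_async_issue_pending() ends up in sf_pdma_issue_pending()/sf_pdma_xfer_desc().
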
diff --git a/drivers/dma/sf-pdma/Makefile b/drivers/dma/sf-pdma/Makefile new file mode 100644 index 000000000000..764552ab8d0a --- /dev/null +++ b/drivers/dma/sf-pdma/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SF_PDMA) += sf-pdma.o diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c new file mode 100644 index 000000000000..16fe00553496 --- /dev/null +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/** + * SiFive FU540 Platform DMA driver + * Copyright (C) 2019 SiFive + * + * Based partially on: + * - drivers/dma/fsl-edma.c + * - drivers/dma/dw-edma/ + * - drivers/dma/pxa-dma.c + * + * See the following sources for further documentation: + * - Chapter 12 "Platform DMA Engine (PDMA)" of + * SiFive FU540-C000 v1.0 + * https://static.dev.sifive.com/FU540-C000-v1.0.pdf + */ +#include +#include +#include +#include +#include +#include +#include + +#include "sf-pdma.h" + +#ifndef readq +static inline unsigned long long readq(void __iomem *addr) +{ + return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(unsigned long long v, void __iomem *addr) +{ + writel(lower_32_bits(v), addr); + writel(upper_32_bits(v), addr + 4); +} +#endif + +static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan) +{ + return container_of(dchan, struct sf_pdma_chan, vchan.chan); +} + +static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct sf_pdma_desc, vdesc); +} + +static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan) +{ + struct sf_pdma_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + if (chan->desc && !chan->desc->in_use) { + spin_unlock_irqrestore(&chan->lock, flags); + return chan->desc; + } + + spin_unlock_irqrestore(&chan->lock, flags); + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->chan = chan; + + return desc; +} + +static void sf_pdma_fill_desc(struct sf_pdma_desc *desc, + u64 dst, u64 src, u64 size) +{ + desc->xfer_type = PDMA_FULL_SPEED; + desc->xfer_size = size; + desc->dst_addr = dst; + desc->src_addr = src; +} + +static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan) +{ + struct pdma_regs *regs = &chan->regs; + + writel(PDMA_CLEAR_CTRL, regs->ctrl); +} + +static struct dma_async_tx_descriptor * +sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + struct sf_pdma_desc *desc; + + if (chan && (!len || !dest || !src)) { + dev_err(chan->pdma->dma_dev.dev, + "Please check dma len, dest, src!\n"); + return NULL; + } + + desc = sf_pdma_alloc_desc(chan); + if (!desc) + return NULL; + + desc->in_use = true; + desc->dirn = DMA_MEM_TO_MEM; + desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); + + spin_lock_irqsave(&chan->vchan.lock, flags); + chan->desc = desc; + sf_pdma_fill_desc(desc, dest, src, len); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return desc->async_tx; +} + +static int sf_pdma_slave_config(struct dma_chan *dchan, + struct dma_slave_config *cfg) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + + memcpy(&chan->cfg, cfg, sizeof(*cfg)); + + return 0; +} + +static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + struct pdma_regs *regs = &chan->regs; + + dma_cookie_init(dchan); + 
writel(PDMA_CLAIM_MASK, regs->ctrl); + + return 0; +} + +static void sf_pdma_disable_request(struct sf_pdma_chan *chan) +{ + struct pdma_regs *regs = &chan->regs; + + writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl); +} + +static void sf_pdma_free_chan_resources(struct dma_chan *dchan) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vchan.lock, flags); + sf_pdma_disable_request(chan); + kfree(chan->desc); + chan->desc = NULL; + vchan_get_all_descriptors(&chan->vchan, &head); + vchan_dma_desc_free_list(&chan->vchan, &head); + sf_pdma_disclaim_chan(chan); + spin_unlock_irqrestore(&chan->vchan.lock, flags); +} + +static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd = NULL; + struct pdma_regs *regs = &chan->regs; + unsigned long flags; + u64 residue = 0; + struct sf_pdma_desc *desc; + struct dma_async_tx_descriptor *tx; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + tx = &chan->desc->vdesc.tx; + if (cookie == tx->chan->completed_cookie) + goto out; + + if (cookie == tx->cookie) { + residue = readq(regs->residue); + } else { + vd = vchan_find_desc(&chan->vchan, cookie); + if (!vd) + goto out; + + desc = to_sf_pdma_desc(vd); + residue = desc->xfer_size; + } + +out: + spin_unlock_irqrestore(&chan->vchan.lock, flags); + return residue; +} + +static enum dma_status +sf_pdma_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + enum dma_status status; + + status = dma_cookie_status(dchan, cookie, txstate); + + if (txstate && status != DMA_ERROR) + dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie)); + + return status; +} + +static int sf_pdma_terminate_all(struct dma_chan *dchan) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vchan.lock, flags); + sf_pdma_disable_request(chan); + kfree(chan->desc); + chan->desc = NULL; + chan->xfer_err = false; + vchan_get_all_descriptors(&chan->vchan, &head); + vchan_dma_desc_free_list(&chan->vchan, &head); + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return 0; +} + +static void sf_pdma_enable_request(struct sf_pdma_chan *chan) +{ + struct pdma_regs *regs = &chan->regs; + u32 v; + + v = PDMA_CLAIM_MASK | + PDMA_ENABLE_DONE_INT_MASK | + PDMA_ENABLE_ERR_INT_MASK | + PDMA_RUN_MASK; + + writel(v, regs->ctrl); +} + +static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan) +{ + struct sf_pdma_desc *desc = chan->desc; + struct pdma_regs *regs = &chan->regs; + + if (!desc) { + dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n"); + return; + } + + writel(desc->xfer_type, regs->xfer_type); + writeq(desc->xfer_size, regs->xfer_size); + writeq(desc->dst_addr, regs->dst_addr); + writeq(desc->src_addr, regs->src_addr); + + chan->desc = desc; + chan->status = DMA_IN_PROGRESS; + sf_pdma_enable_request(chan); +} + +static void sf_pdma_issue_pending(struct dma_chan *dchan) +{ + struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan); + unsigned long flags; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + if (vchan_issue_pending(&chan->vchan) && chan->desc) + sf_pdma_xfer_desc(chan); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); +} + +static void sf_pdma_free_desc(struct virt_dma_desc *vdesc) +{ + struct sf_pdma_desc *desc; + + desc = to_sf_pdma_desc(vdesc); + desc->in_use = false; +} + +static void sf_pdma_donebh_tasklet(unsigned long 
arg) +{ + struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg; + struct sf_pdma_desc *desc = chan->desc; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (chan->xfer_err) { + chan->retries = MAX_RETRY; + chan->status = DMA_COMPLETE; + chan->xfer_err = false; + } + spin_unlock_irqrestore(&chan->lock, flags); + + dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); +} + +static void sf_pdma_errbh_tasklet(unsigned long arg) +{ + struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg; + struct sf_pdma_desc *desc = chan->desc; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (chan->retries <= 0) { + /* fail to recover */ + spin_unlock_irqrestore(&chan->lock, flags); + dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); + } else { + /* retry */ + chan->retries--; + chan->xfer_err = true; + chan->status = DMA_ERROR; + + sf_pdma_enable_request(chan); + spin_unlock_irqrestore(&chan->lock, flags); + } +} + +static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id) +{ + struct sf_pdma_chan *chan = dev_id; + struct pdma_regs *regs = &chan->regs; + unsigned long flags; + u64 residue; + + spin_lock_irqsave(&chan->vchan.lock, flags); + writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl); + residue = readq(regs->residue); + + if (!residue) { + list_del(&chan->desc->vdesc.node); + vchan_cookie_complete(&chan->desc->vdesc); + } else { + /* submit next trascatioin if possible */ + struct sf_pdma_desc *desc = chan->desc; + + desc->src_addr += desc->xfer_size - residue; + desc->dst_addr += desc->xfer_size - residue; + desc->xfer_size = residue; + + sf_pdma_xfer_desc(chan); + } + + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + tasklet_hi_schedule(&chan->done_tasklet); + + return IRQ_HANDLED; +} + +static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id) +{ + struct sf_pdma_chan *chan = dev_id; + struct pdma_regs *regs = &chan->regs; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl); + spin_unlock_irqrestore(&chan->lock, flags); + + tasklet_schedule(&chan->err_tasklet); + + return IRQ_HANDLED; +} + +/** + * sf_pdma_irq_init() - Init PDMA IRQ Handlers + * @pdev: pointer of platform_device + * @pdma: pointer of PDMA engine. Caller should check NULL + * + * Initialize DONE and ERROR interrupt handler for 4 channels. Caller should + * make sure the pointer passed in are non-NULL. This function should be called + * only one time during the device probe. + * + * Context: Any context. 
+ * + * Return: + * * 0 - OK to init all IRQ handlers + * * -EINVAL - Fail to request IRQ + */ +static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma) +{ + int irq, r, i; + struct sf_pdma_chan *chan; + + for (i = 0; i < pdma->n_chans; i++) { + chan = &pdma->chans[i]; + + irq = platform_get_irq(pdev, i * 2); + if (irq < 0) { + dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i); + return -EINVAL; + } + + r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0, + dev_name(&pdev->dev), (void *)chan); + if (r) { + dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r); + return -EINVAL; + } + + chan->txirq = irq; + + irq = platform_get_irq(pdev, (i * 2) + 1); + if (irq < 0) { + dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i); + return -EINVAL; + } + + r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0, + dev_name(&pdev->dev), (void *)chan); + if (r) { + dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r); + return -EINVAL; + } + + chan->errirq = irq; + } + + return 0; +} + +/** + * sf_pdma_setup_chans() - Init settings of each channel + * @pdma: pointer of PDMA engine. Caller should check NULL + * + * Initialize all data structure and register base. Caller should make sure + * the pointer passed in are non-NULL. This function should be called only + * one time during the device probe. + * + * Context: Any context. + * + * Return: none + */ +#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch))) +static void sf_pdma_setup_chans(struct sf_pdma *pdma) +{ + int i; + struct sf_pdma_chan *chan; + + INIT_LIST_HEAD(&pdma->dma_dev.channels); + + for (i = 0; i < pdma->n_chans; i++) { + chan = &pdma->chans[i]; + + chan->regs.ctrl = + SF_PDMA_REG_BASE(i) + PDMA_CTRL; + chan->regs.xfer_type = + SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE; + chan->regs.xfer_size = + SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE; + chan->regs.dst_addr = + SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR; + chan->regs.src_addr = + SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR; + chan->regs.act_type = + SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE; + chan->regs.residue = + SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE; + chan->regs.cur_dst_addr = + SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR; + chan->regs.cur_src_addr = + SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR; + + chan->pdma = pdma; + chan->pm_state = RUNNING; + chan->slave_id = i; + chan->xfer_err = false; + spin_lock_init(&chan->lock); + + chan->vchan.desc_free = sf_pdma_free_desc; + vchan_init(&chan->vchan, &pdma->dma_dev); + + writel(PDMA_CLEAR_CTRL, chan->regs.ctrl); + + tasklet_init(&chan->done_tasklet, + sf_pdma_donebh_tasklet, (unsigned long)chan); + tasklet_init(&chan->err_tasklet, + sf_pdma_errbh_tasklet, (unsigned long)chan); + } +} + +static int sf_pdma_probe(struct platform_device *pdev) +{ + struct sf_pdma *pdma; + struct sf_pdma_chan *chan; + struct resource *res; + int len, chans; + int ret; + const enum dma_slave_buswidth widths = + DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | + DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | + DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | + DMA_SLAVE_BUSWIDTH_64_BYTES; + + chans = PDMA_NR_CH; + len = sizeof(*pdma) + sizeof(*chan) * chans; + pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); + if (!pdma) + return -ENOMEM; + + pdma->n_chans = chans; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdma->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdma->membase)) + goto ERR_MEMBASE; + + ret = sf_pdma_irq_init(pdev, pdma); + if (ret) + goto ERR_INITIRQ; + 
+ sf_pdma_setup_chans(pdma); + + pdma->dma_dev.dev = &pdev->dev; + + /* Setup capability */ + dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask); + pdma->dma_dev.copy_align = 2; + pdma->dma_dev.src_addr_widths = widths; + pdma->dma_dev.dst_addr_widths = widths; + pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM); + pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + pdma->dma_dev.descriptor_reuse = true; + + /* Setup DMA APIs */ + pdma->dma_dev.device_alloc_chan_resources = + sf_pdma_alloc_chan_resources; + pdma->dma_dev.device_free_chan_resources = + sf_pdma_free_chan_resources; + pdma->dma_dev.device_tx_status = sf_pdma_tx_status; + pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy; + pdma->dma_dev.device_config = sf_pdma_slave_config; + pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all; + pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending; + + platform_set_drvdata(pdev, pdma); + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + dev_warn(&pdev->dev, + "Failed to set DMA mask. Fall back to default.\n"); + + ret = dma_async_device_register(&pdma->dma_dev); + if (ret) + goto ERR_REG_DMADEVICE; + + return 0; + +ERR_MEMBASE: + devm_kfree(&pdev->dev, pdma); + return PTR_ERR(pdma->membase); + +ERR_INITIRQ: + devm_kfree(&pdev->dev, pdma); + return ret; + +ERR_REG_DMADEVICE: + devm_kfree(&pdev->dev, pdma); + dev_err(&pdev->dev, + "Can't register SiFive Platform DMA. (%d)\n", ret); + return ret; +} + +static int sf_pdma_remove(struct platform_device *pdev) +{ + struct sf_pdma *pdma = platform_get_drvdata(pdev); + struct sf_pdma_chan *ch; + int i; + + for (i = 0; i < PDMA_NR_CH; i++) { + ch = &pdma->chans[i]; + + devm_free_irq(&pdev->dev, ch->txirq, ch); + devm_free_irq(&pdev->dev, ch->errirq, ch); + list_del(&ch->vchan.chan.device_node); + tasklet_kill(&ch->vchan.task); + tasklet_kill(&ch->done_tasklet); + tasklet_kill(&ch->err_tasklet); + } + + dma_async_device_unregister(&pdma->dma_dev); + + return 0; +} + +static const struct of_device_id sf_pdma_dt_ids[] = { + { .compatible = "sifive,fu540-c000-pdma" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); + +static struct platform_driver sf_pdma_driver = { + .probe = sf_pdma_probe, + .remove = sf_pdma_remove, + .driver = { + .name = "sf-pdma", + .of_match_table = of_match_ptr(sf_pdma_dt_ids), + }, +}; + +static int __init sf_pdma_init(void) +{ + return platform_driver_register(&sf_pdma_driver); +} + +static void __exit sf_pdma_exit(void) +{ + platform_driver_unregister(&sf_pdma_driver); +} + +/* do early init */ +subsys_initcall(sf_pdma_init); +module_exit(sf_pdma_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SiFive Platform DMA driver"); +MODULE_AUTHOR("Green Wan "); diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h new file mode 100644 index 000000000000..55816c9e0249 --- /dev/null +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/** + * SiFive FU540 Platform DMA driver + * Copyright (C) 2019 SiFive + * + * Based partially on: + * - drivers/dma/fsl-edma.c + * - drivers/dma/dw-edma/ + * - drivers/dma/pxa-dma.c + * + * See the following sources for further documentation: + * - Chapter 12 "Platform DMA Engine (PDMA)" of + * SiFive FU540-C000 v1.0 + * https://static.dev.sifive.com/FU540-C000-v1.0.pdf + */ +#ifndef _SF_PDMA_H +#define _SF_PDMA_H + +#include +#include + +#include "../dmaengine.h" +#include "../virt-dma.h" + +#define PDMA_NR_CH 4 + +#if (PDMA_NR_CH != 4) +#error "Please 
define PDMA_NR_CH to 4" +#endif + +#define PDMA_BASE_ADDR 0x3000000 +#define PDMA_CHAN_OFFSET 0x1000 + +/* Register Offset */ +#define PDMA_CTRL 0x000 +#define PDMA_XFER_TYPE 0x004 +#define PDMA_XFER_SIZE 0x008 +#define PDMA_DST_ADDR 0x010 +#define PDMA_SRC_ADDR 0x018 +#define PDMA_ACT_TYPE 0x104 /* Read-only */ +#define PDMA_REMAINING_BYTE 0x108 /* Read-only */ +#define PDMA_CUR_DST_ADDR 0x110 /* Read-only*/ +#define PDMA_CUR_SRC_ADDR 0x118 /* Read-only*/ + +/* CTRL */ +#define PDMA_CLEAR_CTRL 0x0 +#define PDMA_CLAIM_MASK GENMASK(0, 0) +#define PDMA_RUN_MASK GENMASK(1, 1) +#define PDMA_ENABLE_DONE_INT_MASK GENMASK(14, 14) +#define PDMA_ENABLE_ERR_INT_MASK GENMASK(15, 15) +#define PDMA_DONE_STATUS_MASK GENMASK(30, 30) +#define PDMA_ERR_STATUS_MASK GENMASK(31, 31) + +/* Transfer Type */ +#define PDMA_FULL_SPEED 0xFF000008 + +/* Error Recovery */ +#define MAX_RETRY 1 + +struct pdma_regs { + /* read-write regs */ + void __iomem *ctrl; /* 4 bytes */ + + void __iomem *xfer_type; /* 4 bytes */ + void __iomem *xfer_size; /* 8 bytes */ + void __iomem *dst_addr; /* 8 bytes */ + void __iomem *src_addr; /* 8 bytes */ + + /* read-only */ + void __iomem *act_type; /* 4 bytes */ + void __iomem *residue; /* 8 bytes */ + void __iomem *cur_dst_addr; /* 8 bytes */ + void __iomem *cur_src_addr; /* 8 bytes */ +}; + +struct sf_pdma_desc { + u32 xfer_type; + u64 xfer_size; + u64 dst_addr; + u64 src_addr; + struct virt_dma_desc vdesc; + struct sf_pdma_chan *chan; + bool in_use; + enum dma_transfer_direction dirn; + struct dma_async_tx_descriptor *async_tx; +}; + +enum sf_pdma_pm_state { + RUNNING = 0, + SUSPENDED, +}; + +struct sf_pdma_chan { + struct virt_dma_chan vchan; + enum dma_status status; + enum sf_pdma_pm_state pm_state; + u32 slave_id; + struct sf_pdma *pdma; + struct sf_pdma_desc *desc; + struct dma_slave_config cfg; + u32 attr; + dma_addr_t dma_dev_addr; + u32 dma_dev_size; + struct tasklet_struct done_tasklet; + struct tasklet_struct err_tasklet; + struct pdma_regs regs; + spinlock_t lock; /* protect chan data */ + bool xfer_err; + int txirq; + int errirq; + int retries; +}; + +struct sf_pdma { + struct dma_device dma_dev; + void __iomem *membase; + void __iomem *mappedbase; + u32 n_chans; + struct sf_pdma_chan chans[PDMA_NR_CH]; +}; + +#endif /* _SF_PDMA_H */ From b37949560b93d01f20687dfb8e1d3b9aa5633d63 Mon Sep 17 00:00:00 2001 From: Green Wan Date: Thu, 7 Nov 2019 16:49:22 +0800 Subject: [PATCH 54/61] MAINTAINERS: Add Green as SiFive PDMA driver maintainer Update MAINTAINERS for SiFive PDMA driver. Signed-off-by: Green Wan Link: https://lore.kernel.org/r/20191107084955.7580-5-green.wan@sifive.com Signed-off-by: Vinod Koul --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 296de2b51c83..042bdcc3480b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14776,6 +14776,12 @@ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ +SIFIVE PDMA DRIVER +M: Green Wan +S: Maintained +F: drivers/dma/sf-pdma/ +F: Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml + SIFIVE DRIVERS M: Palmer Dabbelt M: Paul Walmsley From a7e335deed174a37fc6f84f69caaeff8a08f8ff8 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Wed, 23 Oct 2019 14:31:32 +0800 Subject: [PATCH 55/61] dmaengine: sprd: Add wrap address support for link-list mode The Spreadtrum Audio compress offload mode will use 2-stage DMA transfer to save power. That means we can request 2 dma channels, one for source channel, and another one for destination channel. 
Once the source channel's transaction is done, it will trigger the destination channel's transaction automatically by hardware signal. In this case, the source channel will transfer data from IRAM buffer to the DSP fifo to decoding/encoding, once IRAM buffer is empty by transferring done, the destination channel will start to transfer data from DDR buffer to IRAM buffer. Since the destination channel will use link-list mode to fill the IRAM data, and IRAM buffer is allocated by 32K, and DDR buffer is larger to 2M, that means we need lots of link-list nodes to do a cyclic transfer, instead wasting lots of link-list memory, we can use wrap address support to reduce link-list node number, which means when the transfer address reaches the wrap address, the transfer address will jump to the wrap_to address specified by wrap_to register, and only 2 link-list nodes can do a cyclic transfer to transfer data from DDR to IRAM. Thus this patch adds wrap address to support this case. [Baolin Wang changes the commit message] Signed-off-by: Eric Long Signed-off-by: Baolin Wang Link: https://lore.kernel.org/r/85a5484bc1f3dd53ce6f92700ad8b35f30a0b096.1571812029.git.baolin.wang@linaro.org Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 13 +++++++++++++ include/linux/dma/sprd-dma.h | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 32402c2615de..9a31a315dbef 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -99,6 +99,7 @@ /* DMA_CHN_WARP_* register definition */ #define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28) #define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0) +#define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0) #define SPRD_DMA_HIGH_ADDR_OFFSET 4 /* SPRD_DMA_CHN_INTC register definition */ @@ -118,6 +119,8 @@ #define SPRD_DMA_SWT_MODE_OFFSET 26 #define SPRD_DMA_REQ_MODE_OFFSET 24 #define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0) +#define SPRD_DMA_WRAP_SEL_DEST BIT(23) +#define SPRD_DMA_WRAP_EN BIT(22) #define SPRD_DMA_FIX_SEL_OFFSET 21 #define SPRD_DMA_FIX_EN_OFFSET 20 #define SPRD_DMA_LLIST_END BIT(19) @@ -804,6 +807,8 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET; temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET; temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET; + temp |= schan->linklist.wrap_addr ? 
+ SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0; temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK; hw->frg_len = temp; @@ -831,6 +836,12 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, hw->llist_ptr = lower_32_bits(llist_ptr); hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) & SPRD_DMA_LLIST_HIGH_MASK; + + if (schan->linklist.wrap_addr) { + hw->wrap_ptr |= schan->linklist.wrap_addr & + SPRD_DMA_WRAP_ADDR_MASK; + hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK; + } } else { hw->llist_ptr = 0; hw->src_blk_step = 0; @@ -939,9 +950,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, schan->linklist.phy_addr = ll_cfg->phy_addr; schan->linklist.virt_addr = ll_cfg->virt_addr; + schan->linklist.wrap_addr = ll_cfg->wrap_addr; } else { schan->linklist.phy_addr = 0; schan->linklist.virt_addr = 0; + schan->linklist.wrap_addr = 0; } /* diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h index ab82df64682a..d09c6f6f6da5 100644 --- a/include/linux/dma/sprd-dma.h +++ b/include/linux/dma/sprd-dma.h @@ -118,6 +118,9 @@ enum sprd_dma_int_type { * struct sprd_dma_linklist - DMA link-list address structure * @virt_addr: link-list virtual address to configure link-list node * @phy_addr: link-list physical address to link DMA transfer + * @wrap_addr: the wrap address for link-list mode, which means once the + * transfer address reaches the wrap address, the next transfer address + * will jump to the address specified by wrap_to register. * * The Spreadtrum DMA controller supports the link-list mode, that means slaves * can supply several groups configurations (each configuration represents one @@ -181,6 +184,7 @@ enum sprd_dma_int_type { struct sprd_dma_linklist { unsigned long virt_addr; phys_addr_t phy_addr; + phys_addr_t wrap_addr; }; #endif From c236ba4ae7183c1add8dbce91b27c700f7ef4168 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Fri, 15 Nov 2019 16:31:00 +0800 Subject: [PATCH 56/61] dmaengine: mmp_tdma: add missed of_dma_controller_free The driver calls of_dma_controller_register in probe but does not free it in remove. Add the call to fix it. Signed-off-by: Chuhong Yuan Link: https://lore.kernel.org/r/20191115083100.12220-1-hslester96@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index e7d1e12bf464..10117f271b12 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -544,6 +544,9 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan) static int mmp_tdma_remove(struct platform_device *pdev) { + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + return 0; } From 39716c560c75619e174221d72f850d44166ef3ae Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Fri, 15 Nov 2019 16:31:53 +0800 Subject: [PATCH 57/61] dmaengine: mmp_pdma: add missed of_dma_controller_free The driver calls of_dma_controller_register in probe but does not free it in remove. Add the call to fix it. 
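The same pattern applies here as in the mmp_tdma fix above: of_dma_controller_register() in probe needs a matching of_dma_controller_free() in remove, since that registration is not device-managed. A minimal sketch of the pairing, with invented foo_* names rather than the mmp code itself:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct dma_device *dd = platform_get_drvdata(pdev);    /* set up earlier */
        int ret;

        ret = dma_async_device_register(dd);
        if (ret)
                return ret;

        if (pdev->dev.of_node) {
                /* of_dma_xlate_by_chan_id() expects the dma_device as data */
                ret = of_dma_controller_register(pdev->dev.of_node,
                                                 of_dma_xlate_by_chan_id, dd);
                if (ret) {
                        dma_async_device_unregister(dd);
                        return ret;
                }
        }
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct dma_device *dd = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);      /* the call this patch adds */

        dma_async_device_unregister(dd);
        return 0;
}
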
Signed-off-by: Chuhong Yuan Link: https://lore.kernel.org/r/20191115083153.12334-1-hslester96@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 7fe494fc50d4..ad06f260e907 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -945,6 +945,8 @@ static int mmp_pdma_remove(struct platform_device *op) struct mmp_pdma_phy *phy; int i, irq = 0, irq_num = 0; + if (op->dev.of_node) + of_dma_controller_free(op->dev.of_node); for (i = 0; i < pdev->dma_channels; i++) { if (platform_get_irq(op, i) > 0) From 340049d453682a9fe8d91fe794dd091730f4bb25 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Mon, 18 Nov 2019 15:38:02 +0800 Subject: [PATCH 58/61] dmaengine: ti: edma: fix missed failure handling When devm_kcalloc fails, it forgets to call edma_free_slot. Replace direct return with failure handler to fix it. Fixes: 1be5336bc7ba ("dmaengine: edma: New device tree binding") Signed-off-by: Chuhong Yuan Link: https://lore.kernel.org/r/20191118073802.28424-1-hslester96@gmail.com Signed-off-by: Vinod Koul --- drivers/dma/ti/edma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 0ecfc2e1d798..756a3c951dc7 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2424,8 +2424,10 @@ static int edma_probe(struct platform_device *pdev) ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, sizeof(*ecc->tc_list), GFP_KERNEL); - if (!ecc->tc_list) - return -ENOMEM; + if (!ecc->tc_list) { + ret = -ENOMEM; + goto err_reg1; + } for (i = 0;; i++) { ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", From dd9c324a5e96bfdc4c5c965da6fbd680340cd3f2 Mon Sep 17 00:00:00 2001 From: Green Wan Date: Mon, 18 Nov 2019 22:35:52 +0800 Subject: [PATCH 59/61] dmaengine: sf-pdma: replace /** with /* for non-function comment There are several comments starting from "/**" but not for function comment purpose. It causes kernel-doc parsing wrong string. Replace "/**" with "/*" to fix them. Signed-off-by: Green Wan Link: https://lore.kernel.org/r/20191118143554.16129-1-green.wan@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/sf-pdma/sf-pdma.c | 2 +- drivers/dma/sf-pdma/sf-pdma.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 16fe00553496..e8b9770dcfba 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * SiFive FU540 Platform DMA driver * Copyright (C) 2019 SiFive * diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h index 55816c9e0249..aab65a0bdfcc 100644 --- a/drivers/dma/sf-pdma/sf-pdma.h +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * SiFive FU540 Platform DMA driver * Copyright (C) 2019 SiFive * From 7d268a28ee336ae233b036057607fb73b844d512 Mon Sep 17 00:00:00 2001 From: Green Wan Date: Mon, 18 Nov 2019 22:35:53 +0800 Subject: [PATCH 60/61] dmaengine: sf-pdma: move macro to header file The place where the macro, SF_PDMA_REG_BASE(), is cause kernel-doc using wrong function declaration. Move it to header file. 
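The underlying issue is that kernel-doc attaches a kernel-doc comment block to the very next declaration it finds, so a macro sitting between the comment and its function makes the documentation get parsed against the wrong prototype. A small sketch of the problematic layout, with hypothetical names (FOO_REG, foo_setup):

/*
 * Problematic layout: kernel-doc pairs the comment block with the #define
 * below it, not with foo_setup(), and then warns about the mismatch.
 */
/**
 * foo_setup() - initialise one channel
 * @index: channel index
 */
#define FOO_REG(i)      (0x100 + 4 * (i))
static void foo_setup(int index)
{
        /* uses FOO_REG(index) ... */
}

Moving the #define above the comment block, or into a header as done here, leaves the kernel-doc comment immediately in front of the function it documents and the warning goes away.
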
Signed-off-by: Green Wan Link: https://lore.kernel.org/r/20191118143554.16129-2-green.wan@sifive.com Signed-off-by: Vinod Koul --- drivers/dma/sf-pdma/sf-pdma.c | 1 - drivers/dma/sf-pdma/sf-pdma.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index e8b9770dcfba..465256fe8b1f 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -435,7 +435,6 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma) * * Return: none */ -#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch))) static void sf_pdma_setup_chans(struct sf_pdma *pdma) { int i; diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h index aab65a0bdfcc..0c20167b097d 100644 --- a/drivers/dma/sf-pdma/sf-pdma.h +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -57,6 +57,8 @@ /* Error Recovery */ #define MAX_RETRY 1 +#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch))) + struct pdma_regs { /* read-write regs */ void __iomem *ctrl; /* 4 bytes */ From 67805a4b3c924927d9e064bca235461941f89e4a Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 21 Nov 2019 04:19:08 +0100 Subject: [PATCH 61/61] dmaengine: Fix Kconfig indentation Adjust indentation from spaces to tab (+optional two spaces) as in coding style with command like: $ sed -e 's/^ /\t/' -i */Kconfig Signed-off-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/1574306348-29212-1-git-send-email-krzk@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 60 ++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 03dfee5c66d9..6fa1eba9d477 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -15,19 +15,19 @@ menuconfig DMADEVICES be empty in some cases. config DMADEVICES_DEBUG - bool "DMA Engine debugging" - depends on DMADEVICES != n - help - This is an option for use by developers; most people should - say N here. This enables DMA engine core and driver debugging. + bool "DMA Engine debugging" + depends on DMADEVICES != n + help + This is an option for use by developers; most people should + say N here. This enables DMA engine core and driver debugging. config DMADEVICES_VDEBUG - bool "DMA Engine verbose debugging" - depends on DMADEVICES_DEBUG != n - help - This is an option for use by developers; most people should - say N here. This enables deeper (more verbose) debugging of - the DMA engine core and drivers. + bool "DMA Engine verbose debugging" + depends on DMADEVICES_DEBUG != n + help + This is an option for use by developers; most people should + say N here. This enables deeper (more verbose) debugging of + the DMA engine core and drivers. if DMADEVICES @@ -215,28 +215,28 @@ config FSL_EDMA This module can be found on Freescale Vybrid and LS-1 SoCs. config FSL_QDMA - tristate "NXP Layerscape qDMA engine support" - depends on ARM || ARM64 - select DMA_ENGINE - select DMA_VIRTUAL_CHANNELS - select DMA_ENGINE_RAID - select ASYNC_TX_ENABLE_CHANNEL_SWITCH - help - Support the NXP Layerscape qDMA engine with command queue and legacy mode. - Channel virtualization is supported through enqueuing of DMA jobs to, - or dequeuing DMA jobs from, different work queues. - This module can be found on NXP Layerscape SoCs. 
+ tristate "NXP Layerscape qDMA engine support" + depends on ARM || ARM64 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select DMA_ENGINE_RAID + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + help + Support the NXP Layerscape qDMA engine with command queue and legacy mode. + Channel virtualization is supported through enqueuing of DMA jobs to, + or dequeuing DMA jobs from, different work queues. + This module can be found on NXP Layerscape SoCs. The qdma driver only work on SoCs with a DPAA hardware block. config FSL_RAID - tristate "Freescale RAID engine Support" - depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH - select DMA_ENGINE - select DMA_ENGINE_RAID - ---help--- - Enable support for Freescale RAID Engine. RAID Engine is - available on some QorIQ SoCs (like P5020/P5040). It has - the capability to offload memcpy, xor and pq computation + tristate "Freescale RAID engine Support" + depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH + select DMA_ENGINE + select DMA_ENGINE_RAID + ---help--- + Enable support for Freescale RAID Engine. RAID Engine is + available on some QorIQ SoCs (like P5020/P5040). It has + the capability to offload memcpy, xor and pq computation for raid5/6. config IMG_MDC_DMA