From 7eb48dd094de5fe0e216b550e73aa85257903973 Mon Sep 17 00:00:00 2001
From: Hanjun Guo
Date: Wed, 22 Jul 2020 17:54:21 +0800
Subject: [PATCH 01/11] dmaengine: acpi: Put the CSRT table after using it

acpi_get_table() should be coupled with acpi_put_table() to release the
table mapping when the mapped table is not used at runtime, so put the
CSRT table buffer after using it.

Signed-off-by: Hanjun Guo
Link: https://lore.kernel.org/r/1595411661-15936-1-git-send-email-guohanjun@huawei.com
Signed-off-by: Vinod Koul
---
 drivers/dma/acpi-dma.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 35f4804ea4afa..235f1396f9686 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 		if (ret < 0) {
 			dev_warn(&adev->dev,
 				 "error in parsing resource group\n");
-			return;
+			break;
 		}
 
 		grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
 	}
+
+	acpi_put_table((struct acpi_table_header *)csrt);
 }
 
 /**

From df841b17e809f48f740cd2dd8b63543073c91a02 Mon Sep 17 00:00:00 2001
From: Dave Jiang
Date: Mon, 27 Jul 2020 09:37:11 -0700
Subject: [PATCH 02/11] dmaengine: idxd: reset states after device disable or reset

The state for WQs should be reset to disabled when a device is reset or
disabled.

Fixes: da32b28c95a7 ("dmaengine: idxd: cleanup workqueue config after disabling")
Reported-by: Mona Hossain
Signed-off-by: Dave Jiang
Link: https://lore.kernel.org/r/159586777684.27150.17589406415773568534.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul
---
 drivers/dma/idxd/device.c | 26 ++++++++++++++++++++++++++
 drivers/dma/idxd/irq.c    | 12 ------------
 2 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 14b45853aa5fc..b75d699160bfa 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -410,10 +410,27 @@ int idxd_device_enable(struct idxd_device *idxd)
 	return 0;
 }
 
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+	int i;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = &idxd->wqs[i];
+
+		if (wq->state == IDXD_WQ_ENABLED) {
+			idxd_wq_disable_cleanup(wq);
+			wq->state = IDXD_WQ_DISABLED;
+		}
+	}
+}
+
 int idxd_device_disable(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
 	u32 status;
+	unsigned long flags;
 
 	if (!idxd_is_enabled(idxd)) {
 		dev_dbg(dev, "Device is not enabled\n");
@@ -429,13 +446,22 @@ int idxd_device_disable(struct idxd_device *idxd)
 		return -ENXIO;
 	}
 
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	idxd_device_wqs_clear_state(idxd);
 	idxd->state = IDXD_DEV_CONF_READY;
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 	return 0;
 }
 
 void idxd_device_reset(struct idxd_device *idxd)
 {
+	unsigned long flags;
+
 	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	idxd_device_wqs_clear_state(idxd);
+	idxd->state = IDXD_DEV_CONF_READY;
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 }
 
 /* Device configuration bits */
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index b5142556cc4e8..1e9e6991f543d 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -11,18 +11,6 @@
 #include "idxd.h"
 #include "registers.h"
 
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
-	int i;
-
-	lockdep_assert_held(&idxd->dev_lock);
-	for (i = 0; i < idxd->max_wqs; i++) {
-		struct idxd_wq *wq = &idxd->wqs[i];
-
-		wq->state = IDXD_WQ_DISABLED;
-	}
-}
-
 static void idxd_device_reinit(struct work_struct *work)
 {
 	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
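The rule applied by PATCH 01 above, as a minimal standalone sketch: acpi_get_table() maps the table, so a matching acpi_put_table() must release the mapping once parsing is done and the table is not needed at runtime. The helper below is illustrative and elides the actual CSRT walk; only the ACPI calls are real kernel API.

#include <linux/acpi.h>

/* Hypothetical helper, not the acpi-dma code: get the CSRT, use it, put it. */
static void example_csrt_parse(void)
{
	struct acpi_table_header *csrt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_CSRT, 0, &csrt);
	if (ACPI_FAILURE(status))
		return;

	/* ... walk the CSRT resource groups here; on error, break out of the
	 * walk rather than returning, so the put below is always reached ...
	 */

	acpi_put_table(csrt);	/* release the table mapping */
}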
From 5b2aa9f918f6837ae943557f8cec02c34fcf80e7 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Thu, 6 Aug 2020 13:49:28 +0300
Subject: [PATCH 03/11] dmaengine: of-dma: Fix of_dma_router_xlate's of_dma_xlate handling

The of_dma_xlate callback can return an ERR_PTR as well as NULL in case
of failure. If an error code is returned (not NULL), the route should be
released and the router should not be registered for the channel.

Fixes: 56f13c0d9524c ("dmaengine: of_dma: Support for DMA routers")
Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200806104928.25975-1-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/of-dma.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 863f2aaf5c8f7..8a4f608904b98 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -71,12 +71,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 
 	chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
-	if (chan) {
-		chan->router = ofdma->dma_router;
-		chan->route_data = route_data;
-	} else {
+	if (IS_ERR_OR_NULL(chan)) {
 		ofdma->dma_router->route_free(ofdma->dma_router->dev,
 					      route_data);
+	} else {
+		chan->router = ofdma->dma_router;
+		chan->route_data = route_data;
 	}
 
 	/*

From 0cef8e2c5a07d482ec907249dbd6687e8697677f Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Mon, 17 Aug 2020 19:57:26 +0800
Subject: [PATCH 04/11] dmaengine: at_hdmac: check return value of of_find_device_by_node() in at_dma_xlate()

The return value of of_find_device_by_node() is not checked, thus a NULL
pointer dereference will be triggered if of_find_device_by_node() fails.

Fixes: bbe89c8e3d59 ("at_hdmac: move to generic DMA binding")
Signed-off-by: Yu Kuai
Link: https://lore.kernel.org/r/20200817115728.1706719-2-yukuai3@huawei.com
Signed-off-by: Vinod Koul
---
 drivers/dma/at_hdmac.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 45bbcd6146fd2..1c941f839c427 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1650,6 +1650,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 
 	dmac_pdev = of_find_device_by_node(dma_spec->np);
+	if (!dmac_pdev)
+		return NULL;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);

From 3832b78b3ec2cf51e07102f9b4480e343459b20f Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Mon, 17 Aug 2020 19:57:27 +0800
Subject: [PATCH 05/11] dmaengine: at_hdmac: add missing put_device() call in at_dma_xlate()

If of_find_device_by_node() succeeds, at_dma_xlate() doesn't have a
corresponding put_device(). Thus add put_device() to fix the exception
handling for this function implementation.

Fixes: bbe89c8e3d59 ("at_hdmac: move to generic DMA binding")
Signed-off-by: Yu Kuai
Link: https://lore.kernel.org/r/20200817115728.1706719-3-yukuai3@huawei.com
Signed-off-by: Vinod Koul
---
 drivers/dma/at_hdmac.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1c941f839c427..bf874367097cc 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1657,8 +1657,10 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 	dma_cap_set(DMA_SLAVE, mask);
 
 	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
-	if (!atslave)
+	if (!atslave) {
+		put_device(&dmac_pdev->dev);
 		return NULL;
+	}
 
 	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
 	/*
@@ -1687,8 +1689,10 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 	atslave->dma_dev = &dmac_pdev->dev;
 
 	chan = dma_request_channel(mask, at_dma_filter, atslave);
-	if (!chan)
+	if (!chan) {
+		put_device(&dmac_pdev->dev);
 		return NULL;
+	}
 
 	atchan = to_at_dma_chan(chan);
 	atchan->per_if = dma_spec->args[0] & 0xff;
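The reference-counting rule behind PATCH 04 and PATCH 05 above, as a generic sketch: once of_find_device_by_node() has returned a device, every early-exit path must drop the reference it took. The helper below is an illustrative stand-in, not the at_hdmac code.

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical xlate-style helper showing the error-path cleanup. */
static void *example_xlate(struct device_node *np)
{
	struct platform_device *pdev;
	void *slave_cfg;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	if (!pdev)				/* PATCH 04: bail out on failure */
		return NULL;

	slave_cfg = kzalloc(32, GFP_KERNEL);
	if (!slave_cfg) {
		put_device(&pdev->dev);		/* PATCH 05: drop the reference */
		return NULL;
	}

	/* later failure paths also need put_device() plus kfree(), which is
	 * what the next patch in the series adds
	 */
	return slave_cfg;
}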
From e097eb7473d9e70da9e03276f61cd392ccb9d79f Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Mon, 17 Aug 2020 19:57:28 +0800
Subject: [PATCH 06/11] dmaengine: at_hdmac: add missing kfree() call in at_dma_xlate()

If memory allocation for 'atslave' succeeds, at_dma_xlate() doesn't have
a corresponding kfree() in the exception handling. Thus add kfree() for
this function implementation.

Fixes: bbe89c8e3d59 ("at_hdmac: move to generic DMA binding")
Signed-off-by: Yu Kuai
Link: https://lore.kernel.org/r/20200817115728.1706719-4-yukuai3@huawei.com
Signed-off-by: Vinod Koul
---
 drivers/dma/at_hdmac.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf874367097cc..a2cf25c6e3b35 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1691,6 +1691,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 	chan = dma_request_channel(mask, at_dma_filter, atslave);
 	if (!chan) {
 		put_device(&dmac_pdev->dev);
+		kfree(atslave);
 		return NULL;
 	}
 

From 0661cef675d37e2c4b66a996389ebeae8568e49e Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Tue, 25 Aug 2020 08:46:17 +0200
Subject: [PATCH 07/11] dmaengine: pl330: Fix burst length if burst size is smaller than bus width

Move the burst length fixup after setting the generic value for it. This
finally enables the fixup introduced by commit 137bd11090d8 ("dmaengine:
pl330: Align DMA memcpy operations to MFIFO width"), which was otherwise
overwritten by the generic value.

Reported-by: kernel test robot
Fixes: 137bd11090d8 ("dmaengine: pl330: Align DMA memcpy operations to MFIFO width")
Signed-off-by: Marek Szyprowski
Link: https://lore.kernel.org/r/20200825064617.16193-1-m.szyprowski@samsung.com
Signed-off-by: Vinod Koul
---
 drivers/dma/pl330.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 2c508ee672b90..e010064d88460 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2801,6 +2801,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	while (burst != (1 << desc->rqcfg.brst_size))
 		desc->rqcfg.brst_size++;
 
+	desc->rqcfg.brst_len = get_burst_len(desc, len);
 	/*
 	 * If burst size is smaller than bus width then make sure we only
 	 * transfer one at a time to avoid a burst stradling an MFIFO entry.
@@ -2808,7 +2809,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
 		desc->rqcfg.brst_len = 1;
 
-	desc->rqcfg.brst_len = get_burst_len(desc, len);
 	desc->bytes_requested = len;
 
 	desc->txd.flags = flags;
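The ordering point of PATCH 07 above, as a small self-contained sketch: assign the generic burst length first, then apply the bus-width constraint, so the constraint gets the last word. All names below are local to the example, not the pl330 driver's.

/* Hypothetical helper: generic value first, fixup second. */
static unsigned int example_burst_len(unsigned int generic_len,
				      unsigned int brst_size_bytes,
				      unsigned int bus_width_bytes)
{
	unsigned int brst_len = generic_len;	/* generic value */

	if (brst_size_bytes < bus_width_bytes)
		brst_len = 1;			/* fixup must come after */

	return brst_len;
}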
From 33ebffa105990c43bf336cabe26c77384f59fe70 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Mon, 24 Aug 2020 15:01:08 +0300
Subject: [PATCH 08/11] dmaengine: ti: k3-udma: Fix the TR initialization for prep_slave_sg

The TR which needs to be initialized for the next sg entry is indexed by
tr_idx and not by the running i counter. In case any sub-element in the
SG needs more than one TR, the code would corrupt an already configured
TR.

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200824120108.9178-1-peter.ujfalusi@ti.com
Fixes: 6cf668a4ef829 ("dmaengine: ti: k3-udma: Use the TR counter helper for slave_sg and cyclic")
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index c14e6cb105cd8..30cb514cee545 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2059,9 +2059,9 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 			return NULL;
 		}
 
-		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
-			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
-		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
 
 		tr_req[tr_idx].addr = sg_addr;
 		tr_req[tr_idx].icnt0 = tr0_cnt0;

From 05655541c9503bfd01af4e6cbd7f5a29ac748e6c Mon Sep 17 00:00:00 2001
From: Gustavo Pimentel
Date: Thu, 13 Aug 2020 16:14:04 +0200
Subject: [PATCH 09/11] dmaengine: dw-edma: Fix scatter-gather address calculation

Fix the source and destination physical address calculation of a
peripheral device in the scatter-gather implementation.

This issue manifested during tests on a 64-bit architecture system. The
abnormal behavior wasn't visible before because all previous tests were
done on 32-bit architecture systems, which masked its effect.

Fixes: e63d79d1ffcd ("dmaengine: Add Synopsys eDMA IP core driver")
Cc: stable@vger.kernel.org
Signed-off-by: Gustavo Pimentel
Link: https://lore.kernel.org/r/8d3ab7e2ba96563fe3495b32f60077fffb85307d.1597327623.git.gustavo.pimentel@synopsys.com
Signed-off-by: Vinod Koul
---
 drivers/dma/dw-edma/dw-edma-core.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ed430ad9b3dd8..b971505b87152 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -405,7 +405,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 			if (xfer->cyclic) {
 				burst->dar = xfer->xfer.cyclic.paddr;
 			} else {
-				burst->dar = sg_dma_address(sg);
+				burst->dar = dst_addr;
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -413,14 +413,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 				 * and destination addresses are increased
 				 * by the same portion (data length)
 				 */
-				src_addr += sg_dma_len(sg);
 			}
 		} else {
 			burst->dar = dst_addr;
 			if (xfer->cyclic) {
 				burst->sar = xfer->xfer.cyclic.paddr;
 			} else {
-				burst->sar = sg_dma_address(sg);
+				burst->sar = src_addr;
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -428,12 +427,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 				 * and destination addresses are increased
 				 * by the same portion (data length)
 				 */
-				dst_addr += sg_dma_len(sg);
 			}
 		}
 
-		if (!xfer->cyclic)
+		if (!xfer->cyclic) {
+			src_addr += sg_dma_len(sg);
+			dst_addr += sg_dma_len(sg);
 			sg = sg_next(sg);
+		}
 	}
 
 	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
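A simplified sketch of the loop shape PATCH 09 above arrives at: each burst is programmed from the running source/destination addresses, and on non-cyclic transfers both advance by the element length, because the peripheral memory here is linear rather than a FIFO. The structure and function below are stand-ins, not the dw-edma driver's.

#include <linux/scatterlist.h>
#include <linux/types.h>

struct example_burst {
	dma_addr_t sar;
	dma_addr_t dar;
};

/* Hypothetical fill loop: program from running addresses, advance both. */
static void example_fill_bursts(struct example_burst *bursts,
				struct scatterlist *sgl, unsigned int nents,
				dma_addr_t src_addr, dma_addr_t dst_addr)
{
	struct scatterlist *sg = sgl;
	unsigned int i;

	for (i = 0; i < nents && sg; i++, sg = sg_next(sg)) {
		bursts[i].sar = src_addr;
		bursts[i].dar = dst_addr;

		/* linear peripheral memory: both sides move forward */
		src_addr += sg_dma_len(sg);
		dst_addr += sg_dma_len(sg);
	}
}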
From 6d6018fc30bee67290dbed2fa51123f7c6f3d691 Mon Sep 17 00:00:00 2001
From: Madhuparna Bhowmik
Date: Fri, 21 Aug 2020 09:14:23 +0530
Subject: [PATCH 10/11] drivers/dma/dma-jz4780: Fix race condition between probe and irq handler

In probe, the IRQ is requested before zchan->id is initialized, even
though the IRQ handler can read it. Hence, move the IRQ request after
the other initializations are complete.

Found by Linux Driver Verification project (linuxtesting.org).

Signed-off-by: Madhuparna Bhowmik
Reviewed-by: Paul Cercueil
Link: https://lore.kernel.org/r/20200821034423.12713-1-madhuparnabhowmik10@gmail.com
Signed-off-by: Vinod Koul
---
 drivers/dma/dma-jz4780.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 448f663da89c6..8beed91428bd6 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -879,24 +879,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	ret = platform_get_irq(pdev, 0);
-	if (ret < 0)
-		return ret;
-
-	jzdma->irq = ret;
-
-	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
-			  jzdma);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return ret;
-	}
-
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
 		ret = PTR_ERR(jzdma->clk);
-		goto err_free_irq;
+		return ret;
 	}
 
 	clk_prepare_enable(jzdma->clk);
@@ -949,10 +936,23 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		jzchan->vchan.desc_free = jz4780_dma_desc_free;
 	}
 
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		goto err_disable_clk;
+
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+		goto err_disable_clk;
+	}
+
 	ret = dmaenginem_async_device_register(dd);
 	if (ret) {
 		dev_err(dev, "failed to register device\n");
-		goto err_disable_clk;
+		goto err_free_irq;
 	}
 
 	/* Register with OF DMA helpers. */
@@ -960,17 +960,17 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 			 jzdma);
 	if (ret) {
 		dev_err(dev, "failed to register OF DMA controller\n");
-		goto err_disable_clk;
+		goto err_free_irq;
 	}
 
 	dev_info(dev, "JZ4780 DMA controller initialised\n");
 	return 0;
 
-err_disable_clk:
-	clk_disable_unprepare(jzdma->clk);
-
 err_free_irq:
 	free_irq(jzdma->irq, jzdma);
+
+err_disable_clk:
+	clk_disable_unprepare(jzdma->clk);
 	return ret;
 }
 
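The probe-ordering rule from PATCH 10 above, as an illustrative skeleton: request the IRQ only after everything the handler may read has been set up, and make later failure paths free the IRQ before undoing earlier steps. The private structure and function names are hypothetical, not the jz4780 driver's.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct example_priv {
	int irq;
	/* ... everything the IRQ handler reads lives here ... */
};

static int example_probe(struct platform_device *pdev,
			 struct example_priv *priv, irq_handler_t handler)
{
	int ret;

	/* 1) initialize clocks, channels and priv fields the handler uses */

	/* 2) only then hook up the interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	priv->irq = ret;

	ret = request_irq(priv->irq, handler, 0, dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	/* 3) registrations that can fail come after this point; their error
	 * paths jump to a label that calls free_irq() before disabling the
	 * clock, mirroring the reordered goto chain in the patch
	 */
	return 0;
}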
From 46815bf4d5a2e6ed64e4fa636c7d13f025bf40d8 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Mon, 31 Aug 2020 12:10:19 +0300
Subject: [PATCH 11/11] dmaengine: ti: k3-udma: Update rchan_oes_offset for am654 SYSFW ABI 3.0

SYSFW ABI 3.0 has changed the rchan_oes_offset value for am654 to
support SR2. Since the kernel now needs SYSFW ABI 3.0 to work because of
the merged irqchip update, we also need to update the am654
rchan_oes_offset.

Signed-off-by: Peter Ujfalusi
Link: https://lore.kernel.org/r/20200831091019.25273-1-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/k3-udma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 30cb514cee545..d86dba0fd8e6b 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3101,14 +3101,14 @@ static struct udma_match_data am654_main_data = {
 	.psil_base = 0x1000,
 	.enable_memcpy_support = true,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x2000,
+	.rchan_oes_offset = 0x200,
 };
 
 static struct udma_match_data am654_mcu_data = {
 	.psil_base = 0x6000,
 	.enable_memcpy_support = false,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x2000,
+	.rchan_oes_offset = 0x200,
 };
 
 static struct udma_match_data j721e_main_data = {