Merge tag 'dmaengine-4.9-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This is bit large pile of code which bring in some nice additions:

   - Error reporting: we have added a new mechanism for users of
     dmaengine to register a callback_result which tells them the
     result of the DMA transaction. Right now only one user (ntb) is
     using it (see the sketch after this list).

   - As we discussed on the KS mailing list, NO_IRQ has no place in
     the kernel, so this also removes NO_IRQ from the dmaengine
     subsystem (both arm and ppc users)

   - Support for IOMMU slave transfers and its implementation for arm.

   - To get better build coverage, enable COMPILE_TEST for a bunch of
     drivers, and fix the warnings and sparse complaints in these.

   - Apart from the above, the usual updates spread across drivers"
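
A minimal consumer-side sketch of the new error-reporting hook. This is
not code from the series: my_dev, my_done() and my_issue() are
hypothetical names; only tx->callback_result, struct dmaengine_result
and DMA_TRANS_NOERROR come from the new API.

    #include <linux/dmaengine.h>

    struct my_dev {                     /* illustrative consumer state */
            struct device *dev;
            struct dma_chan *chan;
    };

    /* Completion callback that receives an explicit result, unlike the
     * status-less legacy tx->callback. */
    static void my_done(void *param, const struct dmaengine_result *result)
    {
            struct my_dev *md = param;

            if (result->result != DMA_TRANS_NOERROR)
                    dev_err(md->dev, "DMA error, residue %u\n",
                            result->residue);
    }

    static int my_issue(struct my_dev *md, dma_addr_t buf, size_t len)
    {
            struct dma_async_tx_descriptor *tx;

            tx = dmaengine_prep_slave_single(md->chan, buf, len,
                                             DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT);
            if (!tx)
                    return -EBUSY;

            tx->callback_result = my_done;  /* instead of tx->callback */
            tx->callback_param = md;
            dmaengine_submit(tx);
            dma_async_issue_pending(md->chan);
            return 0;
    }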

* tag 'dmaengine-4.9-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (169 commits)
  async_pq_val: fix DMA memory leak
  dmaengine: virt-dma: move function declarations
  dmaengine: omap-dma: Enable burst and data pack for SG
  DT: dmaengine: rcar-dmac: document R8A7743/5 support
  dmaengine: fsldma: Unmap region obtained by of_iomap
  dmaengine: jz4780: fix resource leaks on error exit return
  dma-debug: fix ia64 build, use PHYS_PFN
  dmaengine: coh901318: fix integer overflow when shifting more than 32 places
  dmaengine: edma: avoid uninitialized variable use
  dma-mapping: fix m32r build warning
  dma-mapping: fix ia64 build, use PHYS_PFN
  dmaengine: ti-dma-crossbar: enable COMPILE_TEST
  dmaengine: omap-dma: enable COMPILE_TEST
  dmaengine: edma: enable COMPILE_TEST
  dmaengine: ti-dma-crossbar: Fix of_device_id data parameter usage
  dmaengine: ti-dma-crossbar: Correct type for of_find_property() third parameter
  dmaengine/ARM: omap-dma: Fix the DMAengine compile test on non OMAP configs
  dmaengine: edma: Rename set_bits and remove unused clear_bits helper
  dmaengine: edma: Use correct type for of_find_property() third parameter
  dmaengine: edma: Fix of_device_id data parameter usage (legacy vs TPCC)
  ...
commit 553911c67e (Linus Torvalds, branch: master)
76 changed files:

 Documentation/DMA-API.txt | 22
 Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt | 1
 Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 4
 Documentation/devicetree/bindings/dma/sun6i-dma.txt | 1
 Documentation/dmaengine/provider.txt | 11
 arch/arm/mach-s3c24xx/common.c | 35
 arch/arm/mm/dma-mapping.c | 63
 crypto/async_tx/async_pq.c | 8
 drivers/dma/Kconfig | 40
 drivers/dma/at_hdmac.c | 11
 drivers/dma/at_xdmac.c | 8
 drivers/dma/bestcomm/bestcomm.c | 4
 drivers/dma/coh901318.c | 56
 drivers/dma/coh901318_lli.c | 4
 drivers/dma/cppi41.c | 142
 drivers/dma/dma-jz4740.c | 2
 drivers/dma/dma-jz4780.c | 10
 drivers/dma/dmaengine.c | 7
 drivers/dma/dmaengine.h | 84
 drivers/dma/dmatest.c | 23
 drivers/dma/dw/core.c | 14
 drivers/dma/edma.c | 38
 drivers/dma/ep93xx_dma.c | 28
 drivers/dma/fsl_raid.c | 12
 drivers/dma/fsldma.c | 24
 drivers/dma/imx-dma.c | 4
 drivers/dma/imx-sdma.c | 35
 drivers/dma/ioat/dma.c | 213
 drivers/dma/ioat/init.c | 2
 drivers/dma/ioat/registers.h | 2
 drivers/dma/iop-adma.c | 3
 drivers/dma/ipu/ipu_idmac.c | 18
 drivers/dma/ipu/ipu_irq.c | 9
 drivers/dma/k3dma.c | 215
 drivers/dma/mic_x100_dma.c | 6
 drivers/dma/mmp_pdma.c | 14
 drivers/dma/mmp_tdma.c | 6
 drivers/dma/moxart-dma.c | 2
 drivers/dma/mpc512x_dma.c | 7
 drivers/dma/mv_xor.c | 102
 drivers/dma/mv_xor.h | 7
 drivers/dma/mxs-dma.c | 13
 drivers/dma/nbpfaxi.c | 9
 drivers/dma/omap-dma.c | 243
 drivers/dma/pch_dma.c | 7
 drivers/dma/pl330.c | 25
 drivers/dma/ppc4xx/adma.c | 11
 drivers/dma/qcom/hidma.c | 57
 drivers/dma/qcom/hidma.h | 2
 drivers/dma/qcom/hidma_ll.c | 32
 drivers/dma/s3c24xx-dma.c | 9
 drivers/dma/sa11x0-dma.c | 14
 drivers/dma/sh/rcar-dmac.c | 132
 drivers/dma/sh/shdma-base.c | 12
 drivers/dma/sirf-dma.c | 9
 drivers/dma/ste_dma40.c | 299
 drivers/dma/stm32-dma.c | 2
 drivers/dma/sun6i-dma.c | 7
 drivers/dma/tegra20-apb-dma.c | 10
 drivers/dma/tegra210-adma.c | 14
 drivers/dma/ti-dma-crossbar.c | 30
 drivers/dma/timb_dma.c | 9
 drivers/dma/txx9dmac.c | 9
 drivers/dma/virt-dma.c | 17
 drivers/dma/virt-dma.h | 10
 drivers/dma/xgene-dma.c | 6
 drivers/dma/xilinx/xilinx_dma.c | 10
 drivers/ntb/ntb_transport.c | 193
 include/linux/dma-debug.h | 19
 include/linux/dma-mapping.h | 41
 include/linux/dmaengine.h | 16
 include/linux/mbus.h | 18
 include/linux/omap-dma.h | 19
 include/linux/platform_data/dma-mmp_tdma.h | 2
 include/linux/platform_data/dma-s3c24xx.h | 6
 lib/dma-debug.c | 52

22
Documentation/DMA-API.txt

@ -277,14 +277,26 @@ and <size> parameters are provided to do partial page mapping, it is
recommended that you never use these unless you really know what the
cache width is.
dma_addr_t
dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
void
dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
API for mapping and unmapping for MMIO resources. All the notes and
warnings for the other mapping APIs apply here. The API should only be
used to map device MMIO resources, mapping of RAM is not permitted.
int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
In some circumstances dma_map_single() and dma_map_page() will fail to create
a mapping. A driver can check for these errors by testing the returned
DMA address with dma_mapping_error(). A non-zero return value means the mapping
could not be created and the driver should take appropriate action (e.g.
reduce current DMA mapping usage or delay and try again later).
In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
will fail to create a mapping. A driver can check for these errors by testing
the returned DMA address with dma_mapping_error(). A non-zero return value
means the mapping could not be created and the driver should take appropriate
action (e.g. reduce current DMA mapping usage or delay and try again later).
int
dma_map_sg(struct device *dev, struct scatterlist *sg,
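
A hedged usage sketch for the two resource-mapping calls documented
above, checked with dma_mapping_error(). fifo_phys and SZ_4K are
illustrative values, not taken from the text; needs
<linux/dma-mapping.h> and <linux/sizes.h>.

    static int map_fifo(struct device *dev, phys_addr_t fifo_phys,
                        dma_addr_t *handle)
    {
            *handle = dma_map_resource(dev, fifo_phys, SZ_4K,
                                       DMA_BIDIRECTIONAL, 0);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM; /* reduce usage or retry later */
            return 0;
    }

    /* ... and once the transfer is finished: */
    dma_unmap_resource(dev, handle, SZ_4K, DMA_BIDIRECTIONAL, 0);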

1
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt

@ -8,6 +8,7 @@ Required properties:
"fsl,imx51-sdma"
"fsl,imx53-sdma"
"fsl,imx6q-sdma"
"fsl,imx7d-sdma"
The -to variants should be preferred since they allow to determine the
correct ROM script addresses needed for the driver to work without additional
firmware.

4
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt

@ -1,4 +1,4 @@
* Renesas R-Car DMA Controller Device Tree bindings
* Renesas R-Car (RZ/G) DMA Controller Device Tree bindings
Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
controller instances named DMAC capable of serving multiple clients. Channels
@ -16,6 +16,8 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are:
- "renesas,dmac-r8a7743" (RZ/G1M)
- "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a7790" (R-Car H2)
- "renesas,dmac-r8a7791" (R-Car M2-W)
- "renesas,dmac-r8a7792" (R-Car V2H)

1
Documentation/devicetree/bindings/dma/sun6i-dma.txt

@ -7,6 +7,7 @@ Required properties:
- compatible: Must be one of
"allwinner,sun6i-a31-dma"
"allwinner,sun8i-a23-dma"
"allwinner,sun8i-a83t-dma"
"allwinner,sun8i-h3-dma"
- reg: Should contain the registers base address and length
- interrupts: Should contain a reference to the interrupt used by this device

11
Documentation/dmaengine/provider.txt

@ -282,6 +282,17 @@ supported.
that is supposed to push the current
transaction descriptor to a pending queue, waiting
for issue_pending to be called.
- In this structure the function pointer callback_result can be
initialized so that the submitter is notified when a transaction has
completed. Earlier code used the function pointer callback; however,
it does not report any status for the transaction and will be
deprecated (the types behind this are sketched after this file's
diff). The result structure, defined as dmaengine_result and passed
in to callback_result, has two fields:
+ result: This provides the transfer result, defined by
dmaengine_tx_result: either success or some error condition.
+ residue: Provides the residue bytes of the transfer, for those
drivers that support residue reporting.
* device_issue_pending
- Takes the first transaction descriptor in the pending queue,
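
For reference, the types behind callback_result as this series adds
them to include/linux/dmaengine.h (reproduced here for convenience, so
treat exact member order and comments as indicative):

    enum dmaengine_tx_result {
            DMA_TRANS_NOERROR = 0,          /* SUCCESS */
            DMA_TRANS_READ_FAILED,          /* Source DMA read failed */
            DMA_TRANS_WRITE_FAILED,         /* Destination DMA write failed */
            DMA_TRANS_ABORTED,              /* Op never submitted / aborted */
    };

    struct dmaengine_result {
            enum dmaengine_tx_result result;
            u32 residue;
    };

    typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
                            const struct dmaengine_result *result);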

35
arch/arm/mach-s3c24xx/common.c

@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_data/dma-s3c24xx.h>
#include <linux/dmaengine.h>
#include <mach/hardware.h>
#include <mach/regs-clock.h>
@ -439,10 +440,44 @@ static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = {
[DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
};
static const struct dma_slave_map s3c2440_dma_slave_map[] = {
/* TODO: DMACH_XD0 */
/* TODO: DMACH_XD1 */
{ "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
{ "s3c2410-spi.0", "rx", (void *)DMACH_SPI0 },
{ "s3c2410-spi.0", "tx", (void *)DMACH_SPI0 },
{ "s3c2410-spi.1", "rx", (void *)DMACH_SPI1 },
{ "s3c2410-spi.1", "tx", (void *)DMACH_SPI1 },
{ "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
{ "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
{ "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
{ "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
{ "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
{ "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
{ "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
{ "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
/* TODO: DMACH_TIMER */
{ "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
{ "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
{ "samsung-ac97", "rx", (void *)DMACH_PCM_IN },
{ "samsung-ac97", "tx", (void *)DMACH_PCM_OUT },
{ "samsung-ac97", "rx", (void *)DMACH_MIC_IN },
{ "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
{ "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
{ "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
{ "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
{ "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
};
static struct s3c24xx_dma_platdata s3c2440_dma_platdata = {
.num_phy_channels = 4,
.channels = s3c2440_dma_channels,
.num_channels = DMACH_MAX,
.slave_map = s3c2440_dma_slave_map,
.slavecnt = ARRAY_SIZE(s3c2440_dma_slave_map),
};
struct platform_device s3c2440_device_dma = {

63
arch/arm/mm/dma-mapping.c

@ -2014,6 +2014,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
__free_iova(mapping, iova, len);
}
/**
* arm_iommu_map_resource - map a device resource for DMA
* @dev: valid struct device pointer
* @phys_addr: physical address of resource
* @size: size of resource to map
* @dir: DMA transfer direction
*/
static dma_addr_t arm_iommu_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
int ret, prot;
phys_addr_t addr = phys_addr & PAGE_MASK;
unsigned int offset = phys_addr & ~PAGE_MASK;
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
if (ret < 0)
goto fail;
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
return DMA_ERROR_CODE;
}
/**
* arm_iommu_unmap_resource - unmap a device DMA resource
* @dev: valid struct device pointer
* @dma_handle: DMA address to resource
* @size: size of resource to map
* @dir: DMA transfer direction
*/
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = dma_handle & PAGE_MASK;
unsigned int offset = dma_handle & ~PAGE_MASK;
size_t len = PAGE_ALIGN(size + offset);
if (!iova)
return;
iommu_unmap(mapping->domain, iova, len);
__free_iova(mapping, iova, len);
}
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
@ -2057,6 +2114,9 @@ struct dma_map_ops iommu_ops = {
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
};
struct dma_map_ops iommu_coherent_ops = {
@ -2070,6 +2130,9 @@ struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
};
/**

8
crypto/async_tx/async_pq.c

@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
return NULL;
tx = NULL;
}
dmaengine_unmap_put(unmap);
return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);

40
drivers/dma/Kconfig

@ -102,7 +102,7 @@ config AXI_DMAC
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
depends on ARCH_U300
depends on ARCH_U300 || COMPILE_TEST
help
Enable support for ST-Ericsson COH 901 318 DMA.
@ -114,13 +114,13 @@ config DMA_BCM2835
config DMA_JZ4740
tristate "JZ4740 DMA support"
depends on MACH_JZ4740
depends on MACH_JZ4740 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
config DMA_JZ4780
tristate "JZ4780 DMA support"
depends on MACH_JZ4780
depends on MACH_JZ4780 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@ -130,14 +130,14 @@ config DMA_JZ4780
config DMA_OMAP
tristate "OMAP DMA support"
depends on ARCH_OMAP
depends on ARCH_OMAP || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if SOC_DRA7XX
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
config DMA_SA11X0
tristate "SA-11x0 DMA support"
depends on ARCH_SA1100
depends on ARCH_SA1100 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@ -150,7 +150,6 @@ config DMA_SUN4I
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
select DMA_ENGINE
select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the DMA controller present in the sun4i,
@ -167,7 +166,7 @@ config DMA_SUN6I
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
depends on ARCH_EP93XX
depends on ARCH_EP93XX || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
@ -279,7 +278,7 @@ config INTEL_MIC_X100_DMA
config K3_DMA
tristate "Hisilicon K3 DMA support"
depends on ARCH_HI3xxx
depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@ -297,16 +296,16 @@ config LPC18XX_DMAMUX
config MMP_PDMA
bool "MMP PDMA support"
depends on (ARCH_MMP || ARCH_PXA)
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
config MMP_TDMA
bool "MMP Two-Channel DMA support"
depends on ARCH_MMP
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select MMP_SRAM
select MMP_SRAM if ARCH_MMP
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
@ -316,7 +315,6 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
@ -439,9 +437,8 @@ config STE_DMA40
config STM32_DMA
bool "STMicroelectronics STM32 DMA support"
depends on ARCH_STM32
depends on ARCH_STM32 || COMPILE_TEST
select DMA_ENGINE
select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the on-chip DMA controller on STMicroelectronics
@ -451,7 +448,7 @@ config STM32_DMA
config S3C24XX_DMAC
bool "Samsung S3C24XX DMA support"
depends on ARCH_S3C24XX
depends on ARCH_S3C24XX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@ -483,10 +480,9 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
bool "NVIDIA Tegra210 ADMA support"
depends on ARCH_TEGRA_210_SOC
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select PM_CLK
help
Support for the NVIDIA Tegra210 ADMA controller driver. The
DMA controller has multiple DMA channels and is used to service
@ -497,7 +493,7 @@ config TEGRA210_ADMA
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
depends on MFD_TIMBERDALE
depends on MFD_TIMBERDALE || COMPILE_TEST
select DMA_ENGINE
help
Enable support for the Timberdale FPGA DMA engine.
@ -515,10 +511,10 @@ config TI_DMA_CROSSBAR
config TI_EDMA
bool "TI EDMA support"
depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if ARCH_OMAP
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
default n
help
Enable support for the TI EDMA controller. This DMA
@ -561,7 +557,7 @@ config XILINX_ZYNQMP_DMA
config ZX_DMA
tristate "ZTE ZX296702 DMA support"
depends on ARCH_ZX
depends on ARCH_ZX || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help

11
drivers/dma/at_hdmac.c

@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
*/
if (callback)
callback(param);
dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);
@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
struct at_desc *first = atc_first_active(atchan);
struct dma_async_tx_descriptor *txd = &first->txd;
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
dev_vdbg(chan2dev(&atchan->chan_common),
"new cyclic period llp 0x%08x\n",
channel_readl(atchan, DSCR));
if (callback)
callback(param);
dmaengine_desc_get_callback_invoke(txd, NULL);
}
/*-- IRQ & Tasklet ---------------------------------------------------*/

8
drivers/dma/at_xdmac.c

@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
txd->callback(txd->callback_param);
if (txd->flags & DMA_PREP_INTERRUPT)
dmaengine_desc_get_callback_invoke(txd, NULL);
}
static void at_xdmac_tasklet(unsigned long data)
@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
txd->callback(txd->callback_param);
if (txd->flags & DMA_PREP_INTERRUPT)
dmaengine_desc_get_callback_invoke(txd, NULL);
}
dma_run_dependencies(txd);

4
drivers/dma/bestcomm/bestcomm.c

@ -82,7 +82,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
/* Get IRQ of that task */
tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
if (tsk->irq == NO_IRQ)
if (!tsk->irq)
goto error;
/* Init the BDs, if needed */
@ -104,7 +104,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
error:
if (tsk) {
if (tsk->irq != NO_IRQ)
if (tsk->irq)
irq_dispose_mapping(tsk->irq);
bcom_sram_free(tsk->bd);
kfree(tsk->cookie);

56
drivers/dma/coh901318.c

@ -1319,10 +1319,10 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
i, l, l->control, l->src_addr, l->dst_addr,
l->link_addr, l->virt_link_addr);
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
i, l, l->control, &l->src_addr, &l->dst_addr,
&l->link_addr, l->virt_link_addr);
i++;
l = l->virt_link_addr;
}
@ -1335,7 +1335,7 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;
static int coh901318_debugfs_read(struct file *file, char __user *buf,
static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *f_pos)
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
@ -1352,9 +1352,10 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
for (i = 0; i < U300_DMA_CHANNELS; i++)
if (started_channels & (1 << i))
for (i = 0; i < U300_DMA_CHANNELS; i++) {
if (started_channels & (1ULL << i))
tmp += sprintf(tmp, "channel %d\n", i);
}
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
@ -1553,15 +1554,8 @@ coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
struct coh901318_desc *d;
if (list_empty(&cohc->active))
return NULL;
d = list_first_entry(&cohc->active,
struct coh901318_desc,
node);
return d;
return list_first_entry_or_null(&cohc->active, struct coh901318_desc,
node);
}
static void
@ -1579,15 +1573,8 @@ coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
struct coh901318_desc *d;
if (list_empty(&cohc->queue))
return NULL;
d = list_first_entry(&cohc->queue,
struct coh901318_desc,
node);
return d;
return list_first_entry_or_null(&cohc->queue, struct coh901318_desc,
node);
}
static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli)
@ -1766,7 +1753,7 @@ static int coh901318_resume(struct dma_chan *chan)
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
unsigned int ch_nr = (unsigned int) chan_id;
unsigned long ch_nr = (unsigned long) chan_id;
if (ch_nr == to_coh901318_chan(chan)->id)
return true;
@ -1888,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin;
unsigned long flags;
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_desc_callback cb;
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
" nbr_active_done %ld\n", __func__,
@ -1904,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
goto err;
/* locate callback to client */
callback = cohd_fin->desc.callback;
callback_param = cohd_fin->desc.callback_param;
dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
/* sign this job as completed on the channel */
dma_cookie_complete(&cohd_fin->desc);
@ -1920,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&cohc->lock, flags);
/* Call the callback when we're done */
if (callback)
callback(callback_param);
dmaengine_desc_callback_invoke(&cb, NULL);
spin_lock_irqsave(&cohc->lock, flags);
@ -2247,8 +2231,8 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
"[%s] channel %d src 0x%x dest 0x%x size %d\n",
__func__, cohc->id, src, dest, size);
"[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
__func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last lli */
@ -2744,8 +2728,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
goto err_register_of_dma;
platform_set_drvdata(pdev, base);
dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n",
base->virtbase);
return err;

4
drivers/dma/coh901318_lli.c

@ -75,7 +75,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
lli->link_addr = 0x00000000;
lli->virt_link_addr = 0x00000000U;
lli->virt_link_addr = NULL;
for (i = 1; i < len; i++) {
lli_prev = lli;
@ -88,7 +88,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
lli->link_addr = 0x00000000;
lli->virt_link_addr = 0x00000000U;
lli->virt_link_addr = NULL;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;

142
drivers/dma/cppi41.c

@ -108,6 +108,8 @@ struct cppi41_channel {
unsigned td_queued:1;
unsigned td_seen:1;
unsigned td_desc_seen:1;
struct list_head node; /* Node for pending list */
};
struct cppi41_desc {
@ -146,6 +148,9 @@ struct cppi41_dd {
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
struct list_head pending; /* Pending queued transfers */
spinlock_t lock; /* Lock for pending list */
/* context for suspend/resume */
unsigned int dma_tdfdq;
};
@ -331,7 +336,11 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
c->txd.callback(c->txd.callback_param);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
/* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@ -349,6 +358,12 @@ static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
if (error < 0)
return error;
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@ -357,11 +372,26 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
if (!c->is_tx)
cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
return 0;
}
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
if (error < 0)
return;
WARN_ON(!list_empty(&cdd->pending));
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
@ -386,21 +416,6 @@ static void push_desc_queue(struct cppi41_channel *c)
u32 desc_phys;
u32 reg;
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = c;
reg = (sizeof(struct cppi41_desc) - 24) / 4;
reg |= desc_phys;
cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
u32 reg;
c->residue = 0;
reg = GCR_CHAN_ENABLE;
@ -418,7 +433,46 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
* before starting the dma engine.
*/
__iowmb();
push_desc_queue(c);
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = c;
reg = (sizeof(struct cppi41_desc) - 24) / 4;
reg |= desc_phys;
cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
static void pending_desc(struct cppi41_channel *c)
{
struct cppi41_dd *cdd = c->cdd;
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
list_add_tail(&c->node, &cdd->pending);
spin_unlock_irqrestore(&cdd->lock, flags);
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
int error;
/* PM runtime paired with dmaengine_desc_get_callback_invoke */
error = pm_runtime_get(cdd->ddev.dev);
if ((error != -EINPROGRESS) && error < 0) {
dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
error);
return;
}
if (likely(pm_runtime_active(cdd->ddev.dev)))
push_desc_queue(c);
else
pending_desc(c);
}
static u32 get_host_pd0(u32 length)
@ -940,12 +994,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ctrl_mem = of_iomap(dev->of_node, 1);
cdd->sched_mem = of_iomap(dev->of_node, 2);
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
spin_lock_init(&cdd->lock);
INIT_LIST_HEAD(&cdd->pending);
platform_set_drvdata(pdev, cdd);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem)
return -ENXIO;
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_get_sync;
@ -985,7 +1045,9 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_of;
platform_set_drvdata(pdev, cdd);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
err_of:
dma_async_device_unregister(&cdd->ddev);
@ -996,7 +1058,8 @@ err_irq:
err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_put(dev);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
err_get_sync:
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
@ -1021,13 +1084,13 @@ static int cppi41_dma_remove(struct platform_device *pdev)
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
pm_runtime_put(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int cppi41_suspend(struct device *dev)
static int __maybe_unused cppi41_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
@ -1038,7 +1101,7 @@ static int cppi41_suspend(struct device *dev)
return 0;
}
static int cppi41_resume(struct device *dev)
static int __maybe_unused cppi41_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c;
@ -1062,9 +1125,38 @@ static int cppi41_resume(struct device *dev)
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
WARN_ON(!list_empty(&cdd->pending));
return 0;
}
static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c, *_c;
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
list_for_each_entry_safe(c, _c, &cdd->pending, node) {
push_desc_queue(c);
list_del(&c->node);
}
spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
}
static const struct dev_pm_ops cppi41_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
cppi41_runtime_resume,
NULL)
};
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,

2
drivers/dma/dma-jz4740.c

@ -21,8 +21,6 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/mach-jz4740/dma.h>
#include "virt-dma.h"
#define JZ_DMA_NR_CHANS 6

10
drivers/dma/dma-jz4780.c

@ -324,8 +324,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
sg_dma_address(&sgl[i]),
sg_dma_len(&sgl[i]),
direction);
if (err < 0)
if (err < 0) {
jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
}
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
@ -368,8 +370,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
for (i = 0; i < periods; i++) {
err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
period_len, direction);
if (err < 0)
if (err < 0) {
jz4780_dma_desc_free(&jzchan->desc->vdesc);
return NULL;
}
buf_addr += period_len;
@ -396,7 +400,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{

7
drivers/dma/dmaengine.c

@ -997,6 +997,13 @@ int dma_async_device_register(struct dma_device *device)
}
chan->client_count = 0;
}
if (!chancnt) {
dev_err(device->dev, "%s: device has no channels!\n", __func__);
rc = -ENODEV;
goto err_out;
}
device->chancnt = chancnt;
mutex_lock(&dma_list_mutex);

84
drivers/dma/dmaengine.h

@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
struct dmaengine_desc_callback {
dma_async_tx_callback callback;
dma_async_tx_callback_result callback_result;
void *callback_param;
};
/**
* dmaengine_desc_get_callback - get the passed in callback function
* @tx: tx descriptor
* @cb: temp struct to hold the callback info
*
* Fill the passed in cb struct with what's available in the passed in
* tx descriptor struct
* No locking is required.
*/
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
struct dmaengine_desc_callback *cb)
{
cb->callback = tx->callback;
cb->callback_result = tx->callback_result;
cb->callback_param = tx->callback_param;
}
/**
* dmaengine_desc_callback_invoke - call the callback function in cb struct
* @cb: temp struct that is holding the callback info
* @result: transaction result
*
* Call the callback function provided in the cb struct with the parameter
* in the cb struct.
* Locking is dependent on the driver.
*/
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
const struct dmaengine_result *result)
{
struct dmaengine_result dummy_result = {
.result = DMA_TRANS_NOERROR,
.residue = 0
};
if (cb->callback_result) {
if (!result)
result = &dummy_result;
cb->callback_result(cb->callback_param, result);
} else if (cb->callback) {
cb->callback(cb->callback_param);
}
}
/**
* dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
* then immediately call the callback.
* @tx: dma async tx descriptor
* @result: transaction result
*
* Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
* in a single function since no work is necessary in between for the driver.
* Locking is dependent on the driver.
*/
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
const struct dmaengine_result *result)
{
struct dmaengine_desc_callback cb;
dmaengine_desc_get_callback(tx, &cb);
dmaengine_desc_callback_invoke(&cb, result);
}
/**
* dmaengine_desc_callback_valid - verify the callback is valid in cb
* @cb: callback info struct
*
* Return a bool that verifies whether callback in cb is valid or not.
* No locking is required.
*/
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
return (cb->callback) ? true : false;
}
#endif
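
A sketch of the calling pattern these helpers are meant for, mirroring
the coh901318 and dw conversions elsewhere in this pull: snapshot the
callback under the channel lock, complete the cookie, then invoke the
callback with the lock dropped. my_chan and my_desc are hypothetical
driver types, not part of the API.

    struct my_desc {
            struct dma_async_tx_descriptor txd;
    };

    struct my_chan {
            spinlock_t lock;
            struct my_desc *active;
    };

    static void my_chan_tasklet(unsigned long data)
    {
            struct my_chan *c = (struct my_chan *)data;
            struct dmaengine_desc_callback cb;
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            /* grab the callback while the descriptor is still ours */
            dmaengine_desc_get_callback(&c->active->txd, &cb);
            dma_cookie_complete(&c->active->txd);
            spin_unlock_irqrestore(&c->lock, flags);

            /* invoke without the lock held; NULL makes callback_result
             * users see the dummy DMA_TRANS_NOERROR result */
            dmaengine_desc_callback_invoke(&cb, NULL);
    }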

23
drivers/dma/dmatest.c

@ -56,10 +56,10 @@ module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sg_buffers,
"Number of scatter gather buffers (default: 1)");
static unsigned int dmatest = 1;
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-slave_sg (default: 1)");
"dmatest 0-memcpy 1-slave_sg (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@ -426,7 +426,9 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
ktime_t ktime;
ktime_t ktime, start, diff;
ktime_t filltime = ktime_set(0, 0);
ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
@ -503,7 +505,7 @@ static int dmatest_func(void *data)
total_tests++;
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
align = dev->copy_align;
else if (thread->type == DMA_XOR)
align = dev->xor_align;
@ -531,6 +533,7 @@ static int dmatest_func(void *data)
src_off = 0;
dst_off = 0;
} else {
start = ktime_get();
src_off = dmatest_random() % (params->buf_size - len + 1);
dst_off = dmatest_random() % (params->buf_size - len + 1);
@ -541,6 +544,9 @@ static int dmatest_func(void *data)
params->buf_size);
dmatest_init_dsts(thread->dsts, dst_off, len,
params->buf_size);
diff = ktime_sub(ktime_get(), start);
filltime = ktime_add(filltime, diff);
}
um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
@ -683,6 +689,7 @@ static int dmatest_func(void *data)
continue;
}
start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
@ -703,6 +710,9 @@ static int dmatest_func(void *data)
params->buf_size, dst_off + len,
PATTERN_DST, false);
diff = ktime_sub(ktime_get(), start);
comparetime = ktime_add(comparetime, diff);
if (error_count) {
result("data error", total_tests, src_off, dst_off,
len, error_count);
@ -712,7 +722,10 @@ static int dmatest_func(void *data)
dst_off, len, 0);
}
}
runtime = ktime_us_delta(ktime_get(), ktime);
ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime);
ktime = ktime_sub(ktime, filltime);
runtime = ktime_to_us(ktime);
ret = 0;
err_dstbuf:

14
drivers/dma/dw/core.c

@ -274,20 +274,19 @@ static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
bool callback_required)
{
dma_async_tx_callback callback = NULL;
void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
unsigned long flags;
struct dmaengine_desc_callback cb;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
spin_lock_irqsave(&dwc->lock, flags);
dma_cookie_complete(txd);
if (callback_required) {
callback = txd->callback;
param = txd->callback_param;
}
if (callback_required)
dmaengine_desc_get_callback(txd, &cb);
else
memset(&cb, 0, sizeof(cb));
/* async_tx_ack */
list_for_each_entry(child, &desc->tx_list, desc_node)
@ -296,8 +295,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
callback(param);
dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)

38
drivers/dma/edma.c

@ -263,22 +263,29 @@ static const struct edmacc_param dummy_paramset = {
#define EDMA_BINDING_LEGACY 0
#define EDMA_BINDING_TPCC 1
static const u32 edma_binding_type[] = {
[EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
[EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
};
static const struct of_device_id edma_of_ids[] = {
{
.compatible = "ti,edma3",
.data = (void *)EDMA_BINDING_LEGACY,
.data = &edma_binding_type[EDMA_BINDING_LEGACY],
},
{
.compatible = "ti,edma3-tpcc",
.data = (void *)EDMA_BINDING_TPCC,
.data = &edma_binding_type[EDMA_BINDING_TPCC],
},
{}
};
MODULE_DEVICE_TABLE(of, edma_of_ids);
static const struct of_device_id edma_tptc_of_ids[] = {
{ .compatible = "ti,edma3-tptc", },
{}
};
MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
@ -405,18 +412,12 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
static inline void edma_set_bits(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
set_bit(offset + (len - 1), p);
}
static inline void clear_bits(int offset, int len, unsigned long *p)
{
for (; len > 0; len--)
clear_bit(offset + (len - 1), p);
}
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@ -464,13 +465,15 @@ static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}
static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
struct edmacc_param *param)
{
slot = EDMA_CHAN_SLOT(slot);
if (slot >= ecc->num_slots)
return;
return -EINVAL;
memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
return 0;
}
/**
@ -1476,13 +1479,15 @@ static void edma_error_handler(struct edma_chan *echan)
struct edma_cc *ecc = echan->ecc;
struct device *dev = echan->vchan.chan.device->dev;
struct edmacc_param p;
int err;
if (!echan->edesc)
return;
spin_lock(&echan->vchan.lock);
edma_read_slot(ecc, echan->slot[0], &p);
err = edma_read_slot(ecc, echan->slot[0], &p);
/*
* Issue later based on missed flag which will be sure
* to happen as:
@ -1495,7 +1500,7 @@ static void edma_error_handler(struct edma_chan *echan)
* lead to some nasty recursion when we are in a NULL
* slot. So we avoid doing so and set the missed flag.
*/
if (p.a_b_cnt == 0 && p.ccnt == 0) {
if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
dev_dbg(dev, "Error on null slot, setting miss\n");
echan->missed = 1;
} else {
@ -2019,8 +2024,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
{
struct edma_soc_info *info;
struct property *prop;
size_t sz;
int ret;
int sz, ret;
info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
if (!info)
@ -2182,7 +2186,7 @@ static int edma_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(edma_of_ids, node);
if (match && (u32)match->data == EDMA_BINDING_TPCC)
if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
legacy_mode = false;
info = edma_setup_info_from_dt(dev, legacy_mode);
@ -2260,7 +2264,7 @@ static int edma_probe(struct platform_device *pdev)
for (i = 0; rsv_slots[i][0] != -1; i++) {
off = rsv_slots[i][0];
ln = rsv_slots[i][1];
set_bits(off, ln, ecc->slot_inuse);
edma_set_bits(off, ln, ecc->slot_inuse);
}
}
}

28
drivers/dma/ep93xx_dma.c

@ -262,10 +262,8 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
if (list_empty(&edmac->active))
return NULL;
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
return list_first_entry_or_null(&edmac->active,
struct ep93xx_dma_desc, node);
}
/**
@ -739,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;