12ee67178SXianjun Jiao /* 22ee67178SXianjun Jiao * DMA driver for Xilinx Video DMA Engine 3*a6085186SLina Ceballos * SPDX-FileCopyrightText: Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved 4*a6085186SLina Ceballos * Based on the Freescale DMA driver 52ee67178SXianjun Jiao * Modified by Xianjun Jiao. [email protected]; [email protected] 6*a6085186SLina Ceballos * SPDX-License-Identifier: GPL-2.0-or-later 72ee67178SXianjun Jiao * 82ee67178SXianjun Jiao * Description: 92ee67178SXianjun Jiao * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP 102ee67178SXianjun Jiao * core that provides high-bandwidth direct memory access between memory 112ee67178SXianjun Jiao * and AXI4-Stream type video target peripherals. The core provides efficient 122ee67178SXianjun Jiao * two dimensional DMA operations with independent asynchronous read (S2MM) 132ee67178SXianjun Jiao * and write (MM2S) channel operation. It can be configured to have either 142ee67178SXianjun Jiao * one channel or two channels. If configured as two channels, one is to 152ee67178SXianjun Jiao * transmit to the video device (MM2S) and another is to receive from the 162ee67178SXianjun Jiao * video device (S2MM). Initialization, status, interrupt and management 172ee67178SXianjun Jiao * registers are accessed through an AXI4-Lite slave interface. 182ee67178SXianjun Jiao * 192ee67178SXianjun Jiao * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that 202ee67178SXianjun Jiao * provides high-bandwidth one dimensional direct memory access between memory 212ee67178SXianjun Jiao * and AXI4-Stream target peripherals. It supports one receive and one 222ee67178SXianjun Jiao * transmit channel, both of them optional at synthesis time. 232ee67178SXianjun Jiao * 242ee67178SXianjun Jiao * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory 252ee67178SXianjun Jiao * Access (DMA) between a memory-mapped source address and a memory-mapped 262ee67178SXianjun Jiao * destination address. 272ee67178SXianjun Jiao * 282ee67178SXianjun Jiao * This program is free software: you can redistribute it and/or modify 292ee67178SXianjun Jiao * it under the terms of the GNU General Public License as published by 302ee67178SXianjun Jiao * the Free Software Foundation, either version 2 of the License, or 312ee67178SXianjun Jiao * (at your option) any later version. 322ee67178SXianjun Jiao */ 332ee67178SXianjun Jiao 342ee67178SXianjun Jiao #include <linux/bitops.h> 352ee67178SXianjun Jiao #include <linux/dmapool.h> 362ee67178SXianjun Jiao #include <linux/dma/xilinx_dma.h> 372ee67178SXianjun Jiao #include <linux/init.h> 382ee67178SXianjun Jiao #include <linux/interrupt.h> 392ee67178SXianjun Jiao #include <linux/io.h> 402ee67178SXianjun Jiao #include <linux/iopoll.h> 412ee67178SXianjun Jiao #include <linux/module.h> 422ee67178SXianjun Jiao #include <linux/of_address.h> 432ee67178SXianjun Jiao #include <linux/of_dma.h> 442ee67178SXianjun Jiao #include <linux/of_platform.h> 452ee67178SXianjun Jiao #include <linux/of_irq.h> 462ee67178SXianjun Jiao #include <linux/slab.h> 472ee67178SXianjun Jiao #include <linux/clk.h> 482ee67178SXianjun Jiao #include <linux/io-64-nonatomic-lo-hi.h> 492ee67178SXianjun Jiao 502ee67178SXianjun Jiao #include "../dmaengine.h" 512ee67178SXianjun Jiao 522ee67178SXianjun Jiao /* Register/Descriptor Offsets */ 532ee67178SXianjun Jiao #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 542ee67178SXianjun Jiao #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 552ee67178SXianjun Jiao #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 562ee67178SXianjun Jiao #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 572ee67178SXianjun Jiao 582ee67178SXianjun Jiao /* Control Registers */ 592ee67178SXianjun Jiao #define XILINX_DMA_REG_DMACR 0x0000 602ee67178SXianjun Jiao #define XILINX_DMA_DMACR_DELAY_MAX 0xff 612ee67178SXianjun Jiao #define XILINX_DMA_DMACR_DELAY_SHIFT 24 622ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff 632ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 642ee67178SXianjun Jiao #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) 652ee67178SXianjun Jiao #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) 662ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) 672ee67178SXianjun Jiao #define XILINX_DMA_DMACR_MASTER_SHIFT 8 682ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 692ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) 702ee67178SXianjun Jiao #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) 712ee67178SXianjun Jiao #define XILINX_DMA_DMACR_RESET BIT(2) 722ee67178SXianjun Jiao #define XILINX_DMA_DMACR_CIRC_EN BIT(1) 732ee67178SXianjun Jiao #define XILINX_DMA_DMACR_RUNSTOP BIT(0) 742ee67178SXianjun Jiao #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) 752ee67178SXianjun Jiao 762ee67178SXianjun Jiao #define XILINX_DMA_REG_DMASR 0x0004 772ee67178SXianjun Jiao #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) 782ee67178SXianjun Jiao #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) 792ee67178SXianjun Jiao #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) 802ee67178SXianjun Jiao #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) 812ee67178SXianjun Jiao #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) 822ee67178SXianjun Jiao #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) 832ee67178SXianjun Jiao #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) 842ee67178SXianjun Jiao #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) 852ee67178SXianjun Jiao #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) 862ee67178SXianjun Jiao #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) 872ee67178SXianjun Jiao #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) 882ee67178SXianjun Jiao #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) 892ee67178SXianjun Jiao #define XILINX_DMA_DMASR_IDLE BIT(1) 902ee67178SXianjun Jiao #define XILINX_DMA_DMASR_HALTED BIT(0) 912ee67178SXianjun Jiao #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) 922ee67178SXianjun Jiao #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 932ee67178SXianjun Jiao 942ee67178SXianjun Jiao #define XILINX_DMA_REG_CURDESC 0x0008 952ee67178SXianjun Jiao #define XILINX_DMA_REG_TAILDESC 0x0010 962ee67178SXianjun Jiao #define XILINX_DMA_REG_REG_INDEX 0x0014 972ee67178SXianjun Jiao #define XILINX_DMA_REG_FRMSTORE 0x0018 982ee67178SXianjun Jiao #define XILINX_DMA_REG_THRESHOLD 0x001c 992ee67178SXianjun Jiao #define XILINX_DMA_REG_FRMPTR_STS 0x0024 1002ee67178SXianjun Jiao #define XILINX_DMA_REG_PARK_PTR 0x0028 1012ee67178SXianjun Jiao #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 1022ee67178SXianjun Jiao #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8) 1032ee67178SXianjun Jiao #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 1042ee67178SXianjun Jiao #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0) 1052ee67178SXianjun Jiao #define XILINX_DMA_REG_VDMA_VERSION 0x002c 1062ee67178SXianjun Jiao 1072ee67178SXianjun Jiao /* Register Direct Mode Registers */ 1082ee67178SXianjun Jiao #define XILINX_DMA_REG_VSIZE 0x0000 1092ee67178SXianjun Jiao #define XILINX_DMA_REG_HSIZE 0x0004 1102ee67178SXianjun Jiao 1112ee67178SXianjun Jiao #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 1122ee67178SXianjun Jiao #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 1132ee67178SXianjun Jiao #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 1142ee67178SXianjun Jiao 1152ee67178SXianjun Jiao #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 1162ee67178SXianjun Jiao #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 1172ee67178SXianjun Jiao 1182ee67178SXianjun Jiao #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec 1192ee67178SXianjun Jiao #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) 1202ee67178SXianjun Jiao 1212ee67178SXianjun Jiao /* HW specific definitions */ 122febc5adfSXianjun Jiao #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 1232ee67178SXianjun Jiao 1242ee67178SXianjun Jiao #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 1252ee67178SXianjun Jiao (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 1262ee67178SXianjun Jiao XILINX_DMA_DMASR_DLY_CNT_IRQ | \ 1272ee67178SXianjun Jiao XILINX_DMA_DMASR_ERR_IRQ) 1282ee67178SXianjun Jiao 1292ee67178SXianjun Jiao #define XILINX_DMA_DMASR_ALL_ERR_MASK \ 1302ee67178SXianjun Jiao (XILINX_DMA_DMASR_EOL_LATE_ERR | \ 1312ee67178SXianjun Jiao XILINX_DMA_DMASR_SOF_LATE_ERR | \ 1322ee67178SXianjun Jiao XILINX_DMA_DMASR_SG_DEC_ERR | \ 1332ee67178SXianjun Jiao XILINX_DMA_DMASR_SG_SLV_ERR | \ 1342ee67178SXianjun Jiao XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 1352ee67178SXianjun Jiao XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 1362ee67178SXianjun Jiao XILINX_DMA_DMASR_DMA_DEC_ERR | \ 1372ee67178SXianjun Jiao XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ 1382ee67178SXianjun Jiao XILINX_DMA_DMASR_DMA_INT_ERR) 1392ee67178SXianjun Jiao 1402ee67178SXianjun Jiao /* 1412ee67178SXianjun Jiao * Recoverable errors are DMA Internal error, SOF Early, EOF Early 1422ee67178SXianjun Jiao * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 1432ee67178SXianjun Jiao * is enabled in the h/w system. 1442ee67178SXianjun Jiao */ 1452ee67178SXianjun Jiao #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ 1462ee67178SXianjun Jiao (XILINX_DMA_DMASR_SOF_LATE_ERR | \ 1472ee67178SXianjun Jiao XILINX_DMA_DMASR_EOF_EARLY_ERR | \ 1482ee67178SXianjun Jiao XILINX_DMA_DMASR_SOF_EARLY_ERR | \ 1492ee67178SXianjun Jiao XILINX_DMA_DMASR_DMA_INT_ERR) 1502ee67178SXianjun Jiao 1512ee67178SXianjun Jiao /* Axi VDMA Flush on Fsync bits */ 1522ee67178SXianjun Jiao #define XILINX_DMA_FLUSH_S2MM 3 1532ee67178SXianjun Jiao #define XILINX_DMA_FLUSH_MM2S 2 1542ee67178SXianjun Jiao #define XILINX_DMA_FLUSH_BOTH 1 1552ee67178SXianjun Jiao 1562ee67178SXianjun Jiao /* Delay loop counter to prevent hardware failure */ 1572ee67178SXianjun Jiao #define XILINX_DMA_LOOP_COUNT 1000000 1582ee67178SXianjun Jiao 1592ee67178SXianjun Jiao /* AXI DMA Specific Registers/Offsets */ 1602ee67178SXianjun Jiao #define XILINX_DMA_REG_SRCDSTADDR 0x18 1612ee67178SXianjun Jiao #define XILINX_DMA_REG_BTT 0x28 1622ee67178SXianjun Jiao 1632ee67178SXianjun Jiao /* AXI DMA Specific Masks/Bit fields */ 1642ee67178SXianjun Jiao #define XILINX_DMA_MAX_TRANS_LEN_MIN 8 1652ee67178SXianjun Jiao #define XILINX_DMA_MAX_TRANS_LEN_MAX 23 1662ee67178SXianjun Jiao #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 1672ee67178SXianjun Jiao #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 1682ee67178SXianjun Jiao #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) 1692ee67178SXianjun Jiao #define XILINX_DMA_CR_COALESCE_SHIFT 16 1702ee67178SXianjun Jiao #define XILINX_DMA_BD_SOP BIT(27) 1712ee67178SXianjun Jiao #define XILINX_DMA_BD_EOP BIT(26) 1722ee67178SXianjun Jiao #define XILINX_DMA_COALESCE_MAX 255 1732ee67178SXianjun Jiao #define XILINX_DMA_NUM_DESCS 255 1742ee67178SXianjun Jiao #define XILINX_DMA_NUM_APP_WORDS 5 1752ee67178SXianjun Jiao 176febc5adfSXianjun Jiao /* Multi-Channel DMA Descriptor offsets*/ 177febc5adfSXianjun Jiao #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20) 178febc5adfSXianjun Jiao #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20) 179febc5adfSXianjun Jiao 180febc5adfSXianjun Jiao /* Multi-Channel DMA Masks/Shifts */ 181febc5adfSXianjun Jiao #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0) 182febc5adfSXianjun Jiao #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0) 183febc5adfSXianjun Jiao #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19) 184febc5adfSXianjun Jiao #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0) 185febc5adfSXianjun Jiao #define XILINX_DMA_BD_STRIDE_SHIFT 0 186febc5adfSXianjun Jiao #define XILINX_DMA_BD_VSIZE_SHIFT 19 187febc5adfSXianjun Jiao 1882ee67178SXianjun Jiao /* AXI CDMA Specific Registers/Offsets */ 1892ee67178SXianjun Jiao #define XILINX_CDMA_REG_SRCADDR 0x18 1902ee67178SXianjun Jiao #define XILINX_CDMA_REG_DSTADDR 0x20 1912ee67178SXianjun Jiao 1922ee67178SXianjun Jiao /* AXI CDMA Specific Masks */ 1932ee67178SXianjun Jiao #define XILINX_CDMA_CR_SGMODE BIT(3) 1942ee67178SXianjun Jiao 1952ee67178SXianjun Jiao /** 1962ee67178SXianjun Jiao * struct xilinx_vdma_desc_hw - Hardware Descriptor 1972ee67178SXianjun Jiao * @next_desc: Next Descriptor Pointer @0x00 1982ee67178SXianjun Jiao * @pad1: Reserved @0x04 1992ee67178SXianjun Jiao * @buf_addr: Buffer address @0x08 2002ee67178SXianjun Jiao * @buf_addr_msb: MSB of Buffer address @0x0C 2012ee67178SXianjun Jiao * @vsize: Vertical Size @0x10 2022ee67178SXianjun Jiao * @hsize: Horizontal Size @0x14 2032ee67178SXianjun Jiao * @stride: Number of bytes between the first 2042ee67178SXianjun Jiao * pixels of each horizontal line @0x18 2052ee67178SXianjun Jiao */ 2062ee67178SXianjun Jiao struct xilinx_vdma_desc_hw { 2072ee67178SXianjun Jiao u32 next_desc; 2082ee67178SXianjun Jiao u32 pad1; 2092ee67178SXianjun Jiao u32 buf_addr; 2102ee67178SXianjun Jiao u32 buf_addr_msb; 2112ee67178SXianjun Jiao u32 vsize; 2122ee67178SXianjun Jiao u32 hsize; 2132ee67178SXianjun Jiao u32 stride; 2142ee67178SXianjun Jiao } __aligned(64); 2152ee67178SXianjun Jiao 2162ee67178SXianjun Jiao /** 2172ee67178SXianjun Jiao * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 2182ee67178SXianjun Jiao * @next_desc: Next Descriptor Pointer @0x00 2192ee67178SXianjun Jiao * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 2202ee67178SXianjun Jiao * @buf_addr: Buffer address @0x08 2212ee67178SXianjun Jiao * @buf_addr_msb: MSB of Buffer address @0x0C 222febc5adfSXianjun Jiao * @mcdma_control: Control field for mcdma @0x10 223febc5adfSXianjun Jiao * @vsize_stride: Vsize and Stride field for mcdma @0x14 2242ee67178SXianjun Jiao * @control: Control field @0x18 2252ee67178SXianjun Jiao * @status: Status field @0x1C 2262ee67178SXianjun Jiao * @app: APP Fields @0x20 - 0x30 2272ee67178SXianjun Jiao */ 2282ee67178SXianjun Jiao struct xilinx_axidma_desc_hw { 2292ee67178SXianjun Jiao u32 next_desc; 2302ee67178SXianjun Jiao u32 next_desc_msb; 2312ee67178SXianjun Jiao u32 buf_addr; 2322ee67178SXianjun Jiao u32 buf_addr_msb; 233febc5adfSXianjun Jiao u32 mcdma_control; 234febc5adfSXianjun Jiao u32 vsize_stride; 2352ee67178SXianjun Jiao u32 control; 2362ee67178SXianjun Jiao u32 status; 2372ee67178SXianjun Jiao u32 app[XILINX_DMA_NUM_APP_WORDS]; 2382ee67178SXianjun Jiao } __aligned(64); 2392ee67178SXianjun Jiao 2402ee67178SXianjun Jiao /** 2412ee67178SXianjun Jiao * struct xilinx_cdma_desc_hw - Hardware Descriptor 2422ee67178SXianjun Jiao * @next_desc: Next Descriptor Pointer @0x00 2432ee67178SXianjun Jiao * @next_desc_msb: Next Descriptor Pointer MSB @0x04 2442ee67178SXianjun Jiao * @src_addr: Source address @0x08 2452ee67178SXianjun Jiao * @src_addr_msb: Source address MSB @0x0C 2462ee67178SXianjun Jiao * @dest_addr: Destination address @0x10 2472ee67178SXianjun Jiao * @dest_addr_msb: Destination address MSB @0x14 2482ee67178SXianjun Jiao * @control: Control field @0x18 2492ee67178SXianjun Jiao * @status: Status field @0x1C 2502ee67178SXianjun Jiao */ 2512ee67178SXianjun Jiao struct xilinx_cdma_desc_hw { 2522ee67178SXianjun Jiao u32 next_desc; 2532ee67178SXianjun Jiao u32 next_desc_msb; 2542ee67178SXianjun Jiao u32 src_addr; 2552ee67178SXianjun Jiao u32 src_addr_msb; 2562ee67178SXianjun Jiao u32 dest_addr; 2572ee67178SXianjun Jiao u32 dest_addr_msb; 2582ee67178SXianjun Jiao u32 control; 2592ee67178SXianjun Jiao u32 status; 2602ee67178SXianjun Jiao } __aligned(64); 2612ee67178SXianjun Jiao 2622ee67178SXianjun Jiao /** 2632ee67178SXianjun Jiao * struct xilinx_vdma_tx_segment - Descriptor segment 2642ee67178SXianjun Jiao * @hw: Hardware descriptor 2652ee67178SXianjun Jiao * @node: Node in the descriptor segments list 2662ee67178SXianjun Jiao * @phys: Physical address of segment 2672ee67178SXianjun Jiao */ 2682ee67178SXianjun Jiao struct xilinx_vdma_tx_segment { 2692ee67178SXianjun Jiao struct xilinx_vdma_desc_hw hw; 2702ee67178SXianjun Jiao struct list_head node; 2712ee67178SXianjun Jiao dma_addr_t phys; 2722ee67178SXianjun Jiao } __aligned(64); 2732ee67178SXianjun Jiao 2742ee67178SXianjun Jiao /** 2752ee67178SXianjun Jiao * struct xilinx_axidma_tx_segment - Descriptor segment 2762ee67178SXianjun Jiao * @hw: Hardware descriptor 2772ee67178SXianjun Jiao * @node: Node in the descriptor segments list 2782ee67178SXianjun Jiao * @phys: Physical address of segment 2792ee67178SXianjun Jiao */ 2802ee67178SXianjun Jiao struct xilinx_axidma_tx_segment { 2812ee67178SXianjun Jiao struct xilinx_axidma_desc_hw hw; 2822ee67178SXianjun Jiao struct list_head node; 2832ee67178SXianjun Jiao dma_addr_t phys; 2842ee67178SXianjun Jiao } __aligned(64); 2852ee67178SXianjun Jiao 2862ee67178SXianjun Jiao /** 2872ee67178SXianjun Jiao * struct xilinx_cdma_tx_segment - Descriptor segment 2882ee67178SXianjun Jiao * @hw: Hardware descriptor 2892ee67178SXianjun Jiao * @node: Node in the descriptor segments list 2902ee67178SXianjun Jiao * @phys: Physical address of segment 2912ee67178SXianjun Jiao */ 2922ee67178SXianjun Jiao struct xilinx_cdma_tx_segment { 2932ee67178SXianjun Jiao struct xilinx_cdma_desc_hw hw; 2942ee67178SXianjun Jiao struct list_head node; 2952ee67178SXianjun Jiao dma_addr_t phys; 2962ee67178SXianjun Jiao } __aligned(64); 2972ee67178SXianjun Jiao 2982ee67178SXianjun Jiao /** 2992ee67178SXianjun Jiao * struct xilinx_dma_tx_descriptor - Per Transaction structure 3002ee67178SXianjun Jiao * @async_tx: Async transaction descriptor 3012ee67178SXianjun Jiao * @segments: TX segments list 3022ee67178SXianjun Jiao * @node: Node in the channel descriptors list 3032ee67178SXianjun Jiao * @cyclic: Check for cyclic transfers. 3042ee67178SXianjun Jiao */ 3052ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor { 3062ee67178SXianjun Jiao struct dma_async_tx_descriptor async_tx; 3072ee67178SXianjun Jiao struct list_head segments; 3082ee67178SXianjun Jiao struct list_head node; 3092ee67178SXianjun Jiao bool cyclic; 3102ee67178SXianjun Jiao }; 3112ee67178SXianjun Jiao 3122ee67178SXianjun Jiao /** 3132ee67178SXianjun Jiao * struct xilinx_dma_chan - Driver specific DMA channel structure 3142ee67178SXianjun Jiao * @xdev: Driver specific device structure 3152ee67178SXianjun Jiao * @ctrl_offset: Control registers offset 3162ee67178SXianjun Jiao * @desc_offset: TX descriptor registers offset 3172ee67178SXianjun Jiao * @lock: Descriptor operation lock 3182ee67178SXianjun Jiao * @pending_list: Descriptors waiting 3192ee67178SXianjun Jiao * @active_list: Descriptors ready to submit 3202ee67178SXianjun Jiao * @done_list: Complete descriptors 3212ee67178SXianjun Jiao * @free_seg_list: Free descriptors 3222ee67178SXianjun Jiao * @common: DMA common channel 3232ee67178SXianjun Jiao * @desc_pool: Descriptors pool 3242ee67178SXianjun Jiao * @dev: The dma device 3252ee67178SXianjun Jiao * @irq: Channel IRQ 3262ee67178SXianjun Jiao * @id: Channel ID 3272ee67178SXianjun Jiao * @direction: Transfer direction 3282ee67178SXianjun Jiao * @num_frms: Number of frames 3292ee67178SXianjun Jiao * @has_sg: Support scatter transfers 3302ee67178SXianjun Jiao * @cyclic: Check for cyclic transfers. 3312ee67178SXianjun Jiao * @genlock: Support genlock mode 3322ee67178SXianjun Jiao * @err: Channel has errors 3332ee67178SXianjun Jiao * @idle: Check for channel idle 3342ee67178SXianjun Jiao * @tasklet: Cleanup work after irq 3352ee67178SXianjun Jiao * @config: Device configuration info 3362ee67178SXianjun Jiao * @flush_on_fsync: Flush on Frame sync 3372ee67178SXianjun Jiao * @desc_pendingcount: Descriptor pending count 3382ee67178SXianjun Jiao * @ext_addr: Indicates 64 bit addressing is supported by dma channel 3392ee67178SXianjun Jiao * @desc_submitcount: Descriptor h/w submitted count 340febc5adfSXianjun Jiao * @residue: Residue for AXI DMA 3412ee67178SXianjun Jiao * @seg_v: Statically allocated segments base 3422ee67178SXianjun Jiao * @seg_p: Physical allocated segments base 3432ee67178SXianjun Jiao * @cyclic_seg_v: Statically allocated segment base for cyclic transfers 3442ee67178SXianjun Jiao * @cyclic_seg_p: Physical allocated segments base for cyclic dma 3452ee67178SXianjun Jiao * @start_transfer: Differentiate b/w DMA IP's transfer 3462ee67178SXianjun Jiao * @stop_transfer: Differentiate b/w DMA IP's quiesce 347febc5adfSXianjun Jiao * @tdest: TDEST value for mcdma 3482ee67178SXianjun Jiao * @has_vflip: S2MM vertical flip 3492ee67178SXianjun Jiao */ 3502ee67178SXianjun Jiao struct xilinx_dma_chan { 3512ee67178SXianjun Jiao struct xilinx_dma_device *xdev; 3522ee67178SXianjun Jiao u32 ctrl_offset; 3532ee67178SXianjun Jiao u32 desc_offset; 3542ee67178SXianjun Jiao spinlock_t lock; 3552ee67178SXianjun Jiao struct list_head pending_list; 3562ee67178SXianjun Jiao struct list_head active_list; 3572ee67178SXianjun Jiao struct list_head done_list; 3582ee67178SXianjun Jiao struct list_head free_seg_list; 3592ee67178SXianjun Jiao struct dma_chan common; 3602ee67178SXianjun Jiao struct dma_pool *desc_pool; 3612ee67178SXianjun Jiao struct device *dev; 3622ee67178SXianjun Jiao int irq; 3632ee67178SXianjun Jiao int id; 3642ee67178SXianjun Jiao enum dma_transfer_direction direction; 3652ee67178SXianjun Jiao int num_frms; 3662ee67178SXianjun Jiao bool has_sg; 3672ee67178SXianjun Jiao bool cyclic; 3682ee67178SXianjun Jiao bool genlock; 3692ee67178SXianjun Jiao bool err; 3702ee67178SXianjun Jiao bool idle; 3712ee67178SXianjun Jiao struct tasklet_struct tasklet; 3722ee67178SXianjun Jiao struct xilinx_vdma_config config; 3732ee67178SXianjun Jiao bool flush_on_fsync; 3742ee67178SXianjun Jiao u32 desc_pendingcount; 3752ee67178SXianjun Jiao bool ext_addr; 3762ee67178SXianjun Jiao u32 desc_submitcount; 377febc5adfSXianjun Jiao u32 residue; 3782ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *seg_v; 3792ee67178SXianjun Jiao dma_addr_t seg_p; 3802ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *cyclic_seg_v; 3812ee67178SXianjun Jiao dma_addr_t cyclic_seg_p; 3822ee67178SXianjun Jiao void (*start_transfer)(struct xilinx_dma_chan *chan); 3832ee67178SXianjun Jiao int (*stop_transfer)(struct xilinx_dma_chan *chan); 384febc5adfSXianjun Jiao u16 tdest; 3852ee67178SXianjun Jiao bool has_vflip; 3862ee67178SXianjun Jiao u32 buf_idx; // each irq this value increase 1. in cyclic mode, we use residue return this idx via device_tx_status/xilinx_dma_tx_status 3872ee67178SXianjun Jiao }; 3882ee67178SXianjun Jiao 3892ee67178SXianjun Jiao /** 3902ee67178SXianjun Jiao * enum xdma_ip_type - DMA IP type. 3912ee67178SXianjun Jiao * 3922ee67178SXianjun Jiao * @XDMA_TYPE_AXIDMA: Axi dma ip. 3932ee67178SXianjun Jiao * @XDMA_TYPE_CDMA: Axi cdma ip. 3942ee67178SXianjun Jiao * @XDMA_TYPE_VDMA: Axi vdma ip. 3952ee67178SXianjun Jiao * 3962ee67178SXianjun Jiao */ 3972ee67178SXianjun Jiao enum xdma_ip_type { 3982ee67178SXianjun Jiao XDMA_TYPE_AXIDMA = 0, 3992ee67178SXianjun Jiao XDMA_TYPE_CDMA, 4002ee67178SXianjun Jiao XDMA_TYPE_VDMA, 4012ee67178SXianjun Jiao }; 4022ee67178SXianjun Jiao 4032ee67178SXianjun Jiao struct xilinx_dma_config { 4042ee67178SXianjun Jiao enum xdma_ip_type dmatype; 4052ee67178SXianjun Jiao int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, 4062ee67178SXianjun Jiao struct clk **tx_clk, struct clk **txs_clk, 4072ee67178SXianjun Jiao struct clk **rx_clk, struct clk **rxs_clk); 4082ee67178SXianjun Jiao }; 4092ee67178SXianjun Jiao 4102ee67178SXianjun Jiao /** 4112ee67178SXianjun Jiao * struct xilinx_dma_device - DMA device structure 4122ee67178SXianjun Jiao * @regs: I/O mapped base address 4132ee67178SXianjun Jiao * @dev: Device Structure 4142ee67178SXianjun Jiao * @common: DMA device structure 4152ee67178SXianjun Jiao * @chan: Driver specific DMA channel 4162ee67178SXianjun Jiao * @has_sg: Specifies whether Scatter-Gather is present or not 417febc5adfSXianjun Jiao * @mcdma: Specifies whether Multi-Channel is present or not 4182ee67178SXianjun Jiao * @flush_on_fsync: Flush on frame sync 4192ee67178SXianjun Jiao * @ext_addr: Indicates 64 bit addressing is supported by dma device 4202ee67178SXianjun Jiao * @pdev: Platform device structure pointer 4212ee67178SXianjun Jiao * @dma_config: DMA config structure 4222ee67178SXianjun Jiao * @axi_clk: DMA Axi4-lite interace clock 4232ee67178SXianjun Jiao * @tx_clk: DMA mm2s clock 4242ee67178SXianjun Jiao * @txs_clk: DMA mm2s stream clock 4252ee67178SXianjun Jiao * @rx_clk: DMA s2mm clock 4262ee67178SXianjun Jiao * @rxs_clk: DMA s2mm stream clock 4272ee67178SXianjun Jiao * @nr_channels: Number of channels DMA device supports 4282ee67178SXianjun Jiao * @chan_id: DMA channel identifier 4292ee67178SXianjun Jiao * @max_buffer_len: Max buffer length 4302ee67178SXianjun Jiao */ 4312ee67178SXianjun Jiao struct xilinx_dma_device { 4322ee67178SXianjun Jiao void __iomem *regs; 4332ee67178SXianjun Jiao struct device *dev; 4342ee67178SXianjun Jiao struct dma_device common; 4352ee67178SXianjun Jiao struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; 4362ee67178SXianjun Jiao bool has_sg; 437febc5adfSXianjun Jiao bool mcdma; 4382ee67178SXianjun Jiao u32 flush_on_fsync; 4392ee67178SXianjun Jiao bool ext_addr; 4402ee67178SXianjun Jiao struct platform_device *pdev; 4412ee67178SXianjun Jiao const struct xilinx_dma_config *dma_config; 4422ee67178SXianjun Jiao struct clk *axi_clk; 4432ee67178SXianjun Jiao struct clk *tx_clk; 4442ee67178SXianjun Jiao struct clk *txs_clk; 4452ee67178SXianjun Jiao struct clk *rx_clk; 4462ee67178SXianjun Jiao struct clk *rxs_clk; 4472ee67178SXianjun Jiao u32 nr_channels; 4482ee67178SXianjun Jiao u32 chan_id; 4492ee67178SXianjun Jiao u32 max_buffer_len; 4502ee67178SXianjun Jiao }; 4512ee67178SXianjun Jiao 4522ee67178SXianjun Jiao /* Macros */ 4532ee67178SXianjun Jiao #define to_xilinx_chan(chan) \ 4542ee67178SXianjun Jiao container_of(chan, struct xilinx_dma_chan, common) 4552ee67178SXianjun Jiao #define to_dma_tx_descriptor(tx) \ 4562ee67178SXianjun Jiao container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) 4572ee67178SXianjun Jiao #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ 4582ee67178SXianjun Jiao readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ 4592ee67178SXianjun Jiao cond, delay_us, timeout_us) 4602ee67178SXianjun Jiao 4612ee67178SXianjun Jiao /* IO accessors */ 4622ee67178SXianjun Jiao static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) 4632ee67178SXianjun Jiao { 4642ee67178SXianjun Jiao return ioread32(chan->xdev->regs + reg); 4652ee67178SXianjun Jiao } 4662ee67178SXianjun Jiao 4672ee67178SXianjun Jiao static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) 4682ee67178SXianjun Jiao { 4692ee67178SXianjun Jiao iowrite32(value, chan->xdev->regs + reg); 4702ee67178SXianjun Jiao } 4712ee67178SXianjun Jiao 4722ee67178SXianjun Jiao static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, 4732ee67178SXianjun Jiao u32 value) 4742ee67178SXianjun Jiao { 4752ee67178SXianjun Jiao dma_write(chan, chan->desc_offset + reg, value); 4762ee67178SXianjun Jiao } 4772ee67178SXianjun Jiao 4782ee67178SXianjun Jiao static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) 4792ee67178SXianjun Jiao { 4802ee67178SXianjun Jiao return dma_read(chan, chan->ctrl_offset + reg); 4812ee67178SXianjun Jiao } 4822ee67178SXianjun Jiao 4832ee67178SXianjun Jiao static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, 4842ee67178SXianjun Jiao u32 value) 4852ee67178SXianjun Jiao { 4862ee67178SXianjun Jiao dma_write(chan, chan->ctrl_offset + reg, value); 4872ee67178SXianjun Jiao } 4882ee67178SXianjun Jiao 4892ee67178SXianjun Jiao static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, 4902ee67178SXianjun Jiao u32 clr) 4912ee67178SXianjun Jiao { 4922ee67178SXianjun Jiao dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); 4932ee67178SXianjun Jiao } 4942ee67178SXianjun Jiao 4952ee67178SXianjun Jiao static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, 4962ee67178SXianjun Jiao u32 set) 4972ee67178SXianjun Jiao { 4982ee67178SXianjun Jiao dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); 4992ee67178SXianjun Jiao } 5002ee67178SXianjun Jiao 5012ee67178SXianjun Jiao /** 5022ee67178SXianjun Jiao * vdma_desc_write_64 - 64-bit descriptor write 5032ee67178SXianjun Jiao * @chan: Driver specific VDMA channel 5042ee67178SXianjun Jiao * @reg: Register to write 5052ee67178SXianjun Jiao * @value_lsb: lower address of the descriptor. 5062ee67178SXianjun Jiao * @value_msb: upper address of the descriptor. 5072ee67178SXianjun Jiao * 5082ee67178SXianjun Jiao * Since vdma driver is trying to write to a register offset which is not a 5092ee67178SXianjun Jiao * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits 5102ee67178SXianjun Jiao * instead of a single 64 bit register write. 5112ee67178SXianjun Jiao */ 5122ee67178SXianjun Jiao static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, 5132ee67178SXianjun Jiao u32 value_lsb, u32 value_msb) 5142ee67178SXianjun Jiao { 5152ee67178SXianjun Jiao /* Write the lsb 32 bits*/ 5162ee67178SXianjun Jiao writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg); 5172ee67178SXianjun Jiao 5182ee67178SXianjun Jiao /* Write the msb 32 bits */ 5192ee67178SXianjun Jiao writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 5202ee67178SXianjun Jiao } 5212ee67178SXianjun Jiao 5222ee67178SXianjun Jiao static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value) 5232ee67178SXianjun Jiao { 5242ee67178SXianjun Jiao lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg); 5252ee67178SXianjun Jiao } 5262ee67178SXianjun Jiao 5272ee67178SXianjun Jiao static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg, 5282ee67178SXianjun Jiao dma_addr_t addr) 5292ee67178SXianjun Jiao { 5302ee67178SXianjun Jiao if (chan->ext_addr) 5312ee67178SXianjun Jiao dma_writeq(chan, reg, addr); 5322ee67178SXianjun Jiao else 5332ee67178SXianjun Jiao dma_ctrl_write(chan, reg, addr); 5342ee67178SXianjun Jiao } 5352ee67178SXianjun Jiao 5362ee67178SXianjun Jiao static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan, 5372ee67178SXianjun Jiao struct xilinx_axidma_desc_hw *hw, 5382ee67178SXianjun Jiao dma_addr_t buf_addr, size_t sg_used, 5392ee67178SXianjun Jiao size_t period_len) 5402ee67178SXianjun Jiao { 5412ee67178SXianjun Jiao if (chan->ext_addr) { 5422ee67178SXianjun Jiao hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len); 5432ee67178SXianjun Jiao hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used + 5442ee67178SXianjun Jiao period_len); 5452ee67178SXianjun Jiao } else { 5462ee67178SXianjun Jiao hw->buf_addr = buf_addr + sg_used + period_len; 5472ee67178SXianjun Jiao } 5482ee67178SXianjun Jiao } 5492ee67178SXianjun Jiao 5502ee67178SXianjun Jiao /* ----------------------------------------------------------------------------- 5512ee67178SXianjun Jiao * Descriptors and segments alloc and free 5522ee67178SXianjun Jiao */ 5532ee67178SXianjun Jiao 5542ee67178SXianjun Jiao /** 5552ee67178SXianjun Jiao * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 5562ee67178SXianjun Jiao * @chan: Driver specific DMA channel 5572ee67178SXianjun Jiao * 5582ee67178SXianjun Jiao * Return: The allocated segment on success and NULL on failure. 5592ee67178SXianjun Jiao */ 5602ee67178SXianjun Jiao static struct xilinx_vdma_tx_segment * 5612ee67178SXianjun Jiao xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 5622ee67178SXianjun Jiao { 5632ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *segment; 5642ee67178SXianjun Jiao dma_addr_t phys; 5652ee67178SXianjun Jiao 5662ee67178SXianjun Jiao segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 5672ee67178SXianjun Jiao if (!segment) 5682ee67178SXianjun Jiao return NULL; 5692ee67178SXianjun Jiao 5702ee67178SXianjun Jiao segment->phys = phys; 5712ee67178SXianjun Jiao 5722ee67178SXianjun Jiao return segment; 5732ee67178SXianjun Jiao } 5742ee67178SXianjun Jiao 5752ee67178SXianjun Jiao /** 5762ee67178SXianjun Jiao * xilinx_cdma_alloc_tx_segment - Allocate transaction segment 5772ee67178SXianjun Jiao * @chan: Driver specific DMA channel 5782ee67178SXianjun Jiao * 5792ee67178SXianjun Jiao * Return: The allocated segment on success and NULL on failure. 5802ee67178SXianjun Jiao */ 5812ee67178SXianjun Jiao static struct xilinx_cdma_tx_segment * 5822ee67178SXianjun Jiao xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) 5832ee67178SXianjun Jiao { 5842ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *segment; 5852ee67178SXianjun Jiao dma_addr_t phys; 5862ee67178SXianjun Jiao 5872ee67178SXianjun Jiao segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); 5882ee67178SXianjun Jiao if (!segment) 5892ee67178SXianjun Jiao return NULL; 5902ee67178SXianjun Jiao 5912ee67178SXianjun Jiao segment->phys = phys; 5922ee67178SXianjun Jiao 5932ee67178SXianjun Jiao return segment; 5942ee67178SXianjun Jiao } 5952ee67178SXianjun Jiao 5962ee67178SXianjun Jiao /** 5972ee67178SXianjun Jiao * xilinx_axidma_alloc_tx_segment - Allocate transaction segment 5982ee67178SXianjun Jiao * @chan: Driver specific DMA channel 5992ee67178SXianjun Jiao * 6002ee67178SXianjun Jiao * Return: The allocated segment on success and NULL on failure. 6012ee67178SXianjun Jiao */ 6022ee67178SXianjun Jiao static struct xilinx_axidma_tx_segment * 6032ee67178SXianjun Jiao xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) 6042ee67178SXianjun Jiao { 6052ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *segment = NULL; 6062ee67178SXianjun Jiao unsigned long flags; 6072ee67178SXianjun Jiao 6082ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 6092ee67178SXianjun Jiao if (!list_empty(&chan->free_seg_list)) { 6102ee67178SXianjun Jiao segment = list_first_entry(&chan->free_seg_list, 6112ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, 6122ee67178SXianjun Jiao node); 6132ee67178SXianjun Jiao list_del(&segment->node); 6142ee67178SXianjun Jiao } 6152ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 6162ee67178SXianjun Jiao 6172ee67178SXianjun Jiao return segment; 6182ee67178SXianjun Jiao } 6192ee67178SXianjun Jiao 6202ee67178SXianjun Jiao static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) 6212ee67178SXianjun Jiao { 6222ee67178SXianjun Jiao u32 next_desc = hw->next_desc; 6232ee67178SXianjun Jiao u32 next_desc_msb = hw->next_desc_msb; 6242ee67178SXianjun Jiao 6252ee67178SXianjun Jiao memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); 6262ee67178SXianjun Jiao 6272ee67178SXianjun Jiao hw->next_desc = next_desc; 6282ee67178SXianjun Jiao hw->next_desc_msb = next_desc_msb; 6292ee67178SXianjun Jiao } 6302ee67178SXianjun Jiao 6312ee67178SXianjun Jiao /** 6322ee67178SXianjun Jiao * xilinx_dma_free_tx_segment - Free transaction segment 6332ee67178SXianjun Jiao * @chan: Driver specific DMA channel 6342ee67178SXianjun Jiao * @segment: DMA transaction segment 6352ee67178SXianjun Jiao */ 6362ee67178SXianjun Jiao static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, 6372ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *segment) 6382ee67178SXianjun Jiao { 6392ee67178SXianjun Jiao xilinx_dma_clean_hw_desc(&segment->hw); 6402ee67178SXianjun Jiao 6412ee67178SXianjun Jiao list_add_tail(&segment->node, &chan->free_seg_list); 6422ee67178SXianjun Jiao } 6432ee67178SXianjun Jiao 6442ee67178SXianjun Jiao /** 6452ee67178SXianjun Jiao * xilinx_cdma_free_tx_segment - Free transaction segment 6462ee67178SXianjun Jiao * @chan: Driver specific DMA channel 6472ee67178SXianjun Jiao * @segment: DMA transaction segment 6482ee67178SXianjun Jiao */ 6492ee67178SXianjun Jiao static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan, 6502ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *segment) 6512ee67178SXianjun Jiao { 6522ee67178SXianjun Jiao dma_pool_free(chan->desc_pool, segment, segment->phys); 6532ee67178SXianjun Jiao } 6542ee67178SXianjun Jiao 6552ee67178SXianjun Jiao /** 6562ee67178SXianjun Jiao * xilinx_vdma_free_tx_segment - Free transaction segment 6572ee67178SXianjun Jiao * @chan: Driver specific DMA channel 6582ee67178SXianjun Jiao * @segment: DMA transaction segment 6592ee67178SXianjun Jiao */ 6602ee67178SXianjun Jiao static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, 6612ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *segment) 6622ee67178SXianjun Jiao { 6632ee67178SXianjun Jiao dma_pool_free(chan->desc_pool, segment, segment->phys); 6642ee67178SXianjun Jiao } 6652ee67178SXianjun Jiao 6662ee67178SXianjun Jiao /** 6672ee67178SXianjun Jiao * xilinx_dma_tx_descriptor - Allocate transaction descriptor 6682ee67178SXianjun Jiao * @chan: Driver specific DMA channel 6692ee67178SXianjun Jiao * 6702ee67178SXianjun Jiao * Return: The allocated descriptor on success and NULL on failure. 6712ee67178SXianjun Jiao */ 6722ee67178SXianjun Jiao static struct xilinx_dma_tx_descriptor * 6732ee67178SXianjun Jiao xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) 6742ee67178SXianjun Jiao { 6752ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 6762ee67178SXianjun Jiao 6772ee67178SXianjun Jiao desc = kzalloc(sizeof(*desc), GFP_KERNEL); 6782ee67178SXianjun Jiao if (!desc) 6792ee67178SXianjun Jiao return NULL; 6802ee67178SXianjun Jiao 6812ee67178SXianjun Jiao INIT_LIST_HEAD(&desc->segments); 6822ee67178SXianjun Jiao 6832ee67178SXianjun Jiao return desc; 6842ee67178SXianjun Jiao } 6852ee67178SXianjun Jiao 6862ee67178SXianjun Jiao /** 6872ee67178SXianjun Jiao * xilinx_dma_free_tx_descriptor - Free transaction descriptor 6882ee67178SXianjun Jiao * @chan: Driver specific DMA channel 6892ee67178SXianjun Jiao * @desc: DMA transaction descriptor 6902ee67178SXianjun Jiao */ 6912ee67178SXianjun Jiao static void 6922ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, 6932ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc) 6942ee67178SXianjun Jiao { 6952ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *segment, *next; 6962ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next; 6972ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next; 6982ee67178SXianjun Jiao 6992ee67178SXianjun Jiao if (!desc) 7002ee67178SXianjun Jiao return; 7012ee67178SXianjun Jiao 7022ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 7032ee67178SXianjun Jiao list_for_each_entry_safe(segment, next, &desc->segments, node) { 7042ee67178SXianjun Jiao list_del(&segment->node); 7052ee67178SXianjun Jiao xilinx_vdma_free_tx_segment(chan, segment); 7062ee67178SXianjun Jiao } 7072ee67178SXianjun Jiao } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 7082ee67178SXianjun Jiao list_for_each_entry_safe(cdma_segment, cdma_next, 7092ee67178SXianjun Jiao &desc->segments, node) { 7102ee67178SXianjun Jiao list_del(&cdma_segment->node); 7112ee67178SXianjun Jiao xilinx_cdma_free_tx_segment(chan, cdma_segment); 7122ee67178SXianjun Jiao } 7132ee67178SXianjun Jiao } else { 7142ee67178SXianjun Jiao list_for_each_entry_safe(axidma_segment, axidma_next, 7152ee67178SXianjun Jiao &desc->segments, node) { 7162ee67178SXianjun Jiao list_del(&axidma_segment->node); 7172ee67178SXianjun Jiao xilinx_dma_free_tx_segment(chan, axidma_segment); 7182ee67178SXianjun Jiao } 7192ee67178SXianjun Jiao } 7202ee67178SXianjun Jiao 7212ee67178SXianjun Jiao kfree(desc); 7222ee67178SXianjun Jiao } 7232ee67178SXianjun Jiao 7242ee67178SXianjun Jiao /* Required functions */ 7252ee67178SXianjun Jiao 7262ee67178SXianjun Jiao /** 7272ee67178SXianjun Jiao * xilinx_dma_free_desc_list - Free descriptors list 7282ee67178SXianjun Jiao * @chan: Driver specific DMA channel 7292ee67178SXianjun Jiao * @list: List to parse and delete the descriptor 7302ee67178SXianjun Jiao */ 7312ee67178SXianjun Jiao static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, 7322ee67178SXianjun Jiao struct list_head *list) 7332ee67178SXianjun Jiao { 7342ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc, *next; 7352ee67178SXianjun Jiao 7362ee67178SXianjun Jiao list_for_each_entry_safe(desc, next, list, node) { 7372ee67178SXianjun Jiao list_del(&desc->node); 7382ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 7392ee67178SXianjun Jiao } 7402ee67178SXianjun Jiao } 7412ee67178SXianjun Jiao 7422ee67178SXianjun Jiao /** 7432ee67178SXianjun Jiao * xilinx_dma_free_descriptors - Free channel descriptors 7442ee67178SXianjun Jiao * @chan: Driver specific DMA channel 7452ee67178SXianjun Jiao */ 7462ee67178SXianjun Jiao static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) 7472ee67178SXianjun Jiao { 7482ee67178SXianjun Jiao unsigned long flags; 7492ee67178SXianjun Jiao 7502ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 7512ee67178SXianjun Jiao 7522ee67178SXianjun Jiao xilinx_dma_free_desc_list(chan, &chan->pending_list); 7532ee67178SXianjun Jiao xilinx_dma_free_desc_list(chan, &chan->done_list); 7542ee67178SXianjun Jiao xilinx_dma_free_desc_list(chan, &chan->active_list); 7552ee67178SXianjun Jiao 7562ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 7572ee67178SXianjun Jiao } 7582ee67178SXianjun Jiao 7592ee67178SXianjun Jiao /** 7602ee67178SXianjun Jiao * xilinx_dma_free_chan_resources - Free channel resources 7612ee67178SXianjun Jiao * @dchan: DMA channel 7622ee67178SXianjun Jiao */ 7632ee67178SXianjun Jiao static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) 7642ee67178SXianjun Jiao { 7652ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 7662ee67178SXianjun Jiao unsigned long flags; 7672ee67178SXianjun Jiao 7682ee67178SXianjun Jiao dev_dbg(chan->dev, "Free all channel resources.\n"); 7692ee67178SXianjun Jiao 7702ee67178SXianjun Jiao xilinx_dma_free_descriptors(chan); 7712ee67178SXianjun Jiao 7722ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 7732ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 7742ee67178SXianjun Jiao INIT_LIST_HEAD(&chan->free_seg_list); 7752ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 7762ee67178SXianjun Jiao 7772ee67178SXianjun Jiao /* Free memory that is allocated for BD */ 7782ee67178SXianjun Jiao dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 7792ee67178SXianjun Jiao XILINX_DMA_NUM_DESCS, chan->seg_v, 7802ee67178SXianjun Jiao chan->seg_p); 7812ee67178SXianjun Jiao 7822ee67178SXianjun Jiao /* Free Memory that is allocated for cyclic DMA Mode */ 7832ee67178SXianjun Jiao dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), 7842ee67178SXianjun Jiao chan->cyclic_seg_v, chan->cyclic_seg_p); 7852ee67178SXianjun Jiao } 7862ee67178SXianjun Jiao 7872ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { 7882ee67178SXianjun Jiao dma_pool_destroy(chan->desc_pool); 7892ee67178SXianjun Jiao chan->desc_pool = NULL; 7902ee67178SXianjun Jiao } 7912ee67178SXianjun Jiao } 7922ee67178SXianjun Jiao 7932ee67178SXianjun Jiao /** 7942ee67178SXianjun Jiao * xilinx_dma_chan_handle_cyclic - Cyclic dma callback 7952ee67178SXianjun Jiao * @chan: Driver specific dma channel 7962ee67178SXianjun Jiao * @desc: dma transaction descriptor 7972ee67178SXianjun Jiao * @flags: flags for spin lock 7982ee67178SXianjun Jiao */ 7992ee67178SXianjun Jiao static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan, 8002ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc, 8012ee67178SXianjun Jiao unsigned long *flags) 8022ee67178SXianjun Jiao { 8032ee67178SXianjun Jiao dma_async_tx_callback callback; 8042ee67178SXianjun Jiao void *callback_param; 8052ee67178SXianjun Jiao 8062ee67178SXianjun Jiao callback = desc->async_tx.callback; 8072ee67178SXianjun Jiao callback_param = desc->async_tx.callback_param; 8082ee67178SXianjun Jiao if (callback) { 8092ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, *flags); 8102ee67178SXianjun Jiao callback(callback_param); 8112ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, *flags); 8122ee67178SXianjun Jiao } 8132ee67178SXianjun Jiao } 8142ee67178SXianjun Jiao 8152ee67178SXianjun Jiao /** 8162ee67178SXianjun Jiao * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 8172ee67178SXianjun Jiao * @chan: Driver specific DMA channel 8182ee67178SXianjun Jiao */ 8192ee67178SXianjun Jiao static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) 8202ee67178SXianjun Jiao { 8212ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc, *next; 8222ee67178SXianjun Jiao unsigned long flags; 8232ee67178SXianjun Jiao 8242ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 8252ee67178SXianjun Jiao 8262ee67178SXianjun Jiao list_for_each_entry_safe(desc, next, &chan->done_list, node) { 827febc5adfSXianjun Jiao struct dmaengine_desc_callback cb; 8282ee67178SXianjun Jiao 8292ee67178SXianjun Jiao if (desc->cyclic) { 8302ee67178SXianjun Jiao xilinx_dma_chan_handle_cyclic(chan, desc, &flags); 8312ee67178SXianjun Jiao break; 8322ee67178SXianjun Jiao } 8332ee67178SXianjun Jiao 8342ee67178SXianjun Jiao /* Remove from the list of running transactions */ 8352ee67178SXianjun Jiao list_del(&desc->node); 8362ee67178SXianjun Jiao 8372ee67178SXianjun Jiao /* Run the link descriptor callback function */ 838febc5adfSXianjun Jiao dmaengine_desc_get_callback(&desc->async_tx, &cb); 839febc5adfSXianjun Jiao if (dmaengine_desc_callback_valid(&cb)) { 8402ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 841febc5adfSXianjun Jiao dmaengine_desc_callback_invoke(&cb, NULL); 8422ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 843febc5adfSXianjun Jiao } 8442ee67178SXianjun Jiao 8452ee67178SXianjun Jiao /* Run any dependencies, then free the descriptor */ 8462ee67178SXianjun Jiao dma_run_dependencies(&desc->async_tx); 8472ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 8482ee67178SXianjun Jiao } 8492ee67178SXianjun Jiao 8502ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 8512ee67178SXianjun Jiao } 8522ee67178SXianjun Jiao 8532ee67178SXianjun Jiao /** 8542ee67178SXianjun Jiao * xilinx_dma_do_tasklet - Schedule completion tasklet 8552ee67178SXianjun Jiao * @data: Pointer to the Xilinx DMA channel structure 8562ee67178SXianjun Jiao */ 8572ee67178SXianjun Jiao static void xilinx_dma_do_tasklet(unsigned long data) 8582ee67178SXianjun Jiao { 8592ee67178SXianjun Jiao struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; 8602ee67178SXianjun Jiao 8612ee67178SXianjun Jiao xilinx_dma_chan_desc_cleanup(chan); 8622ee67178SXianjun Jiao } 8632ee67178SXianjun Jiao 8642ee67178SXianjun Jiao /** 8652ee67178SXianjun Jiao * xilinx_dma_alloc_chan_resources - Allocate channel resources 8662ee67178SXianjun Jiao * @dchan: DMA channel 8672ee67178SXianjun Jiao * 8682ee67178SXianjun Jiao * Return: '0' on success and failure value on error 8692ee67178SXianjun Jiao */ 8702ee67178SXianjun Jiao static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 8712ee67178SXianjun Jiao { 8722ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 8732ee67178SXianjun Jiao int i; 8742ee67178SXianjun Jiao 8752ee67178SXianjun Jiao /* Has this channel already been allocated? */ 8762ee67178SXianjun Jiao if (chan->desc_pool) 8772ee67178SXianjun Jiao return 0; 8782ee67178SXianjun Jiao 8792ee67178SXianjun Jiao /* 8802ee67178SXianjun Jiao * We need the descriptor to be aligned to 64bytes 8812ee67178SXianjun Jiao * for meeting Xilinx VDMA specification requirement. 8822ee67178SXianjun Jiao */ 8832ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 8842ee67178SXianjun Jiao /* Allocate the buffer descriptors. */ 8852ee67178SXianjun Jiao chan->seg_v = dma_zalloc_coherent(chan->dev, 8862ee67178SXianjun Jiao sizeof(*chan->seg_v) * 8872ee67178SXianjun Jiao XILINX_DMA_NUM_DESCS, 8882ee67178SXianjun Jiao &chan->seg_p, GFP_KERNEL); 8892ee67178SXianjun Jiao if (!chan->seg_v) { 8902ee67178SXianjun Jiao dev_err(chan->dev, 8912ee67178SXianjun Jiao "unable to allocate channel %d descriptors\n", 8922ee67178SXianjun Jiao chan->id); 8932ee67178SXianjun Jiao return -ENOMEM; 8942ee67178SXianjun Jiao } 8952ee67178SXianjun Jiao /* 8962ee67178SXianjun Jiao * For cyclic DMA mode we need to program the tail Descriptor 8972ee67178SXianjun Jiao * register with a value which is not a part of the BD chain 8982ee67178SXianjun Jiao * so allocating a desc segment during channel allocation for 8992ee67178SXianjun Jiao * programming tail descriptor. 9002ee67178SXianjun Jiao */ 9012ee67178SXianjun Jiao chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 9022ee67178SXianjun Jiao sizeof(*chan->cyclic_seg_v), 9032ee67178SXianjun Jiao &chan->cyclic_seg_p, GFP_KERNEL); 9042ee67178SXianjun Jiao if (!chan->cyclic_seg_v) { 9052ee67178SXianjun Jiao dev_err(chan->dev, 9062ee67178SXianjun Jiao "unable to allocate desc segment for cyclic DMA\n"); 9072ee67178SXianjun Jiao dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * 9082ee67178SXianjun Jiao XILINX_DMA_NUM_DESCS, chan->seg_v, 9092ee67178SXianjun Jiao chan->seg_p); 9102ee67178SXianjun Jiao return -ENOMEM; 9112ee67178SXianjun Jiao } 9122ee67178SXianjun Jiao chan->cyclic_seg_v->phys = chan->cyclic_seg_p; 9132ee67178SXianjun Jiao 9142ee67178SXianjun Jiao for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { 9152ee67178SXianjun Jiao chan->seg_v[i].hw.next_desc = 9162ee67178SXianjun Jiao lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 9172ee67178SXianjun Jiao ((i + 1) % XILINX_DMA_NUM_DESCS)); 9182ee67178SXianjun Jiao chan->seg_v[i].hw.next_desc_msb = 9192ee67178SXianjun Jiao upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * 9202ee67178SXianjun Jiao ((i + 1) % XILINX_DMA_NUM_DESCS)); 9212ee67178SXianjun Jiao chan->seg_v[i].phys = chan->seg_p + 9222ee67178SXianjun Jiao sizeof(*chan->seg_v) * i; 9232ee67178SXianjun Jiao list_add_tail(&chan->seg_v[i].node, 9242ee67178SXianjun Jiao &chan->free_seg_list); 9252ee67178SXianjun Jiao } 9262ee67178SXianjun Jiao } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 9272ee67178SXianjun Jiao chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", 9282ee67178SXianjun Jiao chan->dev, 9292ee67178SXianjun Jiao sizeof(struct xilinx_cdma_tx_segment), 9302ee67178SXianjun Jiao __alignof__(struct xilinx_cdma_tx_segment), 9312ee67178SXianjun Jiao 0); 9322ee67178SXianjun Jiao } else { 9332ee67178SXianjun Jiao chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 9342ee67178SXianjun Jiao chan->dev, 9352ee67178SXianjun Jiao sizeof(struct xilinx_vdma_tx_segment), 9362ee67178SXianjun Jiao __alignof__(struct xilinx_vdma_tx_segment), 9372ee67178SXianjun Jiao 0); 9382ee67178SXianjun Jiao } 9392ee67178SXianjun Jiao 9402ee67178SXianjun Jiao if (!chan->desc_pool && 9412ee67178SXianjun Jiao (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { 9422ee67178SXianjun Jiao dev_err(chan->dev, 9432ee67178SXianjun Jiao "unable to allocate channel %d descriptor pool\n", 9442ee67178SXianjun Jiao chan->id); 9452ee67178SXianjun Jiao return -ENOMEM; 9462ee67178SXianjun Jiao } 9472ee67178SXianjun Jiao 9482ee67178SXianjun Jiao dma_cookie_init(dchan); 9492ee67178SXianjun Jiao 9502ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 9512ee67178SXianjun Jiao /* For AXI DMA resetting once channel will reset the 9522ee67178SXianjun Jiao * other channel as well so enable the interrupts here. 9532ee67178SXianjun Jiao */ 9542ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 9552ee67178SXianjun Jiao XILINX_DMA_DMAXR_ALL_IRQ_MASK); 9562ee67178SXianjun Jiao } 9572ee67178SXianjun Jiao 9582ee67178SXianjun Jiao if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 9592ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 9602ee67178SXianjun Jiao XILINX_CDMA_CR_SGMODE); 9612ee67178SXianjun Jiao 9622ee67178SXianjun Jiao return 0; 9632ee67178SXianjun Jiao } 9642ee67178SXianjun Jiao 9652ee67178SXianjun Jiao /** 9662ee67178SXianjun Jiao * xilinx_dma_tx_status - Get DMA transaction status 9672ee67178SXianjun Jiao * @dchan: DMA channel 9682ee67178SXianjun Jiao * @cookie: Transaction identifier 9692ee67178SXianjun Jiao * @txstate: Transaction state 9702ee67178SXianjun Jiao * 9712ee67178SXianjun Jiao * Return: DMA transaction status 9722ee67178SXianjun Jiao */ 9732ee67178SXianjun Jiao static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, 9742ee67178SXianjun Jiao dma_cookie_t cookie, 9752ee67178SXianjun Jiao struct dma_tx_state *txstate) 9762ee67178SXianjun Jiao { 9772ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 9782ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 979febc5adfSXianjun Jiao struct xilinx_axidma_tx_segment *segment; 980febc5adfSXianjun Jiao struct xilinx_axidma_desc_hw *hw; 9812ee67178SXianjun Jiao enum dma_status ret; 9822ee67178SXianjun Jiao unsigned long flags; 983febc5adfSXianjun Jiao u32 residue = 0; 9842ee67178SXianjun Jiao 9852ee67178SXianjun Jiao ret = dma_cookie_status(dchan, cookie, txstate); 9862ee67178SXianjun Jiao if (ret == DMA_COMPLETE || !txstate) 9872ee67178SXianjun Jiao return ret; 9882ee67178SXianjun Jiao 989febc5adfSXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 9902ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 9912ee67178SXianjun Jiao 9922ee67178SXianjun Jiao desc = list_last_entry(&chan->active_list, 9932ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 994febc5adfSXianjun Jiao if (chan->has_sg) { 995febc5adfSXianjun Jiao list_for_each_entry(segment, &desc->segments, node) { 996febc5adfSXianjun Jiao hw = &segment->hw; 997febc5adfSXianjun Jiao residue += (hw->control - hw->status) & 998febc5adfSXianjun Jiao chan->xdev->max_buffer_len; 999febc5adfSXianjun Jiao } 1000febc5adfSXianjun Jiao } 10012ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 10022ee67178SXianjun Jiao 1003febc5adfSXianjun Jiao chan->residue = residue; 10042ee67178SXianjun Jiao if (chan->cyclic) 10052ee67178SXianjun Jiao dma_set_residue(txstate, chan->buf_idx); 10062ee67178SXianjun Jiao else 1007febc5adfSXianjun Jiao dma_set_residue(txstate, chan->residue); 1008febc5adfSXianjun Jiao } 10092ee67178SXianjun Jiao 10102ee67178SXianjun Jiao return ret; 10112ee67178SXianjun Jiao } 10122ee67178SXianjun Jiao 10132ee67178SXianjun Jiao /** 10142ee67178SXianjun Jiao * xilinx_dma_stop_transfer - Halt DMA channel 10152ee67178SXianjun Jiao * @chan: Driver specific DMA channel 10162ee67178SXianjun Jiao * 10172ee67178SXianjun Jiao * Return: '0' on success and failure value on error 10182ee67178SXianjun Jiao */ 10192ee67178SXianjun Jiao static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) 10202ee67178SXianjun Jiao { 10212ee67178SXianjun Jiao u32 val; 10222ee67178SXianjun Jiao 10232ee67178SXianjun Jiao dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 10242ee67178SXianjun Jiao 10252ee67178SXianjun Jiao /* Wait for the hardware to halt */ 10262ee67178SXianjun Jiao return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 1027febc5adfSXianjun Jiao val & XILINX_DMA_DMASR_HALTED, 0, 1028febc5adfSXianjun Jiao XILINX_DMA_LOOP_COUNT); 10292ee67178SXianjun Jiao } 10302ee67178SXianjun Jiao 10312ee67178SXianjun Jiao /** 10322ee67178SXianjun Jiao * xilinx_cdma_stop_transfer - Wait for the current transfer to complete 10332ee67178SXianjun Jiao * @chan: Driver specific DMA channel 10342ee67178SXianjun Jiao * 10352ee67178SXianjun Jiao * Return: '0' on success and failure value on error 10362ee67178SXianjun Jiao */ 10372ee67178SXianjun Jiao static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) 10382ee67178SXianjun Jiao { 10392ee67178SXianjun Jiao u32 val; 10402ee67178SXianjun Jiao 10412ee67178SXianjun Jiao return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 10422ee67178SXianjun Jiao val & XILINX_DMA_DMASR_IDLE, 0, 10432ee67178SXianjun Jiao XILINX_DMA_LOOP_COUNT); 10442ee67178SXianjun Jiao } 10452ee67178SXianjun Jiao 10462ee67178SXianjun Jiao /** 10472ee67178SXianjun Jiao * xilinx_dma_start - Start DMA channel 10482ee67178SXianjun Jiao * @chan: Driver specific DMA channel 10492ee67178SXianjun Jiao */ 10502ee67178SXianjun Jiao static void xilinx_dma_start(struct xilinx_dma_chan *chan) 10512ee67178SXianjun Jiao { 10522ee67178SXianjun Jiao int err; 10532ee67178SXianjun Jiao u32 val; 10542ee67178SXianjun Jiao 10552ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); 10562ee67178SXianjun Jiao 10572ee67178SXianjun Jiao /* Wait for the hardware to start */ 10582ee67178SXianjun Jiao err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, 10592ee67178SXianjun Jiao !(val & XILINX_DMA_DMASR_HALTED), 0, 10602ee67178SXianjun Jiao XILINX_DMA_LOOP_COUNT); 10612ee67178SXianjun Jiao 10622ee67178SXianjun Jiao if (err) { 10632ee67178SXianjun Jiao dev_err(chan->dev, "Cannot start channel %p: %x\n", 10642ee67178SXianjun Jiao chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 10652ee67178SXianjun Jiao 10662ee67178SXianjun Jiao chan->err = true; 10672ee67178SXianjun Jiao } 10682ee67178SXianjun Jiao } 10692ee67178SXianjun Jiao 10702ee67178SXianjun Jiao /** 10712ee67178SXianjun Jiao * xilinx_vdma_start_transfer - Starts VDMA transfer 10722ee67178SXianjun Jiao * @chan: Driver specific channel struct pointer 10732ee67178SXianjun Jiao */ 10742ee67178SXianjun Jiao static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 10752ee67178SXianjun Jiao { 10762ee67178SXianjun Jiao struct xilinx_vdma_config *config = &chan->config; 10772ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc, *tail_desc; 10782ee67178SXianjun Jiao u32 reg, j; 10792ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *tail_segment; 10802ee67178SXianjun Jiao 10812ee67178SXianjun Jiao /* This function was invoked with lock held */ 10822ee67178SXianjun Jiao if (chan->err) 10832ee67178SXianjun Jiao return; 10842ee67178SXianjun Jiao 10852ee67178SXianjun Jiao if (!chan->idle) 10862ee67178SXianjun Jiao return; 10872ee67178SXianjun Jiao 10882ee67178SXianjun Jiao if (list_empty(&chan->pending_list)) 10892ee67178SXianjun Jiao return; 10902ee67178SXianjun Jiao 10912ee67178SXianjun Jiao desc = list_first_entry(&chan->pending_list, 10922ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 10932ee67178SXianjun Jiao tail_desc = list_last_entry(&chan->pending_list, 10942ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 10952ee67178SXianjun Jiao 10962ee67178SXianjun Jiao tail_segment = list_last_entry(&tail_desc->segments, 10972ee67178SXianjun Jiao struct xilinx_vdma_tx_segment, node); 10982ee67178SXianjun Jiao 10992ee67178SXianjun Jiao /* 11002ee67178SXianjun Jiao * If hardware is idle, then all descriptors on the running lists are 11012ee67178SXianjun Jiao * done, start new transfers 11022ee67178SXianjun Jiao */ 11032ee67178SXianjun Jiao if (chan->has_sg) 11042ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 11052ee67178SXianjun Jiao desc->async_tx.phys); 11062ee67178SXianjun Jiao 11072ee67178SXianjun Jiao /* Configure the hardware using info in the config structure */ 11082ee67178SXianjun Jiao if (chan->has_vflip) { 11092ee67178SXianjun Jiao reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); 11102ee67178SXianjun Jiao reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; 11112ee67178SXianjun Jiao reg |= config->vflip_en; 11122ee67178SXianjun Jiao dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, 11132ee67178SXianjun Jiao reg); 11142ee67178SXianjun Jiao } 11152ee67178SXianjun Jiao 11162ee67178SXianjun Jiao reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 11172ee67178SXianjun Jiao 11182ee67178SXianjun Jiao if (config->frm_cnt_en) 11192ee67178SXianjun Jiao reg |= XILINX_DMA_DMACR_FRAMECNT_EN; 11202ee67178SXianjun Jiao else 11212ee67178SXianjun Jiao reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; 11222ee67178SXianjun Jiao 11232ee67178SXianjun Jiao /* 11242ee67178SXianjun Jiao * With SG, start with circular mode, so that BDs can be fetched. 11252ee67178SXianjun Jiao * In direct register mode, if not parking, enable circular mode 11262ee67178SXianjun Jiao */ 11272ee67178SXianjun Jiao if (chan->has_sg || !config->park) 11282ee67178SXianjun Jiao reg |= XILINX_DMA_DMACR_CIRC_EN; 11292ee67178SXianjun Jiao 11302ee67178SXianjun Jiao if (config->park) 11312ee67178SXianjun Jiao reg &= ~XILINX_DMA_DMACR_CIRC_EN; 11322ee67178SXianjun Jiao 11332ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 11342ee67178SXianjun Jiao 11352ee67178SXianjun Jiao j = chan->desc_submitcount; 11362ee67178SXianjun Jiao reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); 11372ee67178SXianjun Jiao if (chan->direction == DMA_MEM_TO_DEV) { 11382ee67178SXianjun Jiao reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; 11392ee67178SXianjun Jiao reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT; 11402ee67178SXianjun Jiao } else { 11412ee67178SXianjun Jiao reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; 11422ee67178SXianjun Jiao reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; 11432ee67178SXianjun Jiao } 11442ee67178SXianjun Jiao dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg); 11452ee67178SXianjun Jiao 11462ee67178SXianjun Jiao /* Start the hardware */ 11472ee67178SXianjun Jiao xilinx_dma_start(chan); 11482ee67178SXianjun Jiao 11492ee67178SXianjun Jiao if (chan->err) 11502ee67178SXianjun Jiao return; 11512ee67178SXianjun Jiao 11522ee67178SXianjun Jiao /* Start the transfer */ 11532ee67178SXianjun Jiao if (chan->has_sg) { 11542ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 11552ee67178SXianjun Jiao tail_segment->phys); 11562ee67178SXianjun Jiao list_splice_tail_init(&chan->pending_list, &chan->active_list); 11572ee67178SXianjun Jiao chan->desc_pendingcount = 0; 11582ee67178SXianjun Jiao } else { 11592ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *segment, *last = NULL; 11602ee67178SXianjun Jiao int i = 0; 11612ee67178SXianjun Jiao 11622ee67178SXianjun Jiao if (chan->desc_submitcount < chan->num_frms) 11632ee67178SXianjun Jiao i = chan->desc_submitcount; 11642ee67178SXianjun Jiao 11652ee67178SXianjun Jiao list_for_each_entry(segment, &desc->segments, node) { 11662ee67178SXianjun Jiao if (chan->ext_addr) 11672ee67178SXianjun Jiao vdma_desc_write_64(chan, 11682ee67178SXianjun Jiao XILINX_VDMA_REG_START_ADDRESS_64(i++), 11692ee67178SXianjun Jiao segment->hw.buf_addr, 11702ee67178SXianjun Jiao segment->hw.buf_addr_msb); 11712ee67178SXianjun Jiao else 11722ee67178SXianjun Jiao vdma_desc_write(chan, 11732ee67178SXianjun Jiao XILINX_VDMA_REG_START_ADDRESS(i++), 11742ee67178SXianjun Jiao segment->hw.buf_addr); 11752ee67178SXianjun Jiao 11762ee67178SXianjun Jiao last = segment; 11772ee67178SXianjun Jiao } 11782ee67178SXianjun Jiao 11792ee67178SXianjun Jiao if (!last) 11802ee67178SXianjun Jiao return; 11812ee67178SXianjun Jiao 11822ee67178SXianjun Jiao /* HW expects these parameters to be same for one transaction */ 11832ee67178SXianjun Jiao vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); 11842ee67178SXianjun Jiao vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, 11852ee67178SXianjun Jiao last->hw.stride); 11862ee67178SXianjun Jiao vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); 11872ee67178SXianjun Jiao 11882ee67178SXianjun Jiao chan->desc_submitcount++; 11892ee67178SXianjun Jiao chan->desc_pendingcount--; 11902ee67178SXianjun Jiao list_del(&desc->node); 11912ee67178SXianjun Jiao list_add_tail(&desc->node, &chan->active_list); 11922ee67178SXianjun Jiao if (chan->desc_submitcount == chan->num_frms) 11932ee67178SXianjun Jiao chan->desc_submitcount = 0; 11942ee67178SXianjun Jiao } 11952ee67178SXianjun Jiao 11962ee67178SXianjun Jiao chan->idle = false; 11972ee67178SXianjun Jiao } 11982ee67178SXianjun Jiao 11992ee67178SXianjun Jiao /** 12002ee67178SXianjun Jiao * xilinx_cdma_start_transfer - Starts cdma transfer 12012ee67178SXianjun Jiao * @chan: Driver specific channel struct pointer 12022ee67178SXianjun Jiao */ 12032ee67178SXianjun Jiao static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) 12042ee67178SXianjun Jiao { 12052ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 12062ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *tail_segment; 12072ee67178SXianjun Jiao u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR); 12082ee67178SXianjun Jiao 12092ee67178SXianjun Jiao if (chan->err) 12102ee67178SXianjun Jiao return; 12112ee67178SXianjun Jiao 12122ee67178SXianjun Jiao if (!chan->idle) 12132ee67178SXianjun Jiao return; 12142ee67178SXianjun Jiao 12152ee67178SXianjun Jiao if (list_empty(&chan->pending_list)) 12162ee67178SXianjun Jiao return; 12172ee67178SXianjun Jiao 12182ee67178SXianjun Jiao head_desc = list_first_entry(&chan->pending_list, 12192ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 12202ee67178SXianjun Jiao tail_desc = list_last_entry(&chan->pending_list, 12212ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 12222ee67178SXianjun Jiao tail_segment = list_last_entry(&tail_desc->segments, 12232ee67178SXianjun Jiao struct xilinx_cdma_tx_segment, node); 12242ee67178SXianjun Jiao 12252ee67178SXianjun Jiao if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 12262ee67178SXianjun Jiao ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX; 12272ee67178SXianjun Jiao ctrl_reg |= chan->desc_pendingcount << 12282ee67178SXianjun Jiao XILINX_DMA_CR_COALESCE_SHIFT; 12292ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg); 12302ee67178SXianjun Jiao } 12312ee67178SXianjun Jiao 12322ee67178SXianjun Jiao if (chan->has_sg) { 12332ee67178SXianjun Jiao dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 12342ee67178SXianjun Jiao XILINX_CDMA_CR_SGMODE); 12352ee67178SXianjun Jiao 12362ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 12372ee67178SXianjun Jiao XILINX_CDMA_CR_SGMODE); 12382ee67178SXianjun Jiao 12392ee67178SXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_CURDESC, 12402ee67178SXianjun Jiao head_desc->async_tx.phys); 12412ee67178SXianjun Jiao 12422ee67178SXianjun Jiao /* Update tail ptr register which will start the transfer */ 12432ee67178SXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 12442ee67178SXianjun Jiao tail_segment->phys); 12452ee67178SXianjun Jiao } else { 12462ee67178SXianjun Jiao /* In simple mode */ 12472ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *segment; 12482ee67178SXianjun Jiao struct xilinx_cdma_desc_hw *hw; 12492ee67178SXianjun Jiao 12502ee67178SXianjun Jiao segment = list_first_entry(&head_desc->segments, 12512ee67178SXianjun Jiao struct xilinx_cdma_tx_segment, 12522ee67178SXianjun Jiao node); 12532ee67178SXianjun Jiao 12542ee67178SXianjun Jiao hw = &segment->hw; 12552ee67178SXianjun Jiao 1256febc5adfSXianjun Jiao xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, (dma_addr_t) 1257febc5adfSXianjun Jiao ((u64)hw->src_addr_msb << 32 | hw->src_addr)); 1258febc5adfSXianjun Jiao xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, (dma_addr_t) 1259febc5adfSXianjun Jiao ((u64)hw->dest_addr_msb << 32 | hw->dest_addr)); 12602ee67178SXianjun Jiao 12612ee67178SXianjun Jiao /* Start the transfer */ 12622ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 12632ee67178SXianjun Jiao hw->control & chan->xdev->max_buffer_len); 12642ee67178SXianjun Jiao } 12652ee67178SXianjun Jiao 12662ee67178SXianjun Jiao list_splice_tail_init(&chan->pending_list, &chan->active_list); 12672ee67178SXianjun Jiao chan->desc_pendingcount = 0; 12682ee67178SXianjun Jiao chan->idle = false; 12692ee67178SXianjun Jiao } 12702ee67178SXianjun Jiao 12712ee67178SXianjun Jiao /** 12722ee67178SXianjun Jiao * xilinx_dma_start_transfer - Starts DMA transfer 12732ee67178SXianjun Jiao * @chan: Driver specific channel struct pointer 12742ee67178SXianjun Jiao */ 12752ee67178SXianjun Jiao static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) 12762ee67178SXianjun Jiao { 12772ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; 12782ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *tail_segment; 12792ee67178SXianjun Jiao u32 reg; 12802ee67178SXianjun Jiao 12812ee67178SXianjun Jiao if (chan->err) 12822ee67178SXianjun Jiao return; 12832ee67178SXianjun Jiao 12842ee67178SXianjun Jiao if (!chan->idle) 12852ee67178SXianjun Jiao return; 12862ee67178SXianjun Jiao 12872ee67178SXianjun Jiao if (list_empty(&chan->pending_list)) 12882ee67178SXianjun Jiao return; 12892ee67178SXianjun Jiao 12902ee67178SXianjun Jiao head_desc = list_first_entry(&chan->pending_list, 12912ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 12922ee67178SXianjun Jiao tail_desc = list_last_entry(&chan->pending_list, 12932ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 12942ee67178SXianjun Jiao tail_segment = list_last_entry(&tail_desc->segments, 12952ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, node); 12962ee67178SXianjun Jiao 12972ee67178SXianjun Jiao reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 12982ee67178SXianjun Jiao 12992ee67178SXianjun Jiao if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { 13002ee67178SXianjun Jiao reg &= ~XILINX_DMA_CR_COALESCE_MAX; 13012ee67178SXianjun Jiao reg |= chan->desc_pendingcount << 13022ee67178SXianjun Jiao XILINX_DMA_CR_COALESCE_SHIFT; 13032ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 13042ee67178SXianjun Jiao } 13052ee67178SXianjun Jiao 1306febc5adfSXianjun Jiao if (chan->has_sg && !chan->xdev->mcdma) 13072ee67178SXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_CURDESC, 13082ee67178SXianjun Jiao head_desc->async_tx.phys); 13092ee67178SXianjun Jiao 1310febc5adfSXianjun Jiao if (chan->has_sg && chan->xdev->mcdma) { 1311febc5adfSXianjun Jiao if (chan->direction == DMA_MEM_TO_DEV) { 1312febc5adfSXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1313febc5adfSXianjun Jiao head_desc->async_tx.phys); 1314febc5adfSXianjun Jiao } else { 1315febc5adfSXianjun Jiao if (!chan->tdest) { 1316febc5adfSXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1317febc5adfSXianjun Jiao head_desc->async_tx.phys); 1318febc5adfSXianjun Jiao } else { 1319febc5adfSXianjun Jiao dma_ctrl_write(chan, 1320febc5adfSXianjun Jiao XILINX_DMA_MCRX_CDESC(chan->tdest), 1321febc5adfSXianjun Jiao head_desc->async_tx.phys); 1322febc5adfSXianjun Jiao } 1323febc5adfSXianjun Jiao } 1324febc5adfSXianjun Jiao } 1325febc5adfSXianjun Jiao 13262ee67178SXianjun Jiao xilinx_dma_start(chan); 13272ee67178SXianjun Jiao 13282ee67178SXianjun Jiao if (chan->err) 13292ee67178SXianjun Jiao return; 13302ee67178SXianjun Jiao 13312ee67178SXianjun Jiao /* Start the transfer */ 1332febc5adfSXianjun Jiao if (chan->has_sg && !chan->xdev->mcdma) { 13332ee67178SXianjun Jiao if (chan->cyclic) 13342ee67178SXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 13352ee67178SXianjun Jiao chan->cyclic_seg_v->phys); 13362ee67178SXianjun Jiao else 13372ee67178SXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_TAILDESC, 13382ee67178SXianjun Jiao tail_segment->phys); 1339febc5adfSXianjun Jiao } else if (chan->has_sg && chan->xdev->mcdma) { 1340febc5adfSXianjun Jiao if (chan->direction == DMA_MEM_TO_DEV) { 1341febc5adfSXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1342febc5adfSXianjun Jiao tail_segment->phys); 1343febc5adfSXianjun Jiao } else { 1344febc5adfSXianjun Jiao if (!chan->tdest) { 1345febc5adfSXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1346febc5adfSXianjun Jiao tail_segment->phys); 1347febc5adfSXianjun Jiao } else { 1348febc5adfSXianjun Jiao dma_ctrl_write(chan, 1349febc5adfSXianjun Jiao XILINX_DMA_MCRX_TDESC(chan->tdest), 1350febc5adfSXianjun Jiao tail_segment->phys); 1351febc5adfSXianjun Jiao } 1352febc5adfSXianjun Jiao } 13532ee67178SXianjun Jiao } else { 13542ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *segment; 13552ee67178SXianjun Jiao struct xilinx_axidma_desc_hw *hw; 13562ee67178SXianjun Jiao 13572ee67178SXianjun Jiao segment = list_first_entry(&head_desc->segments, 13582ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, 13592ee67178SXianjun Jiao node); 13602ee67178SXianjun Jiao hw = &segment->hw; 13612ee67178SXianjun Jiao 1362febc5adfSXianjun Jiao xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 13632ee67178SXianjun Jiao 13642ee67178SXianjun Jiao /* Start the transfer */ 13652ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 13662ee67178SXianjun Jiao hw->control & chan->xdev->max_buffer_len); 13672ee67178SXianjun Jiao } 13682ee67178SXianjun Jiao 13692ee67178SXianjun Jiao list_splice_tail_init(&chan->pending_list, &chan->active_list); 13702ee67178SXianjun Jiao chan->desc_pendingcount = 0; 13712ee67178SXianjun Jiao chan->idle = false; 13722ee67178SXianjun Jiao } 13732ee67178SXianjun Jiao 13742ee67178SXianjun Jiao /** 13752ee67178SXianjun Jiao * xilinx_dma_issue_pending - Issue pending transactions 13762ee67178SXianjun Jiao * @dchan: DMA channel 13772ee67178SXianjun Jiao */ 13782ee67178SXianjun Jiao static void xilinx_dma_issue_pending(struct dma_chan *dchan) 13792ee67178SXianjun Jiao { 13802ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 13812ee67178SXianjun Jiao unsigned long flags; 13822ee67178SXianjun Jiao 13832ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 13842ee67178SXianjun Jiao chan->start_transfer(chan); 13852ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 13862ee67178SXianjun Jiao } 13872ee67178SXianjun Jiao 13882ee67178SXianjun Jiao /** 13892ee67178SXianjun Jiao * xilinx_dma_complete_descriptor - Mark the active descriptor as complete 13902ee67178SXianjun Jiao * @chan : xilinx DMA channel 13912ee67178SXianjun Jiao * 13922ee67178SXianjun Jiao * CONTEXT: hardirq 13932ee67178SXianjun Jiao */ 13942ee67178SXianjun Jiao static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) 13952ee67178SXianjun Jiao { 13962ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc, *next; 13972ee67178SXianjun Jiao 13982ee67178SXianjun Jiao /* This function was invoked with lock held */ 13992ee67178SXianjun Jiao if (list_empty(&chan->active_list)) 14002ee67178SXianjun Jiao return; 14012ee67178SXianjun Jiao 14022ee67178SXianjun Jiao list_for_each_entry_safe(desc, next, &chan->active_list, node) { 14032ee67178SXianjun Jiao list_del(&desc->node); 14042ee67178SXianjun Jiao if (!desc->cyclic) 14052ee67178SXianjun Jiao dma_cookie_complete(&desc->async_tx); 14062ee67178SXianjun Jiao list_add_tail(&desc->node, &chan->done_list); 14072ee67178SXianjun Jiao } 14082ee67178SXianjun Jiao } 14092ee67178SXianjun Jiao 14102ee67178SXianjun Jiao /** 14112ee67178SXianjun Jiao * xilinx_dma_reset - Reset DMA channel 14122ee67178SXianjun Jiao * @chan: Driver specific DMA channel 14132ee67178SXianjun Jiao * 14142ee67178SXianjun Jiao * Return: '0' on success and failure value on error 14152ee67178SXianjun Jiao */ 14162ee67178SXianjun Jiao static int xilinx_dma_reset(struct xilinx_dma_chan *chan) 14172ee67178SXianjun Jiao { 14182ee67178SXianjun Jiao int err; 14192ee67178SXianjun Jiao u32 tmp; 14202ee67178SXianjun Jiao 14212ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); 14222ee67178SXianjun Jiao 14232ee67178SXianjun Jiao /* Wait for the hardware to finish reset */ 14242ee67178SXianjun Jiao err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, 14252ee67178SXianjun Jiao !(tmp & XILINX_DMA_DMACR_RESET), 0, 14262ee67178SXianjun Jiao XILINX_DMA_LOOP_COUNT); 14272ee67178SXianjun Jiao 14282ee67178SXianjun Jiao if (err) { 14292ee67178SXianjun Jiao dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 14302ee67178SXianjun Jiao dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), 14312ee67178SXianjun Jiao dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); 14322ee67178SXianjun Jiao return -ETIMEDOUT; 14332ee67178SXianjun Jiao } 14342ee67178SXianjun Jiao 14352ee67178SXianjun Jiao chan->err = false; 14362ee67178SXianjun Jiao chan->idle = true; 14372ee67178SXianjun Jiao chan->desc_submitcount = 0; 14382ee67178SXianjun Jiao 14392ee67178SXianjun Jiao return err; 14402ee67178SXianjun Jiao } 14412ee67178SXianjun Jiao 14422ee67178SXianjun Jiao /** 14432ee67178SXianjun Jiao * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts 14442ee67178SXianjun Jiao * @chan: Driver specific DMA channel 14452ee67178SXianjun Jiao * 14462ee67178SXianjun Jiao * Return: '0' on success and failure value on error 14472ee67178SXianjun Jiao */ 14482ee67178SXianjun Jiao static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) 14492ee67178SXianjun Jiao { 14502ee67178SXianjun Jiao int err; 14512ee67178SXianjun Jiao 14522ee67178SXianjun Jiao /* Reset VDMA */ 14532ee67178SXianjun Jiao err = xilinx_dma_reset(chan); 14542ee67178SXianjun Jiao if (err) 14552ee67178SXianjun Jiao return err; 14562ee67178SXianjun Jiao 14572ee67178SXianjun Jiao /* Enable interrupts */ 14582ee67178SXianjun Jiao dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, 14592ee67178SXianjun Jiao XILINX_DMA_DMAXR_ALL_IRQ_MASK); 14602ee67178SXianjun Jiao 14612ee67178SXianjun Jiao return 0; 14622ee67178SXianjun Jiao } 14632ee67178SXianjun Jiao 14642ee67178SXianjun Jiao /** 14652ee67178SXianjun Jiao * xilinx_dma_irq_handler - DMA Interrupt handler 14662ee67178SXianjun Jiao * @irq: IRQ number 14672ee67178SXianjun Jiao * @data: Pointer to the Xilinx DMA channel structure 14682ee67178SXianjun Jiao * 14692ee67178SXianjun Jiao * Return: IRQ_HANDLED/IRQ_NONE 14702ee67178SXianjun Jiao */ 14712ee67178SXianjun Jiao static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) 14722ee67178SXianjun Jiao { 14732ee67178SXianjun Jiao struct xilinx_dma_chan *chan = data; 14742ee67178SXianjun Jiao u32 status; 14752ee67178SXianjun Jiao 14762ee67178SXianjun Jiao /* Read the status and ack the interrupts. */ 14772ee67178SXianjun Jiao status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); 14782ee67178SXianjun Jiao if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) 14792ee67178SXianjun Jiao return IRQ_NONE; 14802ee67178SXianjun Jiao 14812ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 14822ee67178SXianjun Jiao status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); 14832ee67178SXianjun Jiao 14842ee67178SXianjun Jiao if (status & XILINX_DMA_DMASR_ERR_IRQ) { 14852ee67178SXianjun Jiao /* 14862ee67178SXianjun Jiao * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 14872ee67178SXianjun Jiao * error is recoverable, ignore it. Otherwise flag the error. 14882ee67178SXianjun Jiao * 14892ee67178SXianjun Jiao * Only recoverable errors can be cleared in the DMASR register, 14902ee67178SXianjun Jiao * make sure not to write to other error bits to 1. 14912ee67178SXianjun Jiao */ 14922ee67178SXianjun Jiao u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; 14932ee67178SXianjun Jiao 14942ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, 14952ee67178SXianjun Jiao errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); 14962ee67178SXianjun Jiao 14972ee67178SXianjun Jiao if (!chan->flush_on_fsync || 14982ee67178SXianjun Jiao (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { 14992ee67178SXianjun Jiao dev_err(chan->dev, 15002ee67178SXianjun Jiao "Channel %p has errors %x, cdr %x tdr %x\n", 15012ee67178SXianjun Jiao chan, errors, 15022ee67178SXianjun Jiao dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), 15032ee67178SXianjun Jiao dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); 15042ee67178SXianjun Jiao chan->err = true; 15052ee67178SXianjun Jiao } 15062ee67178SXianjun Jiao } 15072ee67178SXianjun Jiao 15082ee67178SXianjun Jiao if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { 15092ee67178SXianjun Jiao /* 15102ee67178SXianjun Jiao * Device takes too long to do the transfer when user requires 15112ee67178SXianjun Jiao * responsiveness. 15122ee67178SXianjun Jiao */ 15132ee67178SXianjun Jiao dev_dbg(chan->dev, "Inter-packet latency too long\n"); 15142ee67178SXianjun Jiao } 15152ee67178SXianjun Jiao 15162ee67178SXianjun Jiao if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { 15172ee67178SXianjun Jiao spin_lock(&chan->lock); 15182ee67178SXianjun Jiao xilinx_dma_complete_descriptor(chan); 15192ee67178SXianjun Jiao chan->idle = true; 15202ee67178SXianjun Jiao chan->start_transfer(chan); 15212ee67178SXianjun Jiao chan->buf_idx++; 15222ee67178SXianjun Jiao spin_unlock(&chan->lock); 15232ee67178SXianjun Jiao } 15242ee67178SXianjun Jiao 15252ee67178SXianjun Jiao tasklet_schedule(&chan->tasklet); 15262ee67178SXianjun Jiao return IRQ_HANDLED; 15272ee67178SXianjun Jiao } 15282ee67178SXianjun Jiao 15292ee67178SXianjun Jiao /** 15302ee67178SXianjun Jiao * append_desc_queue - Queuing descriptor 15312ee67178SXianjun Jiao * @chan: Driver specific dma channel 15322ee67178SXianjun Jiao * @desc: dma transaction descriptor 15332ee67178SXianjun Jiao */ 15342ee67178SXianjun Jiao static void append_desc_queue(struct xilinx_dma_chan *chan, 15352ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc) 15362ee67178SXianjun Jiao { 15372ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *tail_segment; 15382ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *tail_desc; 15392ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *axidma_tail_segment; 15402ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *cdma_tail_segment; 15412ee67178SXianjun Jiao 15422ee67178SXianjun Jiao if (list_empty(&chan->pending_list)) 15432ee67178SXianjun Jiao goto append; 15442ee67178SXianjun Jiao 15452ee67178SXianjun Jiao /* 15462ee67178SXianjun Jiao * Add the hardware descriptor to the chain of hardware descriptors 15472ee67178SXianjun Jiao * that already exists in memory. 15482ee67178SXianjun Jiao */ 15492ee67178SXianjun Jiao tail_desc = list_last_entry(&chan->pending_list, 15502ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor, node); 15512ee67178SXianjun Jiao if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 15522ee67178SXianjun Jiao tail_segment = list_last_entry(&tail_desc->segments, 15532ee67178SXianjun Jiao struct xilinx_vdma_tx_segment, 15542ee67178SXianjun Jiao node); 15552ee67178SXianjun Jiao tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 15562ee67178SXianjun Jiao } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 15572ee67178SXianjun Jiao cdma_tail_segment = list_last_entry(&tail_desc->segments, 15582ee67178SXianjun Jiao struct xilinx_cdma_tx_segment, 15592ee67178SXianjun Jiao node); 15602ee67178SXianjun Jiao cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 15612ee67178SXianjun Jiao } else { 15622ee67178SXianjun Jiao axidma_tail_segment = list_last_entry(&tail_desc->segments, 15632ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, 15642ee67178SXianjun Jiao node); 15652ee67178SXianjun Jiao axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 15662ee67178SXianjun Jiao } 15672ee67178SXianjun Jiao 15682ee67178SXianjun Jiao /* 15692ee67178SXianjun Jiao * Add the software descriptor and all children to the list 15702ee67178SXianjun Jiao * of pending transactions 15712ee67178SXianjun Jiao */ 15722ee67178SXianjun Jiao append: 15732ee67178SXianjun Jiao list_add_tail(&desc->node, &chan->pending_list); 15742ee67178SXianjun Jiao chan->desc_pendingcount++; 15752ee67178SXianjun Jiao 15762ee67178SXianjun Jiao if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) 15772ee67178SXianjun Jiao && unlikely(chan->desc_pendingcount > chan->num_frms)) { 15782ee67178SXianjun Jiao dev_dbg(chan->dev, "desc pendingcount is too high\n"); 15792ee67178SXianjun Jiao chan->desc_pendingcount = chan->num_frms; 15802ee67178SXianjun Jiao } 15812ee67178SXianjun Jiao } 15822ee67178SXianjun Jiao 15832ee67178SXianjun Jiao /** 15842ee67178SXianjun Jiao * xilinx_dma_tx_submit - Submit DMA transaction 15852ee67178SXianjun Jiao * @tx: Async transaction descriptor 15862ee67178SXianjun Jiao * 15872ee67178SXianjun Jiao * Return: cookie value on success and failure value on error 15882ee67178SXianjun Jiao */ 15892ee67178SXianjun Jiao static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) 15902ee67178SXianjun Jiao { 15912ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); 15922ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); 15932ee67178SXianjun Jiao dma_cookie_t cookie; 15942ee67178SXianjun Jiao unsigned long flags; 15952ee67178SXianjun Jiao int err; 15962ee67178SXianjun Jiao 15972ee67178SXianjun Jiao if (chan->cyclic) { 15982ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 15992ee67178SXianjun Jiao return -EBUSY; 16002ee67178SXianjun Jiao } 16012ee67178SXianjun Jiao 16022ee67178SXianjun Jiao if (chan->err) { 16032ee67178SXianjun Jiao /* 16042ee67178SXianjun Jiao * If reset fails, need to hard reset the system. 16052ee67178SXianjun Jiao * Channel is no longer functional 16062ee67178SXianjun Jiao */ 16072ee67178SXianjun Jiao err = xilinx_dma_chan_reset(chan); 16082ee67178SXianjun Jiao if (err < 0) 16092ee67178SXianjun Jiao return err; 16102ee67178SXianjun Jiao } 16112ee67178SXianjun Jiao 16122ee67178SXianjun Jiao spin_lock_irqsave(&chan->lock, flags); 16132ee67178SXianjun Jiao 16142ee67178SXianjun Jiao cookie = dma_cookie_assign(tx); 16152ee67178SXianjun Jiao 16162ee67178SXianjun Jiao /* Put this transaction onto the tail of the pending queue */ 16172ee67178SXianjun Jiao append_desc_queue(chan, desc); 16182ee67178SXianjun Jiao 16192ee67178SXianjun Jiao if (desc->cyclic) 16202ee67178SXianjun Jiao chan->cyclic = true; 16212ee67178SXianjun Jiao 16222ee67178SXianjun Jiao spin_unlock_irqrestore(&chan->lock, flags); 16232ee67178SXianjun Jiao 16242ee67178SXianjun Jiao return cookie; 16252ee67178SXianjun Jiao } 16262ee67178SXianjun Jiao 16272ee67178SXianjun Jiao /** 16282ee67178SXianjun Jiao * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a 16292ee67178SXianjun Jiao * DMA_SLAVE transaction 16302ee67178SXianjun Jiao * @dchan: DMA channel 16312ee67178SXianjun Jiao * @xt: Interleaved template pointer 16322ee67178SXianjun Jiao * @flags: transfer ack flags 16332ee67178SXianjun Jiao * 16342ee67178SXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 16352ee67178SXianjun Jiao */ 16362ee67178SXianjun Jiao static struct dma_async_tx_descriptor * 16372ee67178SXianjun Jiao xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, 16382ee67178SXianjun Jiao struct dma_interleaved_template *xt, 16392ee67178SXianjun Jiao unsigned long flags) 16402ee67178SXianjun Jiao { 16412ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 16422ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 16432ee67178SXianjun Jiao struct xilinx_vdma_tx_segment *segment; 16442ee67178SXianjun Jiao struct xilinx_vdma_desc_hw *hw; 16452ee67178SXianjun Jiao 16462ee67178SXianjun Jiao if (!is_slave_direction(xt->dir)) 16472ee67178SXianjun Jiao return NULL; 16482ee67178SXianjun Jiao 16492ee67178SXianjun Jiao if (!xt->numf || !xt->sgl[0].size) 16502ee67178SXianjun Jiao return NULL; 16512ee67178SXianjun Jiao 16522ee67178SXianjun Jiao if (xt->frame_size != 1) 16532ee67178SXianjun Jiao return NULL; 16542ee67178SXianjun Jiao 16552ee67178SXianjun Jiao /* Allocate a transaction descriptor. */ 16562ee67178SXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 16572ee67178SXianjun Jiao if (!desc) 16582ee67178SXianjun Jiao return NULL; 16592ee67178SXianjun Jiao 16602ee67178SXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 16612ee67178SXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 16622ee67178SXianjun Jiao async_tx_ack(&desc->async_tx); 16632ee67178SXianjun Jiao 16642ee67178SXianjun Jiao /* Allocate the link descriptor from DMA pool */ 16652ee67178SXianjun Jiao segment = xilinx_vdma_alloc_tx_segment(chan); 16662ee67178SXianjun Jiao if (!segment) 16672ee67178SXianjun Jiao goto error; 16682ee67178SXianjun Jiao 16692ee67178SXianjun Jiao /* Fill in the hardware descriptor */ 16702ee67178SXianjun Jiao hw = &segment->hw; 16712ee67178SXianjun Jiao hw->vsize = xt->numf; 16722ee67178SXianjun Jiao hw->hsize = xt->sgl[0].size; 16732ee67178SXianjun Jiao hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 16742ee67178SXianjun Jiao XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; 16752ee67178SXianjun Jiao hw->stride |= chan->config.frm_dly << 16762ee67178SXianjun Jiao XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 16772ee67178SXianjun Jiao 16782ee67178SXianjun Jiao if (xt->dir != DMA_MEM_TO_DEV) { 16792ee67178SXianjun Jiao if (chan->ext_addr) { 16802ee67178SXianjun Jiao hw->buf_addr = lower_32_bits(xt->dst_start); 16812ee67178SXianjun Jiao hw->buf_addr_msb = upper_32_bits(xt->dst_start); 16822ee67178SXianjun Jiao } else { 16832ee67178SXianjun Jiao hw->buf_addr = xt->dst_start; 16842ee67178SXianjun Jiao } 16852ee67178SXianjun Jiao } else { 16862ee67178SXianjun Jiao if (chan->ext_addr) { 16872ee67178SXianjun Jiao hw->buf_addr = lower_32_bits(xt->src_start); 16882ee67178SXianjun Jiao hw->buf_addr_msb = upper_32_bits(xt->src_start); 16892ee67178SXianjun Jiao } else { 16902ee67178SXianjun Jiao hw->buf_addr = xt->src_start; 16912ee67178SXianjun Jiao } 16922ee67178SXianjun Jiao } 16932ee67178SXianjun Jiao 16942ee67178SXianjun Jiao /* Insert the segment into the descriptor segments list. */ 16952ee67178SXianjun Jiao list_add_tail(&segment->node, &desc->segments); 16962ee67178SXianjun Jiao 16972ee67178SXianjun Jiao /* Link the last hardware descriptor with the first. */ 16982ee67178SXianjun Jiao segment = list_first_entry(&desc->segments, 16992ee67178SXianjun Jiao struct xilinx_vdma_tx_segment, node); 17002ee67178SXianjun Jiao desc->async_tx.phys = segment->phys; 17012ee67178SXianjun Jiao 17022ee67178SXianjun Jiao return &desc->async_tx; 17032ee67178SXianjun Jiao 17042ee67178SXianjun Jiao error: 17052ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 17062ee67178SXianjun Jiao return NULL; 17072ee67178SXianjun Jiao } 17082ee67178SXianjun Jiao 17092ee67178SXianjun Jiao /** 17102ee67178SXianjun Jiao * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction 17112ee67178SXianjun Jiao * @dchan: DMA channel 17122ee67178SXianjun Jiao * @dma_dst: destination address 17132ee67178SXianjun Jiao * @dma_src: source address 17142ee67178SXianjun Jiao * @len: transfer length 17152ee67178SXianjun Jiao * @flags: transfer ack flags 17162ee67178SXianjun Jiao * 17172ee67178SXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 17182ee67178SXianjun Jiao */ 17192ee67178SXianjun Jiao static struct dma_async_tx_descriptor * 17202ee67178SXianjun Jiao xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, 17212ee67178SXianjun Jiao dma_addr_t dma_src, size_t len, unsigned long flags) 17222ee67178SXianjun Jiao { 17232ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 17242ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 17252ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *segment; 17262ee67178SXianjun Jiao struct xilinx_cdma_desc_hw *hw; 17272ee67178SXianjun Jiao 17282ee67178SXianjun Jiao if (!len || len > chan->xdev->max_buffer_len) 17292ee67178SXianjun Jiao return NULL; 17302ee67178SXianjun Jiao 17312ee67178SXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 17322ee67178SXianjun Jiao if (!desc) 17332ee67178SXianjun Jiao return NULL; 17342ee67178SXianjun Jiao 17352ee67178SXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 17362ee67178SXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 17372ee67178SXianjun Jiao 17382ee67178SXianjun Jiao /* Allocate the link descriptor from DMA pool */ 17392ee67178SXianjun Jiao segment = xilinx_cdma_alloc_tx_segment(chan); 17402ee67178SXianjun Jiao if (!segment) 17412ee67178SXianjun Jiao goto error; 17422ee67178SXianjun Jiao 17432ee67178SXianjun Jiao hw = &segment->hw; 17442ee67178SXianjun Jiao hw->control = len; 17452ee67178SXianjun Jiao hw->src_addr = dma_src; 17462ee67178SXianjun Jiao hw->dest_addr = dma_dst; 17472ee67178SXianjun Jiao if (chan->ext_addr) { 17482ee67178SXianjun Jiao hw->src_addr_msb = upper_32_bits(dma_src); 17492ee67178SXianjun Jiao hw->dest_addr_msb = upper_32_bits(dma_dst); 17502ee67178SXianjun Jiao } 17512ee67178SXianjun Jiao 17522ee67178SXianjun Jiao /* Insert the segment into the descriptor segments list. */ 17532ee67178SXianjun Jiao list_add_tail(&segment->node, &desc->segments); 17542ee67178SXianjun Jiao 17552ee67178SXianjun Jiao desc->async_tx.phys = segment->phys; 17562ee67178SXianjun Jiao hw->next_desc = segment->phys; 17572ee67178SXianjun Jiao 17582ee67178SXianjun Jiao return &desc->async_tx; 17592ee67178SXianjun Jiao 17602ee67178SXianjun Jiao error: 17612ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 17622ee67178SXianjun Jiao return NULL; 17632ee67178SXianjun Jiao } 17642ee67178SXianjun Jiao 17652ee67178SXianjun Jiao /** 17662ee67178SXianjun Jiao * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction 17672ee67178SXianjun Jiao * @dchan: DMA channel 17682ee67178SXianjun Jiao * @dst_sg: Destination scatter list 17692ee67178SXianjun Jiao * @dst_sg_len: Number of entries in destination scatter list 17702ee67178SXianjun Jiao * @src_sg: Source scatter list 17712ee67178SXianjun Jiao * @src_sg_len: Number of entries in source scatter list 17722ee67178SXianjun Jiao * @flags: transfer ack flags 17732ee67178SXianjun Jiao * 17742ee67178SXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 17752ee67178SXianjun Jiao */ 17762ee67178SXianjun Jiao static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg( 17772ee67178SXianjun Jiao struct dma_chan *dchan, struct scatterlist *dst_sg, 17782ee67178SXianjun Jiao unsigned int dst_sg_len, struct scatterlist *src_sg, 17792ee67178SXianjun Jiao unsigned int src_sg_len, unsigned long flags) 17802ee67178SXianjun Jiao { 17812ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 17822ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 17832ee67178SXianjun Jiao struct xilinx_cdma_tx_segment *segment, *prev = NULL; 17842ee67178SXianjun Jiao struct xilinx_cdma_desc_hw *hw; 17852ee67178SXianjun Jiao size_t len, dst_avail, src_avail; 17862ee67178SXianjun Jiao dma_addr_t dma_dst, dma_src; 17872ee67178SXianjun Jiao 17882ee67178SXianjun Jiao if (unlikely(dst_sg_len == 0 || src_sg_len == 0)) 17892ee67178SXianjun Jiao return NULL; 17902ee67178SXianjun Jiao 17912ee67178SXianjun Jiao if (unlikely(dst_sg == NULL || src_sg == NULL)) 17922ee67178SXianjun Jiao return NULL; 17932ee67178SXianjun Jiao 17942ee67178SXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 17952ee67178SXianjun Jiao if (!desc) 17962ee67178SXianjun Jiao return NULL; 17972ee67178SXianjun Jiao 17982ee67178SXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 17992ee67178SXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 18002ee67178SXianjun Jiao 18012ee67178SXianjun Jiao dst_avail = sg_dma_len(dst_sg); 18022ee67178SXianjun Jiao src_avail = sg_dma_len(src_sg); 18032ee67178SXianjun Jiao /* 18042ee67178SXianjun Jiao * loop until there is either no more source or no more destination 18052ee67178SXianjun Jiao * scatterlist entry 18062ee67178SXianjun Jiao */ 18072ee67178SXianjun Jiao while (true) { 18082ee67178SXianjun Jiao len = min_t(size_t, src_avail, dst_avail); 18092ee67178SXianjun Jiao len = min_t(size_t, len, chan->xdev->max_buffer_len); 18102ee67178SXianjun Jiao if (len == 0) 18112ee67178SXianjun Jiao goto fetch; 18122ee67178SXianjun Jiao 18132ee67178SXianjun Jiao /* Allocate the link descriptor from DMA pool */ 18142ee67178SXianjun Jiao segment = xilinx_cdma_alloc_tx_segment(chan); 18152ee67178SXianjun Jiao if (!segment) 18162ee67178SXianjun Jiao goto error; 18172ee67178SXianjun Jiao 18182ee67178SXianjun Jiao dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - 18192ee67178SXianjun Jiao dst_avail; 18202ee67178SXianjun Jiao dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - 18212ee67178SXianjun Jiao src_avail; 18222ee67178SXianjun Jiao hw = &segment->hw; 18232ee67178SXianjun Jiao hw->control = len; 18242ee67178SXianjun Jiao hw->src_addr = dma_src; 18252ee67178SXianjun Jiao hw->dest_addr = dma_dst; 18262ee67178SXianjun Jiao if (chan->ext_addr) { 18272ee67178SXianjun Jiao hw->src_addr_msb = upper_32_bits(dma_src); 18282ee67178SXianjun Jiao hw->dest_addr_msb = upper_32_bits(dma_dst); 18292ee67178SXianjun Jiao } 18302ee67178SXianjun Jiao 18312ee67178SXianjun Jiao if (prev) 18322ee67178SXianjun Jiao prev->hw.next_desc = segment->phys; 18332ee67178SXianjun Jiao 18342ee67178SXianjun Jiao prev = segment; 18352ee67178SXianjun Jiao dst_avail -= len; 18362ee67178SXianjun Jiao src_avail -= len; 18372ee67178SXianjun Jiao list_add_tail(&segment->node, &desc->segments); 18382ee67178SXianjun Jiao 18392ee67178SXianjun Jiao fetch: 18402ee67178SXianjun Jiao /* Fetch the next dst scatterlist entry */ 18412ee67178SXianjun Jiao if (dst_avail == 0) { 18422ee67178SXianjun Jiao if (dst_sg_len == 0) 18432ee67178SXianjun Jiao break; 18442ee67178SXianjun Jiao dst_sg = sg_next(dst_sg); 18452ee67178SXianjun Jiao if (dst_sg == NULL) 18462ee67178SXianjun Jiao break; 18472ee67178SXianjun Jiao dst_sg_len--; 18482ee67178SXianjun Jiao dst_avail = sg_dma_len(dst_sg); 18492ee67178SXianjun Jiao } 18502ee67178SXianjun Jiao /* Fetch the next src scatterlist entry */ 18512ee67178SXianjun Jiao if (src_avail == 0) { 18522ee67178SXianjun Jiao if (src_sg_len == 0) 18532ee67178SXianjun Jiao break; 18542ee67178SXianjun Jiao src_sg = sg_next(src_sg); 18552ee67178SXianjun Jiao if (src_sg == NULL) 18562ee67178SXianjun Jiao break; 18572ee67178SXianjun Jiao src_sg_len--; 18582ee67178SXianjun Jiao src_avail = sg_dma_len(src_sg); 18592ee67178SXianjun Jiao } 18602ee67178SXianjun Jiao } 18612ee67178SXianjun Jiao 18622ee67178SXianjun Jiao /* Link the last hardware descriptor with the first. */ 18632ee67178SXianjun Jiao segment = list_first_entry(&desc->segments, 18642ee67178SXianjun Jiao struct xilinx_cdma_tx_segment, node); 18652ee67178SXianjun Jiao desc->async_tx.phys = segment->phys; 18662ee67178SXianjun Jiao prev->hw.next_desc = segment->phys; 18672ee67178SXianjun Jiao 18682ee67178SXianjun Jiao return &desc->async_tx; 18692ee67178SXianjun Jiao 18702ee67178SXianjun Jiao error: 18712ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 18722ee67178SXianjun Jiao return NULL; 18732ee67178SXianjun Jiao } 18742ee67178SXianjun Jiao 18752ee67178SXianjun Jiao /** 18762ee67178SXianjun Jiao * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 18772ee67178SXianjun Jiao * @dchan: DMA channel 18782ee67178SXianjun Jiao * @sgl: scatterlist to transfer to/from 18792ee67178SXianjun Jiao * @sg_len: number of entries in @scatterlist 18802ee67178SXianjun Jiao * @direction: DMA direction 18812ee67178SXianjun Jiao * @flags: transfer ack flags 18822ee67178SXianjun Jiao * @context: APP words of the descriptor 18832ee67178SXianjun Jiao * 18842ee67178SXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 18852ee67178SXianjun Jiao */ 18862ee67178SXianjun Jiao static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( 18872ee67178SXianjun Jiao struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 18882ee67178SXianjun Jiao enum dma_transfer_direction direction, unsigned long flags, 18892ee67178SXianjun Jiao void *context) 18902ee67178SXianjun Jiao { 18912ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 18922ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 18932ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *segment = NULL; 18942ee67178SXianjun Jiao u32 *app_w = (u32 *)context; 18952ee67178SXianjun Jiao struct scatterlist *sg; 18962ee67178SXianjun Jiao size_t copy; 18972ee67178SXianjun Jiao size_t sg_used; 18982ee67178SXianjun Jiao unsigned int i; 18992ee67178SXianjun Jiao 19002ee67178SXianjun Jiao if (!is_slave_direction(direction)) 19012ee67178SXianjun Jiao return NULL; 19022ee67178SXianjun Jiao 19032ee67178SXianjun Jiao /* Allocate a transaction descriptor. */ 19042ee67178SXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 19052ee67178SXianjun Jiao if (!desc) 19062ee67178SXianjun Jiao return NULL; 19072ee67178SXianjun Jiao 19082ee67178SXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 19092ee67178SXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 19102ee67178SXianjun Jiao 19112ee67178SXianjun Jiao /* Build transactions using information in the scatter gather list */ 19122ee67178SXianjun Jiao for_each_sg(sgl, sg, sg_len, i) { 19132ee67178SXianjun Jiao sg_used = 0; 19142ee67178SXianjun Jiao 19152ee67178SXianjun Jiao /* Loop until the entire scatterlist entry is used */ 19162ee67178SXianjun Jiao while (sg_used < sg_dma_len(sg)) { 19172ee67178SXianjun Jiao struct xilinx_axidma_desc_hw *hw; 19182ee67178SXianjun Jiao 19192ee67178SXianjun Jiao /* Get a free segment */ 19202ee67178SXianjun Jiao segment = xilinx_axidma_alloc_tx_segment(chan); 19212ee67178SXianjun Jiao if (!segment) 19222ee67178SXianjun Jiao goto error; 19232ee67178SXianjun Jiao 19242ee67178SXianjun Jiao /* 19252ee67178SXianjun Jiao * Calculate the maximum number of bytes to transfer, 19262ee67178SXianjun Jiao * making sure it is less than the hw limit 19272ee67178SXianjun Jiao */ 19282ee67178SXianjun Jiao copy = min_t(size_t, sg_dma_len(sg) - sg_used, 19292ee67178SXianjun Jiao chan->xdev->max_buffer_len); 19302ee67178SXianjun Jiao hw = &segment->hw; 19312ee67178SXianjun Jiao 19322ee67178SXianjun Jiao /* Fill in the descriptor */ 19332ee67178SXianjun Jiao xilinx_axidma_buf(chan, hw, sg_dma_address(sg), 19342ee67178SXianjun Jiao sg_used, 0); 19352ee67178SXianjun Jiao 19362ee67178SXianjun Jiao hw->control = copy; 19372ee67178SXianjun Jiao 19382ee67178SXianjun Jiao if (chan->direction == DMA_MEM_TO_DEV) { 19392ee67178SXianjun Jiao if (app_w) 19402ee67178SXianjun Jiao memcpy(hw->app, app_w, sizeof(u32) * 19412ee67178SXianjun Jiao XILINX_DMA_NUM_APP_WORDS); 19422ee67178SXianjun Jiao } 19432ee67178SXianjun Jiao 19442ee67178SXianjun Jiao sg_used += copy; 19452ee67178SXianjun Jiao 19462ee67178SXianjun Jiao /* 19472ee67178SXianjun Jiao * Insert the segment into the descriptor segments 19482ee67178SXianjun Jiao * list. 19492ee67178SXianjun Jiao */ 19502ee67178SXianjun Jiao list_add_tail(&segment->node, &desc->segments); 19512ee67178SXianjun Jiao } 19522ee67178SXianjun Jiao } 19532ee67178SXianjun Jiao 19542ee67178SXianjun Jiao segment = list_first_entry(&desc->segments, 19552ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, node); 19562ee67178SXianjun Jiao desc->async_tx.phys = segment->phys; 19572ee67178SXianjun Jiao 19582ee67178SXianjun Jiao /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 19592ee67178SXianjun Jiao if (chan->direction == DMA_MEM_TO_DEV) { 19602ee67178SXianjun Jiao segment->hw.control |= XILINX_DMA_BD_SOP; 19612ee67178SXianjun Jiao segment = list_last_entry(&desc->segments, 19622ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, 19632ee67178SXianjun Jiao node); 19642ee67178SXianjun Jiao segment->hw.control |= XILINX_DMA_BD_EOP; 19652ee67178SXianjun Jiao } 19662ee67178SXianjun Jiao 19672ee67178SXianjun Jiao return &desc->async_tx; 19682ee67178SXianjun Jiao 19692ee67178SXianjun Jiao error: 19702ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 19712ee67178SXianjun Jiao return NULL; 19722ee67178SXianjun Jiao } 19732ee67178SXianjun Jiao 19742ee67178SXianjun Jiao /** 19752ee67178SXianjun Jiao * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction 19762ee67178SXianjun Jiao * @dchan: DMA channel 19772ee67178SXianjun Jiao * @buf_addr: Physical address of the buffer 19782ee67178SXianjun Jiao * @buf_len: Total length of the cyclic buffers 19792ee67178SXianjun Jiao * @period_len: length of individual cyclic buffer 19802ee67178SXianjun Jiao * @direction: DMA direction 19812ee67178SXianjun Jiao * @flags: transfer ack flags 19822ee67178SXianjun Jiao * 19832ee67178SXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 19842ee67178SXianjun Jiao */ 19852ee67178SXianjun Jiao static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( 19862ee67178SXianjun Jiao struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, 19872ee67178SXianjun Jiao size_t period_len, enum dma_transfer_direction direction, 19882ee67178SXianjun Jiao unsigned long flags) 19892ee67178SXianjun Jiao { 19902ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 19912ee67178SXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 19922ee67178SXianjun Jiao struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; 19932ee67178SXianjun Jiao size_t copy, sg_used; 19942ee67178SXianjun Jiao unsigned int num_periods; 19952ee67178SXianjun Jiao int i; 19962ee67178SXianjun Jiao u32 reg; 19972ee67178SXianjun Jiao 19982ee67178SXianjun Jiao if (!period_len) 19992ee67178SXianjun Jiao return NULL; 20002ee67178SXianjun Jiao 20012ee67178SXianjun Jiao num_periods = buf_len / period_len; 20022ee67178SXianjun Jiao 20032ee67178SXianjun Jiao if (!num_periods) 20042ee67178SXianjun Jiao return NULL; 20052ee67178SXianjun Jiao 20062ee67178SXianjun Jiao if (!is_slave_direction(direction)) 20072ee67178SXianjun Jiao return NULL; 20082ee67178SXianjun Jiao 20092ee67178SXianjun Jiao /* Allocate a transaction descriptor. */ 20102ee67178SXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 20112ee67178SXianjun Jiao if (!desc) 20122ee67178SXianjun Jiao return NULL; 20132ee67178SXianjun Jiao 20142ee67178SXianjun Jiao chan->direction = direction; 20152ee67178SXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 20162ee67178SXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 20172ee67178SXianjun Jiao 20182ee67178SXianjun Jiao chan->buf_idx = 0; 20192ee67178SXianjun Jiao 20202ee67178SXianjun Jiao for (i = 0; i < num_periods; ++i) { 20212ee67178SXianjun Jiao sg_used = 0; 20222ee67178SXianjun Jiao 20232ee67178SXianjun Jiao while (sg_used < period_len) { 20242ee67178SXianjun Jiao struct xilinx_axidma_desc_hw *hw; 20252ee67178SXianjun Jiao 20262ee67178SXianjun Jiao /* Get a free segment */ 20272ee67178SXianjun Jiao segment = xilinx_axidma_alloc_tx_segment(chan); 20282ee67178SXianjun Jiao if (!segment) 20292ee67178SXianjun Jiao goto error; 20302ee67178SXianjun Jiao 20312ee67178SXianjun Jiao /* 20322ee67178SXianjun Jiao * Calculate the maximum number of bytes to transfer, 20332ee67178SXianjun Jiao * making sure it is less than the hw limit 20342ee67178SXianjun Jiao */ 20352ee67178SXianjun Jiao copy = min_t(size_t, period_len - sg_used, 20362ee67178SXianjun Jiao chan->xdev->max_buffer_len); 20372ee67178SXianjun Jiao hw = &segment->hw; 20382ee67178SXianjun Jiao xilinx_axidma_buf(chan, hw, buf_addr, sg_used, 20392ee67178SXianjun Jiao period_len * i); 20402ee67178SXianjun Jiao hw->control = copy; 20412ee67178SXianjun Jiao 20422ee67178SXianjun Jiao if (prev) 20432ee67178SXianjun Jiao prev->hw.next_desc = segment->phys; 20442ee67178SXianjun Jiao 20452ee67178SXianjun Jiao prev = segment; 20462ee67178SXianjun Jiao sg_used += copy; 20472ee67178SXianjun Jiao 20482ee67178SXianjun Jiao /* 20492ee67178SXianjun Jiao * Insert the segment into the descriptor segments 20502ee67178SXianjun Jiao * list. 20512ee67178SXianjun Jiao */ 20522ee67178SXianjun Jiao list_add_tail(&segment->node, &desc->segments); 20532ee67178SXianjun Jiao } 20542ee67178SXianjun Jiao } 20552ee67178SXianjun Jiao 20562ee67178SXianjun Jiao head_segment = list_first_entry(&desc->segments, 20572ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, node); 20582ee67178SXianjun Jiao desc->async_tx.phys = head_segment->phys; 20592ee67178SXianjun Jiao 20602ee67178SXianjun Jiao desc->cyclic = true; 20612ee67178SXianjun Jiao reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 20622ee67178SXianjun Jiao reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 20632ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 20642ee67178SXianjun Jiao 20652ee67178SXianjun Jiao segment = list_last_entry(&desc->segments, 20662ee67178SXianjun Jiao struct xilinx_axidma_tx_segment, 20672ee67178SXianjun Jiao node); 20682ee67178SXianjun Jiao segment->hw.next_desc = (u32) head_segment->phys; 20692ee67178SXianjun Jiao 20702ee67178SXianjun Jiao /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 20712ee67178SXianjun Jiao if (direction == DMA_MEM_TO_DEV) { 20722ee67178SXianjun Jiao head_segment->hw.control |= XILINX_DMA_BD_SOP; 20732ee67178SXianjun Jiao segment->hw.control |= XILINX_DMA_BD_EOP; 20742ee67178SXianjun Jiao } 20752ee67178SXianjun Jiao 20762ee67178SXianjun Jiao return &desc->async_tx; 20772ee67178SXianjun Jiao 20782ee67178SXianjun Jiao error: 20792ee67178SXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 20802ee67178SXianjun Jiao return NULL; 20812ee67178SXianjun Jiao } 20822ee67178SXianjun Jiao 20832ee67178SXianjun Jiao /** 2084febc5adfSXianjun Jiao * xilinx_dma_prep_interleaved - prepare a descriptor for a 2085febc5adfSXianjun Jiao * DMA_SLAVE transaction 2086febc5adfSXianjun Jiao * @dchan: DMA channel 2087febc5adfSXianjun Jiao * @xt: Interleaved template pointer 2088febc5adfSXianjun Jiao * @flags: transfer ack flags 2089febc5adfSXianjun Jiao * 2090febc5adfSXianjun Jiao * Return: Async transaction descriptor on success and NULL on failure 2091febc5adfSXianjun Jiao */ 2092febc5adfSXianjun Jiao static struct dma_async_tx_descriptor * 2093febc5adfSXianjun Jiao xilinx_dma_prep_interleaved(struct dma_chan *dchan, 2094febc5adfSXianjun Jiao struct dma_interleaved_template *xt, 2095febc5adfSXianjun Jiao unsigned long flags) 2096febc5adfSXianjun Jiao { 2097febc5adfSXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2098febc5adfSXianjun Jiao struct xilinx_dma_tx_descriptor *desc; 2099febc5adfSXianjun Jiao struct xilinx_axidma_tx_segment *segment; 2100febc5adfSXianjun Jiao struct xilinx_axidma_desc_hw *hw; 2101febc5adfSXianjun Jiao 2102febc5adfSXianjun Jiao if (!is_slave_direction(xt->dir)) 2103febc5adfSXianjun Jiao return NULL; 2104febc5adfSXianjun Jiao 2105febc5adfSXianjun Jiao if (!xt->numf || !xt->sgl[0].size) 2106febc5adfSXianjun Jiao return NULL; 2107febc5adfSXianjun Jiao 2108febc5adfSXianjun Jiao if (xt->frame_size != 1) 2109febc5adfSXianjun Jiao return NULL; 2110febc5adfSXianjun Jiao 2111febc5adfSXianjun Jiao /* Allocate a transaction descriptor. */ 2112febc5adfSXianjun Jiao desc = xilinx_dma_alloc_tx_descriptor(chan); 2113febc5adfSXianjun Jiao if (!desc) 2114febc5adfSXianjun Jiao return NULL; 2115febc5adfSXianjun Jiao 2116febc5adfSXianjun Jiao chan->direction = xt->dir; 2117febc5adfSXianjun Jiao dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2118febc5adfSXianjun Jiao desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2119febc5adfSXianjun Jiao 2120febc5adfSXianjun Jiao /* Get a free segment */ 2121febc5adfSXianjun Jiao segment = xilinx_axidma_alloc_tx_segment(chan); 2122febc5adfSXianjun Jiao if (!segment) 2123febc5adfSXianjun Jiao goto error; 2124febc5adfSXianjun Jiao 2125febc5adfSXianjun Jiao hw = &segment->hw; 2126febc5adfSXianjun Jiao 2127febc5adfSXianjun Jiao /* Fill in the descriptor */ 2128febc5adfSXianjun Jiao if (xt->dir != DMA_MEM_TO_DEV) 2129febc5adfSXianjun Jiao hw->buf_addr = xt->dst_start; 2130febc5adfSXianjun Jiao else 2131febc5adfSXianjun Jiao hw->buf_addr = xt->src_start; 2132febc5adfSXianjun Jiao 2133febc5adfSXianjun Jiao hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; 2134febc5adfSXianjun Jiao hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & 2135febc5adfSXianjun Jiao XILINX_DMA_BD_VSIZE_MASK; 2136febc5adfSXianjun Jiao hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & 2137febc5adfSXianjun Jiao XILINX_DMA_BD_STRIDE_MASK; 2138febc5adfSXianjun Jiao hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; 2139febc5adfSXianjun Jiao 2140febc5adfSXianjun Jiao /* 2141febc5adfSXianjun Jiao * Insert the segment into the descriptor segments 2142febc5adfSXianjun Jiao * list. 2143febc5adfSXianjun Jiao */ 2144febc5adfSXianjun Jiao list_add_tail(&segment->node, &desc->segments); 2145febc5adfSXianjun Jiao 2146febc5adfSXianjun Jiao 2147febc5adfSXianjun Jiao segment = list_first_entry(&desc->segments, 2148febc5adfSXianjun Jiao struct xilinx_axidma_tx_segment, node); 2149febc5adfSXianjun Jiao desc->async_tx.phys = segment->phys; 2150febc5adfSXianjun Jiao 2151febc5adfSXianjun Jiao /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2152febc5adfSXianjun Jiao if (xt->dir == DMA_MEM_TO_DEV) { 2153febc5adfSXianjun Jiao segment->hw.control |= XILINX_DMA_BD_SOP; 2154febc5adfSXianjun Jiao segment = list_last_entry(&desc->segments, 2155febc5adfSXianjun Jiao struct xilinx_axidma_tx_segment, 2156febc5adfSXianjun Jiao node); 2157febc5adfSXianjun Jiao segment->hw.control |= XILINX_DMA_BD_EOP; 2158febc5adfSXianjun Jiao } 2159febc5adfSXianjun Jiao 2160febc5adfSXianjun Jiao return &desc->async_tx; 2161febc5adfSXianjun Jiao 2162febc5adfSXianjun Jiao error: 2163febc5adfSXianjun Jiao xilinx_dma_free_tx_descriptor(chan, desc); 2164febc5adfSXianjun Jiao return NULL; 2165febc5adfSXianjun Jiao } 2166febc5adfSXianjun Jiao 2167febc5adfSXianjun Jiao /** 21682ee67178SXianjun Jiao * xilinx_dma_terminate_all - Halt the channel and free descriptors 21692ee67178SXianjun Jiao * @dchan: Driver specific DMA Channel pointer 21702ee67178SXianjun Jiao * 21712ee67178SXianjun Jiao * Return: '0' always. 21722ee67178SXianjun Jiao */ 21732ee67178SXianjun Jiao static int xilinx_dma_terminate_all(struct dma_chan *dchan) 21742ee67178SXianjun Jiao { 21752ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 21762ee67178SXianjun Jiao u32 reg; 21772ee67178SXianjun Jiao int err; 21782ee67178SXianjun Jiao 21792ee67178SXianjun Jiao if (!chan->cyclic) { 21802ee67178SXianjun Jiao err = chan->stop_transfer(chan); 21812ee67178SXianjun Jiao if (err) { 21822ee67178SXianjun Jiao dev_err(chan->dev, "Cannot stop channel %p: %x\n", 21832ee67178SXianjun Jiao chan, dma_ctrl_read(chan, 21842ee67178SXianjun Jiao XILINX_DMA_REG_DMASR)); 21852ee67178SXianjun Jiao chan->err = true; 21862ee67178SXianjun Jiao } 21872ee67178SXianjun Jiao } 21882ee67178SXianjun Jiao 21892ee67178SXianjun Jiao xilinx_dma_chan_reset(chan); 21902ee67178SXianjun Jiao /* Remove and free all of the descriptors in the lists */ 21912ee67178SXianjun Jiao xilinx_dma_free_descriptors(chan); 21922ee67178SXianjun Jiao chan->idle = true; 21932ee67178SXianjun Jiao 21942ee67178SXianjun Jiao if (chan->cyclic) { 21952ee67178SXianjun Jiao reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 21962ee67178SXianjun Jiao reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; 21972ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 21982ee67178SXianjun Jiao chan->cyclic = false; 21992ee67178SXianjun Jiao } 22002ee67178SXianjun Jiao 22012ee67178SXianjun Jiao if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) 22022ee67178SXianjun Jiao dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 22032ee67178SXianjun Jiao XILINX_CDMA_CR_SGMODE); 22042ee67178SXianjun Jiao 22052ee67178SXianjun Jiao return 0; 22062ee67178SXianjun Jiao } 22072ee67178SXianjun Jiao 22082ee67178SXianjun Jiao /** 22092ee67178SXianjun Jiao * xilinx_dma_channel_set_config - Configure VDMA channel 22102ee67178SXianjun Jiao * Run-time configuration for Axi VDMA, supports: 22112ee67178SXianjun Jiao * . halt the channel 22122ee67178SXianjun Jiao * . configure interrupt coalescing and inter-packet delay threshold 22132ee67178SXianjun Jiao * . start/stop parking 22142ee67178SXianjun Jiao * . enable genlock 22152ee67178SXianjun Jiao * 22162ee67178SXianjun Jiao * @dchan: DMA channel 22172ee67178SXianjun Jiao * @cfg: VDMA device configuration pointer 22182ee67178SXianjun Jiao * 22192ee67178SXianjun Jiao * Return: '0' on success and failure value on error 22202ee67178SXianjun Jiao */ 22212ee67178SXianjun Jiao int xilinx_vdma_channel_set_config(struct dma_chan *dchan, 22222ee67178SXianjun Jiao struct xilinx_vdma_config *cfg) 22232ee67178SXianjun Jiao { 22242ee67178SXianjun Jiao struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 22252ee67178SXianjun Jiao u32 dmacr; 22262ee67178SXianjun Jiao 22272ee67178SXianjun Jiao if (cfg->reset) 22282ee67178SXianjun Jiao return xilinx_dma_chan_reset(chan); 22292ee67178SXianjun Jiao 22302ee67178SXianjun Jiao dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 22312ee67178SXianjun Jiao 22322ee67178SXianjun Jiao chan->config.frm_dly = cfg->frm_dly; 22332ee67178SXianjun Jiao chan->config.park = cfg->park; 22342ee67178SXianjun Jiao 22352ee67178SXianjun Jiao /* genlock settings */ 22362ee67178SXianjun Jiao chan->config.gen_lock = cfg->gen_lock; 22372ee67178SXianjun Jiao chan->config.master = cfg->master; 22382ee67178SXianjun Jiao 22392ee67178SXianjun Jiao if (cfg->gen_lock && chan->genlock) { 22402ee67178SXianjun Jiao dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; 22412ee67178SXianjun Jiao dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; 22422ee67178SXianjun Jiao } 22432ee67178SXianjun Jiao 22442ee67178SXianjun Jiao chan->config.frm_cnt_en = cfg->frm_cnt_en; 22452ee67178SXianjun Jiao chan->config.vflip_en = cfg->vflip_en; 22462ee67178SXianjun Jiao 22472ee67178SXianjun Jiao if (cfg->park) 22482ee67178SXianjun Jiao chan->config.park_frm = cfg->park_frm; 22492ee67178SXianjun Jiao else 22502ee67178SXianjun Jiao chan->config.park_frm = -1; 22512ee67178SXianjun Jiao 22522ee67178SXianjun Jiao chan->config.coalesc = cfg->coalesc; 22532ee67178SXianjun Jiao chan->config.delay = cfg->delay; 22542ee67178SXianjun Jiao 22552ee67178SXianjun Jiao if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { 22562ee67178SXianjun Jiao dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; 22572ee67178SXianjun Jiao chan->config.coalesc = cfg->coalesc; 22582ee67178SXianjun Jiao } 22592ee67178SXianjun Jiao 22602ee67178SXianjun Jiao if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { 22612ee67178SXianjun Jiao dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; 22622ee67178SXianjun Jiao chan->config.delay = cfg->delay; 22632ee67178SXianjun Jiao } 22642ee67178SXianjun Jiao 22652ee67178SXianjun Jiao /* FSync Source selection */ 22662ee67178SXianjun Jiao dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; 22672ee67178SXianjun Jiao dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; 22682ee67178SXianjun Jiao 22692ee67178SXianjun Jiao dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); 22702ee67178SXianjun Jiao 22712ee67178SXianjun Jiao return 0; 22722ee67178SXianjun Jiao } 22732ee67178SXianjun Jiao EXPORT_SYMBOL(xilinx_vdma_channel_set_config); 22742ee67178SXianjun Jiao 22752ee67178SXianjun Jiao /* ----------------------------------------------------------------------------- 22762ee67178SXianjun Jiao * Probe and remove 22772ee67178SXianjun Jiao */ 22782ee67178SXianjun Jiao 22792ee67178SXianjun Jiao /** 22802ee67178SXianjun Jiao * xilinx_dma_chan_remove - Per Channel remove function 22812ee67178SXianjun Jiao * @chan: Driver specific DMA channel 22822ee67178SXianjun Jiao */ 22832ee67178SXianjun Jiao static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) 22842ee67178SXianjun Jiao { 22852ee67178SXianjun Jiao /* Disable all interrupts */ 22862ee67178SXianjun Jiao dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, 22872ee67178SXianjun Jiao XILINX_DMA_DMAXR_ALL_IRQ_MASK); 22882ee67178SXianjun Jiao 22892ee67178SXianjun Jiao if (chan->irq > 0) 22902ee67178SXianjun Jiao free_irq(chan->irq, chan); 22912ee67178SXianjun Jiao 22922ee67178SXianjun Jiao tasklet_kill(&chan->tasklet); 22932ee67178SXianjun Jiao 22942ee67178SXianjun Jiao list_del(&chan->common.device_node); 22952ee67178SXianjun Jiao } 22962ee67178SXianjun Jiao 22972ee67178SXianjun Jiao static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 22982ee67178SXianjun Jiao struct clk **tx_clk, struct clk **rx_clk, 22992ee67178SXianjun Jiao struct clk **sg_clk, struct clk **tmp_clk) 23002ee67178SXianjun Jiao { 23012ee67178SXianjun Jiao int err; 23022ee67178SXianjun Jiao 23032ee67178SXianjun Jiao *tmp_clk = NULL; 23042ee67178SXianjun Jiao 23052ee67178SXianjun Jiao *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 23062ee67178SXianjun Jiao if (IS_ERR(*axi_clk)) { 23072ee67178SXianjun Jiao err = PTR_ERR(*axi_clk); 23082ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); 23092ee67178SXianjun Jiao return err; 23102ee67178SXianjun Jiao } 23112ee67178SXianjun Jiao 23122ee67178SXianjun Jiao *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 23132ee67178SXianjun Jiao if (IS_ERR(*tx_clk)) 23142ee67178SXianjun Jiao *tx_clk = NULL; 23152ee67178SXianjun Jiao 23162ee67178SXianjun Jiao *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 23172ee67178SXianjun Jiao if (IS_ERR(*rx_clk)) 23182ee67178SXianjun Jiao *rx_clk = NULL; 23192ee67178SXianjun Jiao 23202ee67178SXianjun Jiao *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); 23212ee67178SXianjun Jiao if (IS_ERR(*sg_clk)) 23222ee67178SXianjun Jiao *sg_clk = NULL; 23232ee67178SXianjun Jiao 23242ee67178SXianjun Jiao err = clk_prepare_enable(*axi_clk); 23252ee67178SXianjun Jiao if (err) { 23262ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 23272ee67178SXianjun Jiao return err; 23282ee67178SXianjun Jiao } 23292ee67178SXianjun Jiao 23302ee67178SXianjun Jiao err = clk_prepare_enable(*tx_clk); 23312ee67178SXianjun Jiao if (err) { 23322ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 23332ee67178SXianjun Jiao goto err_disable_axiclk; 23342ee67178SXianjun Jiao } 23352ee67178SXianjun Jiao 23362ee67178SXianjun Jiao err = clk_prepare_enable(*rx_clk); 23372ee67178SXianjun Jiao if (err) { 23382ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 23392ee67178SXianjun Jiao goto err_disable_txclk; 23402ee67178SXianjun Jiao } 23412ee67178SXianjun Jiao 23422ee67178SXianjun Jiao err = clk_prepare_enable(*sg_clk); 23432ee67178SXianjun Jiao if (err) { 23442ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); 23452ee67178SXianjun Jiao goto err_disable_rxclk; 23462ee67178SXianjun Jiao } 23472ee67178SXianjun Jiao 23482ee67178SXianjun Jiao return 0; 23492ee67178SXianjun Jiao 23502ee67178SXianjun Jiao err_disable_rxclk: 23512ee67178SXianjun Jiao clk_disable_unprepare(*rx_clk); 23522ee67178SXianjun Jiao err_disable_txclk: 23532ee67178SXianjun Jiao clk_disable_unprepare(*tx_clk); 23542ee67178SXianjun Jiao err_disable_axiclk: 23552ee67178SXianjun Jiao clk_disable_unprepare(*axi_clk); 23562ee67178SXianjun Jiao 23572ee67178SXianjun Jiao return err; 23582ee67178SXianjun Jiao } 23592ee67178SXianjun Jiao 23602ee67178SXianjun Jiao static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 23612ee67178SXianjun Jiao struct clk **dev_clk, struct clk **tmp_clk, 23622ee67178SXianjun Jiao struct clk **tmp1_clk, struct clk **tmp2_clk) 23632ee67178SXianjun Jiao { 23642ee67178SXianjun Jiao int err; 23652ee67178SXianjun Jiao 23662ee67178SXianjun Jiao *tmp_clk = NULL; 23672ee67178SXianjun Jiao *tmp1_clk = NULL; 23682ee67178SXianjun Jiao *tmp2_clk = NULL; 23692ee67178SXianjun Jiao 23702ee67178SXianjun Jiao *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 23712ee67178SXianjun Jiao if (IS_ERR(*axi_clk)) { 23722ee67178SXianjun Jiao err = PTR_ERR(*axi_clk); 23732ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); 23742ee67178SXianjun Jiao return err; 23752ee67178SXianjun Jiao } 23762ee67178SXianjun Jiao 23772ee67178SXianjun Jiao *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); 23782ee67178SXianjun Jiao if (IS_ERR(*dev_clk)) { 23792ee67178SXianjun Jiao err = PTR_ERR(*dev_clk); 23802ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); 23812ee67178SXianjun Jiao return err; 23822ee67178SXianjun Jiao } 23832ee67178SXianjun Jiao 23842ee67178SXianjun Jiao err = clk_prepare_enable(*axi_clk); 23852ee67178SXianjun Jiao if (err) { 23862ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 23872ee67178SXianjun Jiao return err; 23882ee67178SXianjun Jiao } 23892ee67178SXianjun Jiao 23902ee67178SXianjun Jiao err = clk_prepare_enable(*dev_clk); 23912ee67178SXianjun Jiao if (err) { 23922ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); 23932ee67178SXianjun Jiao goto err_disable_axiclk; 23942ee67178SXianjun Jiao } 23952ee67178SXianjun Jiao 23962ee67178SXianjun Jiao return 0; 23972ee67178SXianjun Jiao 23982ee67178SXianjun Jiao err_disable_axiclk: 23992ee67178SXianjun Jiao clk_disable_unprepare(*axi_clk); 24002ee67178SXianjun Jiao 24012ee67178SXianjun Jiao return err; 24022ee67178SXianjun Jiao } 24032ee67178SXianjun Jiao 24042ee67178SXianjun Jiao static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, 24052ee67178SXianjun Jiao struct clk **tx_clk, struct clk **txs_clk, 24062ee67178SXianjun Jiao struct clk **rx_clk, struct clk **rxs_clk) 24072ee67178SXianjun Jiao { 24082ee67178SXianjun Jiao int err; 24092ee67178SXianjun Jiao 24102ee67178SXianjun Jiao *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); 24112ee67178SXianjun Jiao if (IS_ERR(*axi_clk)) { 24122ee67178SXianjun Jiao err = PTR_ERR(*axi_clk); 24132ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); 24142ee67178SXianjun Jiao return err; 24152ee67178SXianjun Jiao } 24162ee67178SXianjun Jiao 24172ee67178SXianjun Jiao *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); 24182ee67178SXianjun Jiao if (IS_ERR(*tx_clk)) 24192ee67178SXianjun Jiao *tx_clk = NULL; 24202ee67178SXianjun Jiao 24212ee67178SXianjun Jiao *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); 24222ee67178SXianjun Jiao if (IS_ERR(*txs_clk)) 24232ee67178SXianjun Jiao *txs_clk = NULL; 24242ee67178SXianjun Jiao 24252ee67178SXianjun Jiao *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); 24262ee67178SXianjun Jiao if (IS_ERR(*rx_clk)) 24272ee67178SXianjun Jiao *rx_clk = NULL; 24282ee67178SXianjun Jiao 24292ee67178SXianjun Jiao *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); 24302ee67178SXianjun Jiao if (IS_ERR(*rxs_clk)) 24312ee67178SXianjun Jiao *rxs_clk = NULL; 24322ee67178SXianjun Jiao 24332ee67178SXianjun Jiao err = clk_prepare_enable(*axi_clk); 24342ee67178SXianjun Jiao if (err) { 24352ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); 24362ee67178SXianjun Jiao return err; 24372ee67178SXianjun Jiao } 24382ee67178SXianjun Jiao 24392ee67178SXianjun Jiao err = clk_prepare_enable(*tx_clk); 24402ee67178SXianjun Jiao if (err) { 24412ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 24422ee67178SXianjun Jiao goto err_disable_axiclk; 24432ee67178SXianjun Jiao } 24442ee67178SXianjun Jiao 24452ee67178SXianjun Jiao err = clk_prepare_enable(*txs_clk); 24462ee67178SXianjun Jiao if (err) { 24472ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); 24482ee67178SXianjun Jiao goto err_disable_txclk; 24492ee67178SXianjun Jiao } 24502ee67178SXianjun Jiao 24512ee67178SXianjun Jiao err = clk_prepare_enable(*rx_clk); 24522ee67178SXianjun Jiao if (err) { 24532ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 24542ee67178SXianjun Jiao goto err_disable_txsclk; 24552ee67178SXianjun Jiao } 24562ee67178SXianjun Jiao 24572ee67178SXianjun Jiao err = clk_prepare_enable(*rxs_clk); 24582ee67178SXianjun Jiao if (err) { 24592ee67178SXianjun Jiao dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); 24602ee67178SXianjun Jiao goto err_disable_rxclk; 24612ee67178SXianjun Jiao } 24622ee67178SXianjun Jiao 24632ee67178SXianjun Jiao return 0; 24642ee67178SXianjun Jiao 24652ee67178SXianjun Jiao err_disable_rxclk: 24662ee67178SXianjun Jiao clk_disable_unprepare(*rx_clk); 24672ee67178SXianjun Jiao err_disable_txsclk: 24682ee67178SXianjun Jiao clk_disable_unprepare(*txs_clk); 24692ee67178SXianjun Jiao err_disable_txclk: 24702ee67178SXianjun Jiao clk_disable_unprepare(*tx_clk); 24712ee67178SXianjun Jiao err_disable_axiclk: 24722ee67178SXianjun Jiao clk_disable_unprepare(*axi_clk); 24732ee67178SXianjun Jiao 24742ee67178SXianjun Jiao return err; 24752ee67178SXianjun Jiao } 24762ee67178SXianjun Jiao 24772ee67178SXianjun Jiao static void xdma_disable_allclks(struct xilinx_dma_device *xdev) 24782ee67178SXianjun Jiao { 24792ee67178SXianjun Jiao clk_disable_unprepare(xdev->rxs_clk); 24802ee67178SXianjun Jiao clk_disable_unprepare(xdev->rx_clk); 24812ee67178SXianjun Jiao clk_disable_unprepare(xdev->txs_clk); 24822ee67178SXianjun Jiao clk_disable_unprepare(xdev->tx_clk); 24832ee67178SXianjun Jiao clk_disable_unprepare(xdev->axi_clk); 24842ee67178SXianjun Jiao } 24852ee67178SXianjun Jiao 24862ee67178SXianjun Jiao /** 24872ee67178SXianjun Jiao * xilinx_dma_chan_probe - Per Channel Probing 24882ee67178SXianjun Jiao * It get channel features from the device tree entry and 24892ee67178SXianjun Jiao * initialize special channel handling routines 24902ee67178SXianjun Jiao * 24912ee67178SXianjun Jiao * @xdev: Driver specific device structure 24922ee67178SXianjun Jiao * @node: Device node 24932ee67178SXianjun Jiao * @chan_id: DMA Channel id 24942ee67178SXianjun Jiao * 24952ee67178SXianjun Jiao * Return: '0' on success and failure value on error 24962ee67178SXianjun Jiao */ 24972ee67178SXianjun Jiao static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 24982ee67178SXianjun Jiao struct device_node *node, int chan_id) 24992ee67178SXianjun Jiao { 25002ee67178SXianjun Jiao struct xilinx_dma_chan *chan; 25012ee67178SXianjun Jiao bool has_dre = false; 25022ee67178SXianjun Jiao u32 value, width; 25032ee67178SXianjun Jiao int err; 25042ee67178SXianjun Jiao 25052ee67178SXianjun Jiao /* Allocate and initialize the channel structure */ 25062ee67178SXianjun Jiao chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); 25072ee67178SXianjun Jiao if (!chan) 25082ee67178SXianjun Jiao return -ENOMEM; 25092ee67178SXianjun Jiao 25102ee67178SXianjun Jiao chan->dev = xdev->dev; 25112ee67178SXianjun Jiao chan->xdev = xdev; 25122ee67178SXianjun Jiao chan->has_sg = xdev->has_sg; 25132ee67178SXianjun Jiao chan->desc_pendingcount = 0x0; 25142ee67178SXianjun Jiao chan->ext_addr = xdev->ext_addr; 25152ee67178SXianjun Jiao /* This variable ensures that descriptors are not 25162ee67178SXianjun Jiao * Submitted when dma engine is in progress. This variable is 25172ee67178SXianjun Jiao * Added to avoid polling for a bit in the status register to 25182ee67178SXianjun Jiao * Know dma state in the driver hot path. 25192ee67178SXianjun Jiao */ 25202ee67178SXianjun Jiao chan->idle = true; 25212ee67178SXianjun Jiao 25222ee67178SXianjun Jiao spin_lock_init(&chan->lock); 25232ee67178SXianjun Jiao INIT_LIST_HEAD(&chan->pending_list); 25242ee67178SXianjun Jiao INIT_LIST_HEAD(&chan->done_list); 25252ee67178SXianjun Jiao INIT_LIST_HEAD(&chan->active_list); 25262ee67178SXianjun Jiao INIT_LIST_HEAD(&chan->free_seg_list); 25272ee67178SXianjun Jiao 25282ee67178SXianjun Jiao /* Retrieve the channel properties from the device tree */ 25292ee67178SXianjun Jiao has_dre = of_property_read_bool(node, "xlnx,include-dre"); 25302ee67178SXianjun Jiao 25312ee67178SXianjun Jiao chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); 25322ee67178SXianjun Jiao 25332ee67178SXianjun Jiao err = of_property_read_u32(node, "xlnx,datawidth", &value); 25342ee67178SXianjun Jiao if (err) { 25352ee67178SXianjun Jiao dev_err(xdev->dev, "missing xlnx,datawidth property\n"); 25362ee67178SXianjun Jiao return err; 25372ee67178SXianjun Jiao } 25382ee67178SXianjun Jiao width = value >> 3; /* Convert bits to bytes */ 25392ee67178SXianjun Jiao 25402ee67178SXianjun Jiao /* If data width is greater than 8 bytes, DRE is not in hw */ 25412ee67178SXianjun Jiao if (width > 8) 25422ee67178SXianjun Jiao has_dre = false; 25432ee67178SXianjun Jiao 25442ee67178SXianjun Jiao if (!has_dre) 25452ee67178SXianjun Jiao xdev->common.copy_align = fls(width - 1); 25462ee67178SXianjun Jiao 25472ee67178SXianjun Jiao if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || 25482ee67178SXianjun Jiao of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || 25492ee67178SXianjun Jiao of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { 25502ee67178SXianjun Jiao chan->direction = DMA_MEM_TO_DEV; 25512ee67178SXianjun Jiao chan->id = chan_id; 2552febc5adfSXianjun Jiao chan->tdest = chan_id; 25532ee67178SXianjun Jiao xdev->common.directions = BIT(DMA_MEM_TO_DEV); 25542ee67178SXianjun Jiao 25552ee67178SXianjun Jiao chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 25562ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 25572ee67178SXianjun Jiao chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 25582ee67178SXianjun Jiao chan->config.park = 1; 25592ee67178SXianjun Jiao 25602ee67178SXianjun Jiao if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 25612ee67178SXianjun Jiao xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) 25622ee67178SXianjun Jiao chan->flush_on_fsync = true; 25632ee67178SXianjun Jiao } 25642ee67178SXianjun Jiao } else if (of_device_is_compatible(node, 25652ee67178SXianjun Jiao "xlnx,axi-vdma-s2mm-channel") || 25662ee67178SXianjun Jiao of_device_is_compatible(node, 25672ee67178SXianjun Jiao "xlnx,axi-dma-s2mm-channel")) { 25682ee67178SXianjun Jiao chan->direction = DMA_DEV_TO_MEM; 25692ee67178SXianjun Jiao chan->id = chan_id; 2570febc5adfSXianjun Jiao chan->tdest = chan_id - xdev->nr_channels; 25712ee67178SXianjun Jiao xdev->common.directions |= BIT(DMA_DEV_TO_MEM); 25722ee67178SXianjun Jiao chan->has_vflip = of_property_read_bool(node, 25732ee67178SXianjun Jiao "xlnx,enable-vert-flip"); 25742ee67178SXianjun Jiao if (chan->has_vflip) { 25752ee67178SXianjun Jiao chan->config.vflip_en = dma_read(chan, 25762ee67178SXianjun Jiao XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & 25772ee67178SXianjun Jiao XILINX_VDMA_ENABLE_VERTICAL_FLIP; 25782ee67178SXianjun Jiao } 25792ee67178SXianjun Jiao 25802ee67178SXianjun Jiao chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 25812ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 25822ee67178SXianjun Jiao chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 25832ee67178SXianjun Jiao chan->config.park = 1; 25842ee67178SXianjun Jiao 25852ee67178SXianjun Jiao if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || 25862ee67178SXianjun Jiao xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) 25872ee67178SXianjun Jiao chan->flush_on_fsync = true; 25882ee67178SXianjun Jiao } 25892ee67178SXianjun Jiao } else { 25902ee67178SXianjun Jiao dev_err(xdev->dev, "Invalid channel compatible node\n"); 25912ee67178SXianjun Jiao return -EINVAL; 25922ee67178SXianjun Jiao } 25932ee67178SXianjun Jiao 25942ee67178SXianjun Jiao /* Request the interrupt */ 25952ee67178SXianjun Jiao chan->irq = irq_of_parse_and_map(node, 0); 25962ee67178SXianjun Jiao err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, 25972ee67178SXianjun Jiao "xilinx-dma-controller", chan); 25982ee67178SXianjun Jiao if (err) { 25992ee67178SXianjun Jiao dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 26002ee67178SXianjun Jiao return err; 26012ee67178SXianjun Jiao } 26022ee67178SXianjun Jiao 26032ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 26042ee67178SXianjun Jiao chan->start_transfer = xilinx_dma_start_transfer; 26052ee67178SXianjun Jiao chan->stop_transfer = xilinx_dma_stop_transfer; 26062ee67178SXianjun Jiao } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 26072ee67178SXianjun Jiao chan->start_transfer = xilinx_cdma_start_transfer; 26082ee67178SXianjun Jiao chan->stop_transfer = xilinx_cdma_stop_transfer; 26092ee67178SXianjun Jiao } else { 26102ee67178SXianjun Jiao chan->start_transfer = xilinx_vdma_start_transfer; 26112ee67178SXianjun Jiao chan->stop_transfer = xilinx_dma_stop_transfer; 26122ee67178SXianjun Jiao } 26132ee67178SXianjun Jiao 26142ee67178SXianjun Jiao /* Initialize the tasklet */ 26152ee67178SXianjun Jiao tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, 26162ee67178SXianjun Jiao (unsigned long)chan); 26172ee67178SXianjun Jiao 26182ee67178SXianjun Jiao /* 26192ee67178SXianjun Jiao * Initialize the DMA channel and add it to the DMA engine channels 26202ee67178SXianjun Jiao * list. 26212ee67178SXianjun Jiao */ 26222ee67178SXianjun Jiao chan->common.device = &xdev->common; 26232ee67178SXianjun Jiao 26242ee67178SXianjun Jiao list_add_tail(&chan->common.device_node, &xdev->common.channels); 26252ee67178SXianjun Jiao xdev->chan[chan->id] = chan; 26262ee67178SXianjun Jiao 26272ee67178SXianjun Jiao /* Reset the channel */ 26282ee67178SXianjun Jiao err = xilinx_dma_chan_reset(chan); 26292ee67178SXianjun Jiao if (err < 0) { 26302ee67178SXianjun Jiao dev_err(xdev->dev, "Reset channel failed\n"); 26312ee67178SXianjun Jiao return err; 26322ee67178SXianjun Jiao } 26332ee67178SXianjun Jiao 26342ee67178SXianjun Jiao return 0; 26352ee67178SXianjun Jiao } 26362ee67178SXianjun Jiao 26372ee67178SXianjun Jiao /** 26382ee67178SXianjun Jiao * xilinx_dma_child_probe - Per child node probe 26392ee67178SXianjun Jiao * It get number of dma-channels per child node from 26402ee67178SXianjun Jiao * device-tree and initializes all the channels. 26412ee67178SXianjun Jiao * 26422ee67178SXianjun Jiao * @xdev: Driver specific device structure 26432ee67178SXianjun Jiao * @node: Device node 26442ee67178SXianjun Jiao * 26452ee67178SXianjun Jiao * Return: 0 always. 26462ee67178SXianjun Jiao */ 26472ee67178SXianjun Jiao static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, 26482ee67178SXianjun Jiao struct device_node *node) 26492ee67178SXianjun Jiao { 2650febc5adfSXianjun Jiao int ret, i, nr_channels = 1; 2651febc5adfSXianjun Jiao 2652febc5adfSXianjun Jiao ret = of_property_read_u32(node, "dma-channels", &nr_channels); 2653febc5adfSXianjun Jiao if ((ret < 0) && xdev->mcdma) 2654febc5adfSXianjun Jiao dev_warn(xdev->dev, "missing dma-channels property\n"); 26552ee67178SXianjun Jiao 26562ee67178SXianjun Jiao for (i = 0; i < nr_channels; i++) 26572ee67178SXianjun Jiao xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); 26582ee67178SXianjun Jiao 26592ee67178SXianjun Jiao xdev->nr_channels += nr_channels; 26602ee67178SXianjun Jiao 26612ee67178SXianjun Jiao return 0; 26622ee67178SXianjun Jiao } 26632ee67178SXianjun Jiao 26642ee67178SXianjun Jiao /** 26652ee67178SXianjun Jiao * of_dma_xilinx_xlate - Translation function 26662ee67178SXianjun Jiao * @dma_spec: Pointer to DMA specifier as found in the device tree 26672ee67178SXianjun Jiao * @ofdma: Pointer to DMA controller data 26682ee67178SXianjun Jiao * 26692ee67178SXianjun Jiao * Return: DMA channel pointer on success and NULL on error 26702ee67178SXianjun Jiao */ 26712ee67178SXianjun Jiao static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 26722ee67178SXianjun Jiao struct of_dma *ofdma) 26732ee67178SXianjun Jiao { 26742ee67178SXianjun Jiao struct xilinx_dma_device *xdev = ofdma->of_dma_data; 26752ee67178SXianjun Jiao int chan_id = dma_spec->args[0]; 26762ee67178SXianjun Jiao 26772ee67178SXianjun Jiao if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) 26782ee67178SXianjun Jiao return NULL; 26792ee67178SXianjun Jiao 26802ee67178SXianjun Jiao return dma_get_slave_channel(&xdev->chan[chan_id]->common); 26812ee67178SXianjun Jiao } 26822ee67178SXianjun Jiao 26832ee67178SXianjun Jiao static const struct xilinx_dma_config axidma_config = { 26842ee67178SXianjun Jiao .dmatype = XDMA_TYPE_AXIDMA, 26852ee67178SXianjun Jiao .clk_init = axidma_clk_init, 26862ee67178SXianjun Jiao }; 26872ee67178SXianjun Jiao 26882ee67178SXianjun Jiao static const struct xilinx_dma_config axicdma_config = { 26892ee67178SXianjun Jiao .dmatype = XDMA_TYPE_CDMA, 26902ee67178SXianjun Jiao .clk_init = axicdma_clk_init, 26912ee67178SXianjun Jiao }; 26922ee67178SXianjun Jiao 26932ee67178SXianjun Jiao static const struct xilinx_dma_config axivdma_config = { 26942ee67178SXianjun Jiao .dmatype = XDMA_TYPE_VDMA, 26952ee67178SXianjun Jiao .clk_init = axivdma_clk_init, 26962ee67178SXianjun Jiao }; 26972ee67178SXianjun Jiao 26982ee67178SXianjun Jiao static const struct of_device_id xilinx_dma_of_ids[] = { 26992ee67178SXianjun Jiao { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, 27002ee67178SXianjun Jiao { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, 27012ee67178SXianjun Jiao { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, 27022ee67178SXianjun Jiao {} 27032ee67178SXianjun Jiao }; 27042ee67178SXianjun Jiao MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); 27052ee67178SXianjun Jiao 27062ee67178SXianjun Jiao /** 27072ee67178SXianjun Jiao * xilinx_dma_probe - Driver probe function 27082ee67178SXianjun Jiao * @pdev: Pointer to the platform_device structure 27092ee67178SXianjun Jiao * 27102ee67178SXianjun Jiao * Return: '0' on success and failure value on error 27112ee67178SXianjun Jiao */ 27122ee67178SXianjun Jiao static int xilinx_dma_probe(struct platform_device *pdev) 27132ee67178SXianjun Jiao { 27142ee67178SXianjun Jiao int (*clk_init)(struct platform_device *, struct clk **, struct clk **, 27152ee67178SXianjun Jiao struct clk **, struct clk **, struct clk **) 27162ee67178SXianjun Jiao = axivdma_clk_init; 27172ee67178SXianjun Jiao struct device_node *node = pdev->dev.of_node; 27182ee67178SXianjun Jiao struct xilinx_dma_device *xdev; 27192ee67178SXianjun Jiao struct device_node *child, *np = pdev->dev.of_node; 27202ee67178SXianjun Jiao struct resource *io; 27212ee67178SXianjun Jiao u32 num_frames, addr_width, len_width; 27222ee67178SXianjun Jiao int i, err; 27232ee67178SXianjun Jiao 27242ee67178SXianjun Jiao /* Allocate and initialize the DMA engine structure */ 27252ee67178SXianjun Jiao xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); 27262ee67178SXianjun Jiao if (!xdev) 27272ee67178SXianjun Jiao return -ENOMEM; 27282ee67178SXianjun Jiao 27292ee67178SXianjun Jiao xdev->dev = &pdev->dev; 27302ee67178SXianjun Jiao if (np) { 27312ee67178SXianjun Jiao const struct of_device_id *match; 27322ee67178SXianjun Jiao 27332ee67178SXianjun Jiao match = of_match_node(xilinx_dma_of_ids, np); 27342ee67178SXianjun Jiao if (match && match->data) { 27352ee67178SXianjun Jiao xdev->dma_config = match->data; 27362ee67178SXianjun Jiao clk_init = xdev->dma_config->clk_init; 27372ee67178SXianjun Jiao } 27382ee67178SXianjun Jiao } 27392ee67178SXianjun Jiao 27402ee67178SXianjun Jiao err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, 27412ee67178SXianjun Jiao &xdev->rx_clk, &xdev->rxs_clk); 27422ee67178SXianjun Jiao if (err) 27432ee67178SXianjun Jiao return err; 27442ee67178SXianjun Jiao 27452ee67178SXianjun Jiao /* Request and map I/O memory */ 27462ee67178SXianjun Jiao io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 27472ee67178SXianjun Jiao xdev->regs = devm_ioremap_resource(&pdev->dev, io); 27482ee67178SXianjun Jiao if (IS_ERR(xdev->regs)) 27492ee67178SXianjun Jiao return PTR_ERR(xdev->regs); 27502ee67178SXianjun Jiao 27512ee67178SXianjun Jiao /* Retrieve the DMA engine properties from the device tree */ 27522ee67178SXianjun Jiao xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 27532ee67178SXianjun Jiao xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); 27542ee67178SXianjun Jiao 27552ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2756febc5adfSXianjun Jiao xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); 27572ee67178SXianjun Jiao if (!of_property_read_u32(node, "xlnx,sg-length-width", 27582ee67178SXianjun Jiao &len_width)) { 27592ee67178SXianjun Jiao if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || 27602ee67178SXianjun Jiao len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { 27612ee67178SXianjun Jiao dev_warn(xdev->dev, 27622ee67178SXianjun Jiao "invalid xlnx,sg-length-width property value using default width\n"); 27632ee67178SXianjun Jiao } else { 27642ee67178SXianjun Jiao if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) 27652ee67178SXianjun Jiao dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); 27662ee67178SXianjun Jiao 27672ee67178SXianjun Jiao xdev->max_buffer_len = GENMASK(len_width - 1, 0); 27682ee67178SXianjun Jiao } 27692ee67178SXianjun Jiao } 27702ee67178SXianjun Jiao } 27712ee67178SXianjun Jiao 27722ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 27732ee67178SXianjun Jiao err = of_property_read_u32(node, "xlnx,num-fstores", 27742ee67178SXianjun Jiao &num_frames); 27752ee67178SXianjun Jiao if (err < 0) { 27762ee67178SXianjun Jiao dev_err(xdev->dev, 27772ee67178SXianjun Jiao "missing xlnx,num-fstores property\n"); 27782ee67178SXianjun Jiao return err; 27792ee67178SXianjun Jiao } 27802ee67178SXianjun Jiao 27812ee67178SXianjun Jiao err = of_property_read_u32(node, "xlnx,flush-fsync", 27822ee67178SXianjun Jiao &xdev->flush_on_fsync); 27832ee67178SXianjun Jiao if (err < 0) 27842ee67178SXianjun Jiao dev_warn(xdev->dev, 27852ee67178SXianjun Jiao "missing xlnx,flush-fsync property\n"); 27862ee67178SXianjun Jiao } 27872ee67178SXianjun Jiao 27882ee67178SXianjun Jiao err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); 27892ee67178SXianjun Jiao if (err < 0) 27902ee67178SXianjun Jiao dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); 27912ee67178SXianjun Jiao 27922ee67178SXianjun Jiao if (addr_width > 32) 27932ee67178SXianjun Jiao xdev->ext_addr = true; 27942ee67178SXianjun Jiao else 27952ee67178SXianjun Jiao xdev->ext_addr = false; 27962ee67178SXianjun Jiao 27972ee67178SXianjun Jiao /* Set the dma mask bits */ 27982ee67178SXianjun Jiao dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width)); 27992ee67178SXianjun Jiao 28002ee67178SXianjun Jiao /* Initialize the DMA engine */ 28012ee67178SXianjun Jiao xdev->common.dev = &pdev->dev; 28022ee67178SXianjun Jiao 28032ee67178SXianjun Jiao INIT_LIST_HEAD(&xdev->common.channels); 28042ee67178SXianjun Jiao if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { 28052ee67178SXianjun Jiao dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 28062ee67178SXianjun Jiao dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 28072ee67178SXianjun Jiao } 28082ee67178SXianjun Jiao 28092ee67178SXianjun Jiao xdev->common.dst_addr_widths = BIT(addr_width / 8); 28102ee67178SXianjun Jiao xdev->common.src_addr_widths = BIT(addr_width / 8); 28112ee67178SXianjun Jiao xdev->common.device_alloc_chan_resources = 28122ee67178SXianjun Jiao xilinx_dma_alloc_chan_resources; 28132ee67178SXianjun Jiao xdev->common.device_free_chan_resources = 28142ee67178SXianjun Jiao xilinx_dma_free_chan_resources; 28152ee67178SXianjun Jiao xdev->common.device_terminate_all = xilinx_dma_terminate_all; 28162ee67178SXianjun Jiao xdev->common.device_tx_status = xilinx_dma_tx_status; 28172ee67178SXianjun Jiao xdev->common.device_issue_pending = xilinx_dma_issue_pending; 28182ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 28192ee67178SXianjun Jiao dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 28202ee67178SXianjun Jiao xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 28212ee67178SXianjun Jiao xdev->common.device_prep_dma_cyclic = 28222ee67178SXianjun Jiao xilinx_dma_prep_dma_cyclic; 2823febc5adfSXianjun Jiao xdev->common.device_prep_interleaved_dma = 2824febc5adfSXianjun Jiao xilinx_dma_prep_interleaved; 2825febc5adfSXianjun Jiao /* Residue calculation is supported by only AXI DMA */ 28262ee67178SXianjun Jiao xdev->common.residue_granularity = 28272ee67178SXianjun Jiao DMA_RESIDUE_GRANULARITY_SEGMENT; 28282ee67178SXianjun Jiao } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 28292ee67178SXianjun Jiao dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 28302ee67178SXianjun Jiao dma_cap_set(DMA_SG, xdev->common.cap_mask); 28312ee67178SXianjun Jiao xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 28322ee67178SXianjun Jiao xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg; 28332ee67178SXianjun Jiao } else { 28342ee67178SXianjun Jiao xdev->common.device_prep_interleaved_dma = 28352ee67178SXianjun Jiao xilinx_vdma_dma_prep_interleaved; 28362ee67178SXianjun Jiao } 28372ee67178SXianjun Jiao 28382ee67178SXianjun Jiao platform_set_drvdata(pdev, xdev); 28392ee67178SXianjun Jiao 28402ee67178SXianjun Jiao /* Initialize the channels */ 28412ee67178SXianjun Jiao for_each_child_of_node(node, child) { 28422ee67178SXianjun Jiao err = xilinx_dma_child_probe(xdev, child); 28432ee67178SXianjun Jiao if (err < 0) 28442ee67178SXianjun Jiao goto disable_clks; 28452ee67178SXianjun Jiao } 28462ee67178SXianjun Jiao 28472ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 28482ee67178SXianjun Jiao for (i = 0; i < xdev->nr_channels; i++) 28492ee67178SXianjun Jiao if (xdev->chan[i]) 28502ee67178SXianjun Jiao xdev->chan[i]->num_frms = num_frames; 28512ee67178SXianjun Jiao } 28522ee67178SXianjun Jiao 28532ee67178SXianjun Jiao /* Register the DMA engine with the core */ 28542ee67178SXianjun Jiao dma_async_device_register(&xdev->common); 28552ee67178SXianjun Jiao 28562ee67178SXianjun Jiao err = of_dma_controller_register(node, of_dma_xilinx_xlate, 28572ee67178SXianjun Jiao xdev); 28582ee67178SXianjun Jiao if (err < 0) { 28592ee67178SXianjun Jiao dev_err(&pdev->dev, "Unable to register DMA to DT\n"); 28602ee67178SXianjun Jiao dma_async_device_unregister(&xdev->common); 28612ee67178SXianjun Jiao goto error; 28622ee67178SXianjun Jiao } 28632ee67178SXianjun Jiao 28642ee67178SXianjun Jiao if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 28652ee67178SXianjun Jiao dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); 28662ee67178SXianjun Jiao else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) 28672ee67178SXianjun Jiao dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); 28682ee67178SXianjun Jiao else 28692ee67178SXianjun Jiao dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); 28702ee67178SXianjun Jiao 28712ee67178SXianjun Jiao return 0; 28722ee67178SXianjun Jiao 28732ee67178SXianjun Jiao disable_clks: 28742ee67178SXianjun Jiao xdma_disable_allclks(xdev); 28752ee67178SXianjun Jiao error: 28762ee67178SXianjun Jiao for (i = 0; i < xdev->nr_channels; i++) 28772ee67178SXianjun Jiao if (xdev->chan[i]) 28782ee67178SXianjun Jiao xilinx_dma_chan_remove(xdev->chan[i]); 28792ee67178SXianjun Jiao 28802ee67178SXianjun Jiao return err; 28812ee67178SXianjun Jiao } 28822ee67178SXianjun Jiao 28832ee67178SXianjun Jiao /** 28842ee67178SXianjun Jiao * xilinx_dma_remove - Driver remove function 28852ee67178SXianjun Jiao * @pdev: Pointer to the platform_device structure 28862ee67178SXianjun Jiao * 28872ee67178SXianjun Jiao * Return: Always '0' 28882ee67178SXianjun Jiao */ 28892ee67178SXianjun Jiao static int xilinx_dma_remove(struct platform_device *pdev) 28902ee67178SXianjun Jiao { 28912ee67178SXianjun Jiao struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); 28922ee67178SXianjun Jiao int i; 28932ee67178SXianjun Jiao 28942ee67178SXianjun Jiao of_dma_controller_free(pdev->dev.of_node); 28952ee67178SXianjun Jiao 28962ee67178SXianjun Jiao dma_async_device_unregister(&xdev->common); 28972ee67178SXianjun Jiao 28982ee67178SXianjun Jiao for (i = 0; i < xdev->nr_channels; i++) 28992ee67178SXianjun Jiao if (xdev->chan[i]) 29002ee67178SXianjun Jiao xilinx_dma_chan_remove(xdev->chan[i]); 29012ee67178SXianjun Jiao 29022ee67178SXianjun Jiao xdma_disable_allclks(xdev); 29032ee67178SXianjun Jiao 29042ee67178SXianjun Jiao return 0; 29052ee67178SXianjun Jiao } 29062ee67178SXianjun Jiao 29072ee67178SXianjun Jiao static struct platform_driver xilinx_vdma_driver = { 29082ee67178SXianjun Jiao .driver = { 29092ee67178SXianjun Jiao .name = "xilinx-vdma", 29102ee67178SXianjun Jiao .of_match_table = xilinx_dma_of_ids, 29112ee67178SXianjun Jiao }, 29122ee67178SXianjun Jiao .probe = xilinx_dma_probe, 29132ee67178SXianjun Jiao .remove = xilinx_dma_remove, 29142ee67178SXianjun Jiao }; 29152ee67178SXianjun Jiao 29162ee67178SXianjun Jiao module_platform_driver(xilinx_vdma_driver); 29172ee67178SXianjun Jiao 2918febc5adfSXianjun Jiao MODULE_AUTHOR("Xilinx, Inc. and Xianjun Jiao"); 29192ee67178SXianjun Jiao MODULE_DESCRIPTION("Xilinx VDMA driver"); 29202ee67178SXianjun Jiao MODULE_LICENSE("GPL v2"); 2921