xref: /openwifi/driver/xilinx_dma/xilinx_dma.c (revision 14124d7306727ee7bbae83655e3d9ecacc4c3bee)
1 /*
2  * DMA driver for Xilinx Video DMA Engine
3  *
4  * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
5  *
6  * Based on the Freescale DMA driver.
7  *
8  * Modified by Xianjun Jiao. [email protected]; [email protected]
9  *
10  * Description:
11  * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
12  * core that provides high-bandwidth direct memory access between memory
13  * and AXI4-Stream type video target peripherals. The core provides efficient
14  * two dimensional DMA operations with independent asynchronous read (MM2S)
15  * and write (S2MM) channel operation. It can be configured to have either
16  * one channel or two channels. If configured as two channels, one is to
17  * transmit to the video device (MM2S) and another is to receive from the
18  * video device (S2MM). Initialization, status, interrupt and management
19  * registers are accessed through an AXI4-Lite slave interface.
20  *
21  * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
22  * provides high-bandwidth one dimensional direct memory access between memory
23  * and AXI4-Stream target peripherals. It supports one receive and one
24  * transmit channel, both of them optional at synthesis time.
25  *
26  * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
27  * Access (DMA) between a memory-mapped source address and a memory-mapped
28  * destination address.
29  *
30  * This program is free software: you can redistribute it and/or modify
31  * it under the terms of the GNU General Public License as published by
32  * the Free Software Foundation, either version 2 of the License, or
33  * (at your option) any later version.
34  */
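
/*
 * Client usage sketch (illustrative only, not part of this driver): a
 * dmaengine consumer typically drives one of these channels roughly as
 * follows; the channel name and buffer variables are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(&pdev->dev, "rx");
 *	struct dma_async_tx_descriptor *d =
 *		dmaengine_prep_slave_single(chan, buf_phys, len,
 *					    DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dma_cookie_t cookie = dmaengine_submit(d);
 *
 *	dma_async_issue_pending(chan);
 */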
35 
36 #include <linux/bitops.h>
37 #include <linux/dmapool.h>
38 #include <linux/dma/xilinx_dma.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/module.h>
44 #include <linux/of_address.h>
45 #include <linux/of_dma.h>
46 #include <linux/of_platform.h>
47 #include <linux/of_irq.h>
48 #include <linux/slab.h>
49 #include <linux/clk.h>
50 #include <linux/io-64-nonatomic-lo-hi.h>
51 
52 #include "../dmaengine.h"
53 
54 /* Register/Descriptor Offsets */
55 #define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
56 #define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
57 #define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
58 #define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
59 
60 /* Control Registers */
61 #define XILINX_DMA_REG_DMACR			0x0000
62 #define XILINX_DMA_DMACR_DELAY_MAX		0xff
63 #define XILINX_DMA_DMACR_DELAY_SHIFT		24
64 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
65 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
66 #define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
67 #define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
68 #define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
69 #define XILINX_DMA_DMACR_MASTER_SHIFT		8
70 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT	5
71 #define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
72 #define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
73 #define XILINX_DMA_DMACR_RESET			BIT(2)
74 #define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
75 #define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
76 #define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
77 #define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
78 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
79 #define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)
80 
81 #define XILINX_DMA_REG_DMASR			0x0004
82 #define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
83 #define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
84 #define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
85 #define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
86 #define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
87 #define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
88 #define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
89 #define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
90 #define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
91 #define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
92 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
93 #define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
94 #define XILINX_DMA_DMASR_IDLE			BIT(1)
95 #define XILINX_DMA_DMASR_HALTED		BIT(0)
96 #define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
97 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)
98 
99 #define XILINX_DMA_REG_CURDESC			0x0008
100 #define XILINX_DMA_REG_TAILDESC		0x0010
101 #define XILINX_DMA_REG_REG_INDEX		0x0014
102 #define XILINX_DMA_REG_FRMSTORE		0x0018
103 #define XILINX_DMA_REG_THRESHOLD		0x001c
104 #define XILINX_DMA_REG_FRMPTR_STS		0x0024
105 #define XILINX_DMA_REG_PARK_PTR		0x0028
106 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
107 #define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
108 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
109 #define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
110 #define XILINX_DMA_REG_VDMA_VERSION		0x002c
111 
112 /* Register Direct Mode Registers */
113 #define XILINX_DMA_REG_VSIZE			0x0000
114 #define XILINX_DMA_REG_HSIZE			0x0004
115 
116 #define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
117 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
118 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0
119 
120 #define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
121 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
122 
123 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
124 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)
125 
126 /* HW specific definitions */
127 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
128 
129 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
130 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
131 		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
132 		 XILINX_DMA_DMASR_ERR_IRQ)
133 
134 #define XILINX_DMA_DMASR_ALL_ERR_MASK	\
135 		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
136 		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
137 		 XILINX_DMA_DMASR_SG_DEC_ERR | \
138 		 XILINX_DMA_DMASR_SG_SLV_ERR | \
139 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
140 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
141 		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
142 		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
143 		 XILINX_DMA_DMASR_DMA_INT_ERR)
144 
145 /*
146  * Recoverable errors are DMA Internal error, SOF Early, EOF Early
147  * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
148  * is enabled in the h/w system.
149  */
150 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
151 		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
152 		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
153 		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
154 		 XILINX_DMA_DMASR_DMA_INT_ERR)
155 
156 /* Axi VDMA Flush on Fsync bits */
157 #define XILINX_DMA_FLUSH_S2MM		3
158 #define XILINX_DMA_FLUSH_MM2S		2
159 #define XILINX_DMA_FLUSH_BOTH		1
160 
161 /* Delay loop counter to prevent hardware failure */
162 #define XILINX_DMA_LOOP_COUNT		1000000
163 
164 /* AXI DMA Specific Registers/Offsets */
165 #define XILINX_DMA_REG_SRCDSTADDR	0x18
166 #define XILINX_DMA_REG_BTT		0x28
167 
168 /* AXI DMA Specific Masks/Bit fields */
169 #define XILINX_DMA_MAX_TRANS_LEN_MIN	8
170 #define XILINX_DMA_MAX_TRANS_LEN_MAX	23
171 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
172 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
173 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
174 #define XILINX_DMA_CR_COALESCE_SHIFT	16
175 #define XILINX_DMA_BD_SOP		BIT(27)
176 #define XILINX_DMA_BD_EOP		BIT(26)
177 #define XILINX_DMA_COALESCE_MAX		255
178 #define XILINX_DMA_NUM_DESCS		255
179 #define XILINX_DMA_NUM_APP_WORDS	5
180 
181 /* AXI CDMA Specific Registers/Offsets */
182 #define XILINX_CDMA_REG_SRCADDR		0x18
183 #define XILINX_CDMA_REG_DSTADDR		0x20
184 
185 /* AXI CDMA Specific Masks */
186 #define XILINX_CDMA_CR_SGMODE          BIT(3)
187 
188 #define xilinx_prep_dma_addr_t(addr)	\
189 	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
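
/*
 * For example, xilinx_prep_dma_addr_t(hw->src_addr) expands (via token
 * pasting) to ((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr))),
 * i.e. the matching *_msb field supplies the upper 32 bits.
 */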
190 /**
191  * struct xilinx_vdma_desc_hw - Hardware Descriptor
192  * @next_desc: Next Descriptor Pointer @0x00
193  * @pad1: Reserved @0x04
194  * @buf_addr: Buffer address @0x08
195  * @buf_addr_msb: MSB of Buffer address @0x0C
196  * @vsize: Vertical Size @0x10
197  * @hsize: Horizontal Size @0x14
198  * @stride: Number of bytes between the first
199  *	    pixels of each horizontal line @0x18
200  */
201 struct xilinx_vdma_desc_hw {
202 	u32 next_desc;
203 	u32 pad1;
204 	u32 buf_addr;
205 	u32 buf_addr_msb;
206 	u32 vsize;
207 	u32 hsize;
208 	u32 stride;
209 } __aligned(64);
210 
211 /**
212  * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
213  * @next_desc: Next Descriptor Pointer @0x00
214  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
215  * @buf_addr: Buffer address @0x08
216  * @buf_addr_msb: MSB of Buffer address @0x0C
217  * @reserved1: Reserved @0x10
218  * @reserved2: Reserved @0x14
219  * @control: Control field @0x18
220  * @status: Status field @0x1C
221  * @app: APP Fields @0x20 - 0x30
222  */
223 struct xilinx_axidma_desc_hw {
224 	u32 next_desc;
225 	u32 next_desc_msb;
226 	u32 buf_addr;
227 	u32 buf_addr_msb;
228 	u32 reserved1;
229 	u32 reserved2;
230 	u32 control;
231 	u32 status;
232 	u32 app[XILINX_DMA_NUM_APP_WORDS];
233 } __aligned(64);
234 
235 /**
236  * struct xilinx_cdma_desc_hw - Hardware Descriptor
237  * @next_desc: Next Descriptor Pointer @0x00
238  * @next_desc_msb: Next Descriptor Pointer MSB @0x04
239  * @src_addr: Source address @0x08
240  * @src_addr_msb: Source address MSB @0x0C
241  * @dest_addr: Destination address @0x10
242  * @dest_addr_msb: Destination address MSB @0x14
243  * @control: Control field @0x18
244  * @status: Status field @0x1C
245  */
246 struct xilinx_cdma_desc_hw {
247 	u32 next_desc;
248 	u32 next_desc_msb;
249 	u32 src_addr;
250 	u32 src_addr_msb;
251 	u32 dest_addr;
252 	u32 dest_addr_msb;
253 	u32 control;
254 	u32 status;
255 } __aligned(64);
256 
257 /**
258  * struct xilinx_vdma_tx_segment - Descriptor segment
259  * @hw: Hardware descriptor
260  * @node: Node in the descriptor segments list
261  * @phys: Physical address of segment
262  */
263 struct xilinx_vdma_tx_segment {
264 	struct xilinx_vdma_desc_hw hw;
265 	struct list_head node;
266 	dma_addr_t phys;
267 } __aligned(64);
268 
269 /**
270  * struct xilinx_axidma_tx_segment - Descriptor segment
271  * @hw: Hardware descriptor
272  * @node: Node in the descriptor segments list
273  * @phys: Physical address of segment
274  */
275 struct xilinx_axidma_tx_segment {
276 	struct xilinx_axidma_desc_hw hw;
277 	struct list_head node;
278 	dma_addr_t phys;
279 } __aligned(64);
280 
281 /**
282  * struct xilinx_cdma_tx_segment - Descriptor segment
283  * @hw: Hardware descriptor
284  * @node: Node in the descriptor segments list
285  * @phys: Physical address of segment
286  */
287 struct xilinx_cdma_tx_segment {
288 	struct xilinx_cdma_desc_hw hw;
289 	struct list_head node;
290 	dma_addr_t phys;
291 } __aligned(64);
292 
293 /**
294  * struct xilinx_dma_tx_descriptor - Per Transaction structure
295  * @async_tx: Async transaction descriptor
296  * @segments: TX segments list
297  * @node: Node in the channel descriptors list
298  * @cyclic: Check for cyclic transfers.
299  * @err: Whether the descriptor has an error.
300  * @residue: Residue of the completed descriptor
301  */
302 struct xilinx_dma_tx_descriptor {
303 	struct dma_async_tx_descriptor async_tx;
304 	struct list_head segments;
305 	struct list_head node;
306 	bool cyclic;
307 	bool err;
308 	u32 residue;
309 };
310 
311 /**
312  * struct xilinx_dma_chan - Driver specific DMA channel structure
313  * @xdev: Driver specific device structure
314  * @ctrl_offset: Control registers offset
315  * @desc_offset: TX descriptor registers offset
316  * @lock: Descriptor operation lock
317  * @pending_list: Descriptors waiting
318  * @active_list: Descriptors ready to submit
319  * @done_list: Complete descriptors
320  * @free_seg_list: Free descriptors
321  * @common: DMA common channel
322  * @desc_pool: Descriptors pool
323  * @dev: The dma device
324  * @irq: Channel IRQ
325  * @id: Channel ID
326  * @direction: Transfer direction
327  * @num_frms: Number of frames
328  * @has_sg: Support scatter transfers
329  * @cyclic: Check for cyclic transfers.
330  * @genlock: Support genlock mode
331  * @err: Channel has errors
332  * @idle: Check for channel idle
333  * @tasklet: Cleanup work after irq
334  * @config: Device configuration info
335  * @flush_on_fsync: Flush on Frame sync
336  * @desc_pendingcount: Descriptor pending count
337  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
338  * @desc_submitcount: Descriptor h/w submitted count
339  * @seg_v: Statically allocated segments base
340  * @seg_p: Physical allocated segments base
341  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
342  * @cyclic_seg_p: Physical allocated segments base for cyclic dma
343  * @start_transfer: Differentiate b/w DMA IP's transfer
344  * @stop_transfer: Differentiate b/w DMA IP's quiesce
345  * @has_vflip: S2MM vertical flip
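 * @buf_idx: Completion IRQ counter; reported as the residue in cyclic mode (openwifi modification)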
346  */
347 struct xilinx_dma_chan {
348 	struct xilinx_dma_device *xdev;
349 	u32 ctrl_offset;
350 	u32 desc_offset;
351 	spinlock_t lock;
352 	struct list_head pending_list;
353 	struct list_head active_list;
354 	struct list_head done_list;
355 	struct list_head free_seg_list;
356 	struct dma_chan common;
357 	struct dma_pool *desc_pool;
358 	struct device *dev;
359 	int irq;
360 	int id;
361 	enum dma_transfer_direction direction;
362 	int num_frms;
363 	bool has_sg;
364 	bool cyclic;
365 	bool genlock;
366 	bool err;
367 	bool idle;
368 	struct tasklet_struct tasklet;
369 	struct xilinx_vdma_config config;
370 	bool flush_on_fsync;
371 	u32 desc_pendingcount;
372 	bool ext_addr;
373 	u32 desc_submitcount;
374 	struct xilinx_axidma_tx_segment *seg_v;
375 	dma_addr_t seg_p;
376 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
377 	dma_addr_t cyclic_seg_p;
378 	void (*start_transfer)(struct xilinx_dma_chan *chan);
379 	int (*stop_transfer)(struct xilinx_dma_chan *chan);
380 	bool has_vflip;
381 	u32 buf_idx; // incremented by one on each completion IRQ; in cyclic mode this index is returned as the residue via device_tx_status/xilinx_dma_tx_status
382 };
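
/*
 * Illustrative sketch (not part of this driver) of how a client reads
 * buf_idx in cyclic mode: it is carried in the residue field of the
 * dmaengine transaction state. Variable names are hypothetical.
 *
 *	struct dma_tx_state state;
 *
 *	dmaengine_tx_status(rx_chan, rx_cookie, &state);
 *	current_buf = state.residue;
 */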
383 
384 /**
385  * enum xdma_ip_type - DMA IP type.
386  *
387  * @XDMA_TYPE_AXIDMA: Axi dma ip.
388  * @XDMA_TYPE_CDMA: Axi cdma ip.
389  * @XDMA_TYPE_VDMA: Axi vdma ip.
390  *
391  */
392 enum xdma_ip_type {
393 	XDMA_TYPE_AXIDMA = 0,
394 	XDMA_TYPE_CDMA,
395 	XDMA_TYPE_VDMA,
396 };
397 
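/**
 * struct xilinx_dma_config - DMA IP configuration
 * @dmatype: DMA IP type
 * @clk_init: DMA IP specific clock initialization routine
 */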
398 struct xilinx_dma_config {
399 	enum xdma_ip_type dmatype;
400 	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
401 			struct clk **tx_clk, struct clk **txs_clk,
402 			struct clk **rx_clk, struct clk **rxs_clk);
403 };
404 
405 /**
406  * struct xilinx_dma_device - DMA device structure
407  * @regs: I/O mapped base address
408  * @dev: Device Structure
409  * @common: DMA device structure
410  * @chan: Driver specific DMA channel
411  * @has_sg: Specifies whether Scatter-Gather is present or not
412  * @flush_on_fsync: Flush on frame sync
413  * @ext_addr: Indicates 64 bit addressing is supported by dma device
414  * @pdev: Platform device structure pointer
415  * @dma_config: DMA config structure
416  * @axi_clk: DMA Axi4-lite interface clock
417  * @tx_clk: DMA mm2s clock
418  * @txs_clk: DMA mm2s stream clock
419  * @rx_clk: DMA s2mm clock
420  * @rxs_clk: DMA s2mm stream clock
421  * @nr_channels: Number of channels DMA device supports
422  * @chan_id: DMA channel identifier
423  * @max_buffer_len: Max buffer length
424  */
425 struct xilinx_dma_device {
426 	void __iomem *regs;
427 	struct device *dev;
428 	struct dma_device common;
429 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
430 	bool has_sg;
431 	u32 flush_on_fsync;
432 	bool ext_addr;
433 	struct platform_device  *pdev;
434 	const struct xilinx_dma_config *dma_config;
435 	struct clk *axi_clk;
436 	struct clk *tx_clk;
437 	struct clk *txs_clk;
438 	struct clk *rx_clk;
439 	struct clk *rxs_clk;
440 	u32 nr_channels;
441 	u32 chan_id;
442 	u32 max_buffer_len;
443 };
444 
445 /* Macros */
446 #define to_xilinx_chan(chan) \
447 	container_of(chan, struct xilinx_dma_chan, common)
448 #define to_dma_tx_descriptor(tx) \
449 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
450 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
451 	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
452 			   cond, delay_us, timeout_us)
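
/*
 * Example use (mirroring the start/stop paths below):
 *
 *	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *				      !(val & XILINX_DMA_DMASR_HALTED), 0,
 *				      XILINX_DMA_LOOP_COUNT);
 */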
453 
454 /* IO accessors */
455 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
456 {
457 	return ioread32(chan->xdev->regs + reg);
458 }
459 
460 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
461 {
462 	iowrite32(value, chan->xdev->regs + reg);
463 }
464 
465 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
466 				   u32 value)
467 {
468 	dma_write(chan, chan->desc_offset + reg, value);
469 }
470 
471 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
472 {
473 	return dma_read(chan, chan->ctrl_offset + reg);
474 }
475 
476 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
477 				   u32 value)
478 {
479 	dma_write(chan, chan->ctrl_offset + reg, value);
480 }
481 
482 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
483 				 u32 clr)
484 {
485 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
486 }
487 
488 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
489 				 u32 set)
490 {
491 	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
492 }
493 
494 /**
495  * vdma_desc_write_64 - 64-bit descriptor write
496  * @chan: Driver specific VDMA channel
497  * @reg: Register to write
498  * @value_lsb: lower address of the descriptor.
499  * @value_msb: upper address of the descriptor.
500  *
501  * Since the VDMA driver may write to a register offset that is not a
502  * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
503  * 32-bit writes instead of a single 64-bit register write.
504  */
505 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
506 				      u32 value_lsb, u32 value_msb)
507 {
508 	/* Write the lsb 32 bits*/
509 	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
510 
511 	/* Write the msb 32 bits */
512 	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
513 }
514 
515 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
516 {
517 	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
518 }
519 
520 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
521 				dma_addr_t addr)
522 {
523 	if (chan->ext_addr)
524 		dma_writeq(chan, reg, addr);
525 	else
526 		dma_ctrl_write(chan, reg, addr);
527 }
528 
529 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
530 				     struct xilinx_axidma_desc_hw *hw,
531 				     dma_addr_t buf_addr, size_t sg_used,
532 				     size_t period_len)
533 {
534 	if (chan->ext_addr) {
535 		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
536 		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
537 						 period_len);
538 	} else {
539 		hw->buf_addr = buf_addr + sg_used + period_len;
540 	}
541 }
542 
543 /* -----------------------------------------------------------------------------
544  * Descriptors and segments alloc and free
545  */
546 
547 /**
548  * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
549  * @chan: Driver specific DMA channel
550  *
551  * Return: The allocated segment on success and NULL on failure.
552  */
553 static struct xilinx_vdma_tx_segment *
554 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
555 {
556 	struct xilinx_vdma_tx_segment *segment;
557 	dma_addr_t phys;
558 
559 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
560 	if (!segment)
561 		return NULL;
562 
563 	segment->phys = phys;
564 
565 	return segment;
566 }
567 
568 /**
569  * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
570  * @chan: Driver specific DMA channel
571  *
572  * Return: The allocated segment on success and NULL on failure.
573  */
574 static struct xilinx_cdma_tx_segment *
575 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
576 {
577 	struct xilinx_cdma_tx_segment *segment;
578 	dma_addr_t phys;
579 
580 	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
581 	if (!segment)
582 		return NULL;
583 
584 	segment->phys = phys;
585 
586 	return segment;
587 }
588 
589 /**
590  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
591  * @chan: Driver specific DMA channel
592  *
593  * Return: The allocated segment on success and NULL on failure.
594  */
595 static struct xilinx_axidma_tx_segment *
596 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
597 {
598 	struct xilinx_axidma_tx_segment *segment = NULL;
599 	unsigned long flags;
600 
601 	spin_lock_irqsave(&chan->lock, flags);
602 	if (!list_empty(&chan->free_seg_list)) {
603 		segment = list_first_entry(&chan->free_seg_list,
604 					   struct xilinx_axidma_tx_segment,
605 					   node);
606 		list_del(&segment->node);
607 	}
608 	spin_unlock_irqrestore(&chan->lock, flags);
609 
610 	if (!segment)
611 		dev_dbg(chan->dev, "Could not find free tx segment\n");
612 
613 	return segment;
614 }
615 
616 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
617 {
618 	u32 next_desc = hw->next_desc;
619 	u32 next_desc_msb = hw->next_desc_msb;
620 
621 	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
622 
623 	hw->next_desc = next_desc;
624 	hw->next_desc_msb = next_desc_msb;
625 }
626 
627 /**
628  * xilinx_dma_free_tx_segment - Free transaction segment
629  * @chan: Driver specific DMA channel
630  * @segment: DMA transaction segment
631  */
632 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
633 				struct xilinx_axidma_tx_segment *segment)
634 {
635 	xilinx_dma_clean_hw_desc(&segment->hw);
636 
637 	list_add_tail(&segment->node, &chan->free_seg_list);
638 }
639 
640 /**
641  * xilinx_cdma_free_tx_segment - Free transaction segment
642  * @chan: Driver specific DMA channel
643  * @segment: DMA transaction segment
644  */
645 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
646 				struct xilinx_cdma_tx_segment *segment)
647 {
648 	dma_pool_free(chan->desc_pool, segment, segment->phys);
649 }
650 
651 /**
652  * xilinx_vdma_free_tx_segment - Free transaction segment
653  * @chan: Driver specific DMA channel
654  * @segment: DMA transaction segment
655  */
656 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
657 					struct xilinx_vdma_tx_segment *segment)
658 {
659 	dma_pool_free(chan->desc_pool, segment, segment->phys);
660 }
661 
662 /**
663  * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
664  * @chan: Driver specific DMA channel
665  *
666  * Return: The allocated descriptor on success and NULL on failure.
667  */
668 static struct xilinx_dma_tx_descriptor *
669 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
670 {
671 	struct xilinx_dma_tx_descriptor *desc;
672 
673 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
674 	if (!desc)
675 		return NULL;
676 
677 	INIT_LIST_HEAD(&desc->segments);
678 
679 	return desc;
680 }
681 
682 /**
683  * xilinx_dma_free_tx_descriptor - Free transaction descriptor
684  * @chan: Driver specific DMA channel
685  * @desc: DMA transaction descriptor
686  */
687 static void
688 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
689 			       struct xilinx_dma_tx_descriptor *desc)
690 {
691 	struct xilinx_vdma_tx_segment *segment, *next;
692 	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
693 	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
694 
695 	if (!desc)
696 		return;
697 
698 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
699 		list_for_each_entry_safe(segment, next, &desc->segments, node) {
700 			list_del(&segment->node);
701 			xilinx_vdma_free_tx_segment(chan, segment);
702 		}
703 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
704 		list_for_each_entry_safe(cdma_segment, cdma_next,
705 					 &desc->segments, node) {
706 			list_del(&cdma_segment->node);
707 			xilinx_cdma_free_tx_segment(chan, cdma_segment);
708 		}
709 	} else {
710 		list_for_each_entry_safe(axidma_segment, axidma_next,
711 					 &desc->segments, node) {
712 			list_del(&axidma_segment->node);
713 			xilinx_dma_free_tx_segment(chan, axidma_segment);
714 		}
715 	}
716 
717 	kfree(desc);
718 }
719 
720 /* Required functions */
721 
722 /**
723  * xilinx_dma_free_desc_list - Free descriptors list
724  * @chan: Driver specific DMA channel
725  * @list: List to parse and delete the descriptor
726  */
727 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
728 					struct list_head *list)
729 {
730 	struct xilinx_dma_tx_descriptor *desc, *next;
731 
732 	list_for_each_entry_safe(desc, next, list, node) {
733 		list_del(&desc->node);
734 		xilinx_dma_free_tx_descriptor(chan, desc);
735 	}
736 }
737 
738 /**
739  * xilinx_dma_free_descriptors - Free channel descriptors
740  * @chan: Driver specific DMA channel
741  */
742 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
743 {
744 	unsigned long flags;
745 
746 	spin_lock_irqsave(&chan->lock, flags);
747 
748 	xilinx_dma_free_desc_list(chan, &chan->pending_list);
749 	xilinx_dma_free_desc_list(chan, &chan->done_list);
750 	xilinx_dma_free_desc_list(chan, &chan->active_list);
751 
752 	spin_unlock_irqrestore(&chan->lock, flags);
753 }
754 
755 /**
756  * xilinx_dma_free_chan_resources - Free channel resources
757  * @dchan: DMA channel
758  */
759 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
760 {
761 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
762 	unsigned long flags;
763 
764 	dev_dbg(chan->dev, "Free all channel resources.\n");
765 
766 	xilinx_dma_free_descriptors(chan);
767 
768 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
769 		spin_lock_irqsave(&chan->lock, flags);
770 		INIT_LIST_HEAD(&chan->free_seg_list);
771 		spin_unlock_irqrestore(&chan->lock, flags);
772 
773 		/* Free memory that is allocated for BD */
774 		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
775 				  XILINX_DMA_NUM_DESCS, chan->seg_v,
776 				  chan->seg_p);
777 
778 		/* Free Memory that is allocated for cyclic DMA Mode */
779 		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
780 				  chan->cyclic_seg_v, chan->cyclic_seg_p);
781 	}
782 
783 	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
784 		dma_pool_destroy(chan->desc_pool);
785 		chan->desc_pool = NULL;
786 	}
787 }
788 
789 /**
790  * xilinx_dma_get_residue - Compute residue for a given descriptor
791  * @chan: Driver specific dma channel
792  * @desc: dma transaction descriptor
793  *
794  * Return: The number of residue bytes for the descriptor.
795  */
796 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
797 				  struct xilinx_dma_tx_descriptor *desc)
798 {
799 	struct xilinx_cdma_tx_segment *cdma_seg;
800 	struct xilinx_axidma_tx_segment *axidma_seg;
801 	struct xilinx_cdma_desc_hw *cdma_hw;
802 	struct xilinx_axidma_desc_hw *axidma_hw;
803 	struct list_head *entry;
804 	u32 residue = 0;
805 
806 	/*
807 	 * VDMA and simple mode do not support residue reporting, so the
808 	 * residue field will always be 0.
809 	 */
810 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA || !chan->has_sg)
811 		return residue;
812 
813 	list_for_each(entry, &desc->segments) {
814 		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
815 			cdma_seg = list_entry(entry,
816 					      struct xilinx_cdma_tx_segment,
817 					      node);
818 			cdma_hw = &cdma_seg->hw;
819 			residue += (cdma_hw->control - cdma_hw->status) &
820 				   chan->xdev->max_buffer_len;
821 		} else {
822 			axidma_seg = list_entry(entry,
823 						struct xilinx_axidma_tx_segment,
824 						node);
825 			axidma_hw = &axidma_seg->hw;
826 			residue += (axidma_hw->control - axidma_hw->status) &
827 				   chan->xdev->max_buffer_len;
828 		}
829 	}
830 
831 	return residue;
832 }
833 
834 /**
835  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
836  * @chan: Driver specific dma channel
837  * @desc: dma transaction descriptor
838  * @flags: flags for spin lock
839  */
840 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
841 					  struct xilinx_dma_tx_descriptor *desc,
842 					  unsigned long *flags)
843 {
844 	dma_async_tx_callback callback;
845 	void *callback_param;
846 
847 	callback = desc->async_tx.callback;
848 	callback_param = desc->async_tx.callback_param;
849 	if (callback) {
850 		spin_unlock_irqrestore(&chan->lock, *flags);
851 		callback(callback_param);
852 		spin_lock_irqsave(&chan->lock, *flags);
853 	}
854 }
855 
856 /**
857  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
858  * @chan: Driver specific DMA channel
859  */
860 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
861 {
862 	struct xilinx_dma_tx_descriptor *desc, *next;
863 	unsigned long flags;
864 
865 	spin_lock_irqsave(&chan->lock, flags);
866 
867 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
868 		struct dmaengine_result result;
869 
870 		if (desc->cyclic) {
871 			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
872 			break;
873 		}
874 
875 		/* Remove from the list of running transactions */
876 		list_del(&desc->node);
877 
878 		if (unlikely(desc->err)) {
879 			if (chan->direction == DMA_DEV_TO_MEM)
880 				result.result = DMA_TRANS_READ_FAILED;
881 			else
882 				result.result = DMA_TRANS_WRITE_FAILED;
883 		} else {
884 			result.result = DMA_TRANS_NOERROR;
885 		}
886 
887 		result.residue = desc->residue;
888 
889 		/* Run the link descriptor callback function */
890 		spin_unlock_irqrestore(&chan->lock, flags);
891 		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
892 		spin_lock_irqsave(&chan->lock, flags);
893 
894 		/* Run any dependencies, then free the descriptor */
895 		dma_run_dependencies(&desc->async_tx);
896 		xilinx_dma_free_tx_descriptor(chan, desc);
897 	}
898 
899 	spin_unlock_irqrestore(&chan->lock, flags);
900 }
901 
902 /**
903  * xilinx_dma_do_tasklet - Schedule completion tasklet
904  * @data: Pointer to the Xilinx DMA channel structure
905  */
906 static void xilinx_dma_do_tasklet(unsigned long data)
907 {
908 	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
909 
910 	xilinx_dma_chan_desc_cleanup(chan);
911 }
912 
913 /**
914  * xilinx_dma_alloc_chan_resources - Allocate channel resources
915  * @dchan: DMA channel
916  *
917  * Return: '0' on success and failure value on error
918  */
919 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
920 {
921 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
922 	int i;
923 
924 	/* Has this channel already been allocated? */
925 	if (chan->desc_pool)
926 		return 0;
927 
928 	/*
929 	 * We need the descriptor to be aligned to 64 bytes
930 	 * to meet the Xilinx VDMA specification requirement.
931 	 */
932 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
933 		/* Allocate the buffer descriptors. */
934 		chan->seg_v = dma_zalloc_coherent(chan->dev,
935 						  sizeof(*chan->seg_v) *
936 						  XILINX_DMA_NUM_DESCS,
937 						  &chan->seg_p, GFP_KERNEL);
938 		if (!chan->seg_v) {
939 			dev_err(chan->dev,
940 				"unable to allocate channel %d descriptors\n",
941 				chan->id);
942 			return -ENOMEM;
943 		}
944 		/*
945 		 * For cyclic DMA mode we need to program the tail descriptor
946 		 * register with a value that is not part of the BD chain, so
947 		 * allocate a dedicated segment at channel-allocation time just
948 		 * for programming the tail descriptor.
949 		 */
950 		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
951 					sizeof(*chan->cyclic_seg_v),
952 					&chan->cyclic_seg_p, GFP_KERNEL);
953 		if (!chan->cyclic_seg_v) {
954 			dev_err(chan->dev,
955 				"unable to allocate desc segment for cyclic DMA\n");
956 			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
957 				XILINX_DMA_NUM_DESCS, chan->seg_v,
958 				chan->seg_p);
959 			return -ENOMEM;
960 		}
961 		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
962 
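		/*
		 * Link the pre-allocated segments into a circular BD ring:
		 * descriptor i points at descriptor (i + 1) % XILINX_DMA_NUM_DESCS.
		 */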
963 		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
964 			chan->seg_v[i].hw.next_desc =
965 			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
966 				((i + 1) % XILINX_DMA_NUM_DESCS));
967 			chan->seg_v[i].hw.next_desc_msb =
968 			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
969 				((i + 1) % XILINX_DMA_NUM_DESCS));
970 			chan->seg_v[i].phys = chan->seg_p +
971 				sizeof(*chan->seg_v) * i;
972 			list_add_tail(&chan->seg_v[i].node,
973 				      &chan->free_seg_list);
974 		}
975 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
976 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
977 				   chan->dev,
978 				   sizeof(struct xilinx_cdma_tx_segment),
979 				   __alignof__(struct xilinx_cdma_tx_segment),
980 				   0);
981 	} else {
982 		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
983 				     chan->dev,
984 				     sizeof(struct xilinx_vdma_tx_segment),
985 				     __alignof__(struct xilinx_vdma_tx_segment),
986 				     0);
987 	}
988 
989 	if (!chan->desc_pool &&
990 	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
991 		dev_err(chan->dev,
992 			"unable to allocate channel %d descriptor pool\n",
993 			chan->id);
994 		return -ENOMEM;
995 	}
996 
997 	dma_cookie_init(dchan);
998 
999 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1000 		/* For AXI DMA, resetting one channel will reset the
1001 		 * other channel as well, so enable the interrupts here.
1002 		 */
1003 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1004 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1005 	}
1006 
1007 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1008 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1009 			     XILINX_CDMA_CR_SGMODE);
1010 
1011 	return 0;
1012 }
1013 
1014 /**
1015  * xilinx_dma_tx_status - Get DMA transaction status
1016  * @dchan: DMA channel
1017  * @cookie: Transaction identifier
1018  * @txstate: Transaction state
1019  *
1020  * Return: DMA transaction status
1021  */
1022 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1023 					dma_cookie_t cookie,
1024 					struct dma_tx_state *txstate)
1025 {
1026 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1027 	struct xilinx_dma_tx_descriptor *desc;
1028 	enum dma_status ret;
1029 	unsigned long flags;
1030 
1031 	ret = dma_cookie_status(dchan, cookie, txstate);
1032 	if (ret == DMA_COMPLETE || !txstate)
1033 		return ret;
1034 
1035 	spin_lock_irqsave(&chan->lock, flags);
1036 
1037 	desc = list_last_entry(&chan->active_list,
1038 			       struct xilinx_dma_tx_descriptor, node);
1039 	desc->residue = xilinx_dma_get_residue(chan, desc);
1040 
1041 	spin_unlock_irqrestore(&chan->lock, flags);
1042 
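	/* In cyclic mode the residue carries buf_idx (openwifi modification) */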
1043 	if (chan->cyclic)
1044 		dma_set_residue(txstate, chan->buf_idx);
1045 	else
1046 		dma_set_residue(txstate, desc->residue);
1047 
1048 	return ret;
1049 }
1050 
1051 /**
1052  * xilinx_dma_stop_transfer - Halt DMA channel
1053  * @chan: Driver specific DMA channel
1054  *
1055  * Return: '0' on success and failure value on error
1056  */
1057 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1058 {
1059 	u32 val;
1060 
1061 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1062 
1063 	/* Wait for the hardware to halt */
1064 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1065 				       val & (XILINX_DMA_DMASR_IDLE |
1066 					      XILINX_DMA_DMASR_HALTED),
1067 				       0, XILINX_DMA_LOOP_COUNT);
1068 }
1069 
1070 /**
1071  * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1072  * @chan: Driver specific DMA channel
1073  *
1074  * Return: '0' on success and failure value on error
1075  */
1076 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1077 {
1078 	u32 val;
1079 
1080 	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1081 				       val & XILINX_DMA_DMASR_IDLE, 0,
1082 				       XILINX_DMA_LOOP_COUNT);
1083 }
1084 
1085 /**
1086  * xilinx_dma_start - Start DMA channel
1087  * @chan: Driver specific DMA channel
1088  */
1089 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1090 {
1091 	int err;
1092 	u32 val;
1093 
1094 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1095 
1096 	/* Wait for the hardware to start */
1097 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1098 				      !(val & XILINX_DMA_DMASR_HALTED), 0,
1099 				      XILINX_DMA_LOOP_COUNT);
1100 
1101 	if (err) {
1102 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
1103 			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1104 
1105 		chan->err = true;
1106 	}
1107 }
1108 
1109 /**
1110  * xilinx_vdma_start_transfer - Starts VDMA transfer
1111  * @chan: Driver specific channel struct pointer
1112  */
1113 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1114 {
1115 	struct xilinx_vdma_config *config = &chan->config;
1116 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1117 	u32 reg, j;
1118 	struct xilinx_vdma_tx_segment *tail_segment;
1119 
1120 	/* This function was invoked with lock held */
1121 	if (chan->err)
1122 		return;
1123 
1124 	if (!chan->idle)
1125 		return;
1126 
1127 	if (list_empty(&chan->pending_list))
1128 		return;
1129 
1130 	desc = list_first_entry(&chan->pending_list,
1131 				struct xilinx_dma_tx_descriptor, node);
1132 	tail_desc = list_last_entry(&chan->pending_list,
1133 				    struct xilinx_dma_tx_descriptor, node);
1134 
1135 	tail_segment = list_last_entry(&tail_desc->segments,
1136 				       struct xilinx_vdma_tx_segment, node);
1137 
1138 	/*
1139 	 * If hardware is idle, then all descriptors on the running lists are
1140 	 * done, start new transfers
1141 	 */
1142 	if (chan->has_sg)
1143 		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1144 				desc->async_tx.phys);
1145 
1146 	/* Configure the hardware using info in the config structure */
1147 	if (chan->has_vflip) {
1148 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1149 		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1150 		reg |= config->vflip_en;
1151 		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1152 			  reg);
1153 	}
1154 
1155 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1156 
1157 	if (config->frm_cnt_en)
1158 		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1159 	else
1160 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1161 
1162 	/*
1163 	 * With SG, start with circular mode, so that BDs can be fetched.
1164 	 * In direct register mode, if not parking, enable circular mode
1165 	 */
1166 	if (chan->has_sg || !config->park)
1167 		reg |= XILINX_DMA_DMACR_CIRC_EN;
1168 
1169 	if (config->park)
1170 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1171 
1172 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1173 
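	/* Program the park pointer with the frame store index for this transfer */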
1174 	j = chan->desc_submitcount;
1175 	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1176 	if (chan->direction == DMA_MEM_TO_DEV) {
1177 		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1178 		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1179 	} else {
1180 		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1181 		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1182 	}
1183 	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1184 
1185 	/* Start the hardware */
1186 	xilinx_dma_start(chan);
1187 
1188 	if (chan->err)
1189 		return;
1190 
1191 	/* Start the transfer */
1192 	if (chan->has_sg) {
1193 		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1194 				tail_segment->phys);
1195 		list_splice_tail_init(&chan->pending_list, &chan->active_list);
1196 		chan->desc_pendingcount = 0;
1197 	} else {
1198 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
1199 		int i = 0;
1200 
1201 		if (chan->desc_submitcount < chan->num_frms)
1202 			i = chan->desc_submitcount;
1203 
1204 		list_for_each_entry(segment, &desc->segments, node) {
1205 			if (chan->ext_addr)
1206 				vdma_desc_write_64(chan,
1207 					XILINX_VDMA_REG_START_ADDRESS_64(i++),
1208 					segment->hw.buf_addr,
1209 					segment->hw.buf_addr_msb);
1210 			else
1211 				vdma_desc_write(chan,
1212 					XILINX_VDMA_REG_START_ADDRESS(i++),
1213 					segment->hw.buf_addr);
1214 
1215 			last = segment;
1216 		}
1217 
1218 		if (!last)
1219 			return;
1220 
1221 		/* HW expects these parameters to be the same for one transaction */
1222 		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1223 		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1224 				last->hw.stride);
1225 		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1226 
1227 		chan->desc_submitcount++;
1228 		chan->desc_pendingcount--;
1229 		list_del(&desc->node);
1230 		list_add_tail(&desc->node, &chan->active_list);
1231 		if (chan->desc_submitcount == chan->num_frms)
1232 			chan->desc_submitcount = 0;
1233 	}
1234 
1235 	chan->idle = false;
1236 }
1237 
1238 /**
1239  * xilinx_cdma_start_transfer - Starts cdma transfer
1240  * @chan: Driver specific channel struct pointer
1241  */
1242 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1243 {
1244 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1245 	struct xilinx_cdma_tx_segment *tail_segment;
1246 	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1247 
1248 	if (chan->err)
1249 		return;
1250 
1251 	if (!chan->idle)
1252 		return;
1253 
1254 	if (list_empty(&chan->pending_list))
1255 		return;
1256 
1257 	head_desc = list_first_entry(&chan->pending_list,
1258 				     struct xilinx_dma_tx_descriptor, node);
1259 	tail_desc = list_last_entry(&chan->pending_list,
1260 				    struct xilinx_dma_tx_descriptor, node);
1261 	tail_segment = list_last_entry(&tail_desc->segments,
1262 				       struct xilinx_cdma_tx_segment, node);
1263 
1264 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1265 		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1266 		ctrl_reg |= chan->desc_pendingcount <<
1267 				XILINX_DMA_CR_COALESCE_SHIFT;
1268 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1269 	}
1270 
1271 	if (chan->has_sg) {
1272 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1273 			     XILINX_CDMA_CR_SGMODE);
1274 
1275 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1276 			     XILINX_CDMA_CR_SGMODE);
1277 
1278 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1279 			     head_desc->async_tx.phys);
1280 
1281 		/* Update tail ptr register which will start the transfer */
1282 		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1283 			     tail_segment->phys);
1284 	} else {
1285 		/* In simple mode */
1286 		struct xilinx_cdma_tx_segment *segment;
1287 		struct xilinx_cdma_desc_hw *hw;
1288 
1289 		segment = list_first_entry(&head_desc->segments,
1290 					   struct xilinx_cdma_tx_segment,
1291 					   node);
1292 
1293 		hw = &segment->hw;
1294 
1295 		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1296 			     xilinx_prep_dma_addr_t(hw->src_addr));
1297 		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1298 			     xilinx_prep_dma_addr_t(hw->dest_addr));
1299 
1300 		/* Start the transfer */
1301 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1302 				hw->control & chan->xdev->max_buffer_len);
1303 	}
1304 
1305 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1306 	chan->desc_pendingcount = 0;
1307 	chan->idle = false;
1308 }
1309 
1310 /**
1311  * xilinx_dma_start_transfer - Starts DMA transfer
1312  * @chan: Driver specific channel struct pointer
1313  */
1314 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1315 {
1316 	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1317 	struct xilinx_axidma_tx_segment *tail_segment;
1318 	u32 reg;
1319 
1320 	if (chan->err)
1321 		return;
1322 
1323 	if (!chan->idle)
1324 		return;
1325 
1326 	if (list_empty(&chan->pending_list))
1327 		return;
1328 
1329 	head_desc = list_first_entry(&chan->pending_list,
1330 				     struct xilinx_dma_tx_descriptor, node);
1331 	tail_desc = list_last_entry(&chan->pending_list,
1332 				    struct xilinx_dma_tx_descriptor, node);
1333 	tail_segment = list_last_entry(&tail_desc->segments,
1334 				       struct xilinx_axidma_tx_segment, node);
1335 
1336 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1337 
1338 	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1339 		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1340 		reg |= chan->desc_pendingcount <<
1341 				  XILINX_DMA_CR_COALESCE_SHIFT;
1342 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1343 	}
1344 
1345 	if (chan->has_sg)
1346 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1347 			     head_desc->async_tx.phys);
1348 
1349 	xilinx_dma_start(chan);
1350 
1351 	if (chan->err)
1352 		return;
1353 
1354 	/* Start the transfer */
1355 	if (chan->has_sg) {
1356 		if (chan->cyclic)
1357 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1358 				     chan->cyclic_seg_v->phys);
1359 		else
1360 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1361 				     tail_segment->phys);
1362 	} else {
1363 		struct xilinx_axidma_tx_segment *segment;
1364 		struct xilinx_axidma_desc_hw *hw;
1365 
1366 		segment = list_first_entry(&head_desc->segments,
1367 					   struct xilinx_axidma_tx_segment,
1368 					   node);
1369 		hw = &segment->hw;
1370 
1371 		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1372 			     xilinx_prep_dma_addr_t(hw->buf_addr));
1373 
1374 		/* Start the transfer */
1375 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1376 			       hw->control & chan->xdev->max_buffer_len);
1377 	}
1378 
1379 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
1380 	chan->desc_pendingcount = 0;
1381 	chan->idle = false;
1382 }
1383 
1384 /**
1385  * xilinx_dma_issue_pending - Issue pending transactions
1386  * @dchan: DMA channel
1387  */
1388 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1389 {
1390 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1391 	unsigned long flags;
1392 
1393 	spin_lock_irqsave(&chan->lock, flags);
1394 	chan->start_transfer(chan);
1395 	spin_unlock_irqrestore(&chan->lock, flags);
1396 }
1397 
1398 /**
1399  * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1400  * @chan : xilinx DMA channel
1401  *
1402  * CONTEXT: hardirq
1403  */
1404 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1405 {
1406 	struct xilinx_dma_tx_descriptor *desc, *next;
1407 
1408 	/* This function was invoked with lock held */
1409 	if (list_empty(&chan->active_list))
1410 		return;
1411 
1412 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1413 		desc->residue = xilinx_dma_get_residue(chan, desc);
1414 		desc->err = chan->err;
1415 
1416 		list_del(&desc->node);
1417 		if (!desc->cyclic)
1418 			dma_cookie_complete(&desc->async_tx);
1419 		list_add_tail(&desc->node, &chan->done_list);
1420 	}
1421 }
1422 
1423 /**
1424  * xilinx_dma_reset - Reset DMA channel
1425  * @chan: Driver specific DMA channel
1426  *
1427  * Return: '0' on success and failure value on error
1428  */
1429 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1430 {
1431 	int err;
1432 	u32 tmp;
1433 
1434 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1435 
1436 	/* Wait for the hardware to finish reset */
1437 	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1438 				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
1439 				      XILINX_DMA_LOOP_COUNT);
1440 
1441 	if (err) {
1442 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1443 			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1444 			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1445 		return -ETIMEDOUT;
1446 	}
1447 
1448 	chan->err = false;
1449 	chan->idle = true;
1450 	chan->desc_pendingcount = 0;
1451 	chan->desc_submitcount = 0;
1452 
1453 	return err;
1454 }
1455 
1456 /**
1457  * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1458  * @chan: Driver specific DMA channel
1459  *
1460  * Return: '0' on success and failure value on error
1461  */
1462 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1463 {
1464 	int err;
1465 
1466 	/* Reset VDMA */
1467 	err = xilinx_dma_reset(chan);
1468 	if (err)
1469 		return err;
1470 
1471 	/* Enable interrupts */
1472 	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1473 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * xilinx_dma_irq_handler - DMA Interrupt handler
1480  * @irq: IRQ number
1481  * @data: Pointer to the Xilinx DMA channel structure
1482  *
1483  * Return: IRQ_HANDLED/IRQ_NONE
1484  */
1485 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1486 {
1487 	struct xilinx_dma_chan *chan = data;
1488 	u32 status;
1489 
1490 	/* Read the status and ack the interrupts. */
1491 	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1492 	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1493 		return IRQ_NONE;
1494 
1495 	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1496 			status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1497 
1498 	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1499 		/*
1500 		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1501 		 * error is recoverable, ignore it. Otherwise flag the error.
1502 		 *
1503 		 * Only recoverable errors can be cleared in the DMASR register,
1504 		 * make sure not to write 1 to the other error bits.
1505 		 */
1506 		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1507 
1508 		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1509 				errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1510 
1511 		if (!chan->flush_on_fsync ||
1512 		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1513 			dev_err(chan->dev,
1514 				"Channel %p has errors %x, cdr %x tdr %x\n",
1515 				chan, errors,
1516 				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1517 				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1518 			chan->err = true;
1519 		}
1520 	}
1521 
1522 	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1523 		/*
1524 		 * The device is taking too long to complete the transfer for
1525 		 * the responsiveness the user requested.
1526 		 */
1527 		dev_dbg(chan->dev, "Inter-packet latency too long\n");
1528 	}
1529 
1530 	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1531 		spin_lock(&chan->lock);
1532 		xilinx_dma_complete_descriptor(chan);
1533 		chan->idle = true;
1534 		chan->start_transfer(chan);
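		/* Count completion IRQs so cyclic clients can track the current buffer (openwifi) */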
1535 		chan->buf_idx++;
1536 		spin_unlock(&chan->lock);
1537 	}
1538 
1539 	tasklet_schedule(&chan->tasklet);
1540 	return IRQ_HANDLED;
1541 }
1542 
1543 /**
1544  * append_desc_queue - Queuing descriptor
1545  * @chan: Driver specific dma channel
1546  * @desc: dma transaction descriptor
1547  */
1548 static void append_desc_queue(struct xilinx_dma_chan *chan,
1549 			      struct xilinx_dma_tx_descriptor *desc)
1550 {
1551 	struct xilinx_vdma_tx_segment *tail_segment;
1552 	struct xilinx_dma_tx_descriptor *tail_desc;
1553 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
1554 	struct xilinx_cdma_tx_segment *cdma_tail_segment;
1555 
1556 	if (list_empty(&chan->pending_list))
1557 		goto append;
1558 
1559 	/*
1560 	 * Add the hardware descriptor to the chain of hardware descriptors
1561 	 * that already exists in memory.
1562 	 */
1563 	tail_desc = list_last_entry(&chan->pending_list,
1564 				    struct xilinx_dma_tx_descriptor, node);
1565 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1566 		tail_segment = list_last_entry(&tail_desc->segments,
1567 					       struct xilinx_vdma_tx_segment,
1568 					       node);
1569 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1570 	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1571 		cdma_tail_segment = list_last_entry(&tail_desc->segments,
1572 						struct xilinx_cdma_tx_segment,
1573 						node);
1574 		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1575 	} else {
1576 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
1577 					       struct xilinx_axidma_tx_segment,
1578 					       node);
1579 		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1580 	}
1581 
1582 	/*
1583 	 * Add the software descriptor and all children to the list
1584 	 * of pending transactions
1585 	 */
1586 append:
1587 	list_add_tail(&desc->node, &chan->pending_list);
1588 	/*
1589 	 * In CDMA each segment counts as a descriptor, so the pending count
1590 	 * is incremented in the prep_* implementations instead of here.
1591 	 */
1592 	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_CDMA)
1593 		chan->desc_pendingcount++;
1594 
1595 	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1596 	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1597 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
1598 		chan->desc_pendingcount = chan->num_frms;
1599 	}
1600 }
1601 
1602 /**
1603  * xilinx_dma_tx_submit - Submit DMA transaction
1604  * @tx: Async transaction descriptor
1605  *
1606  * Return: cookie value on success and failure value on error
1607  */
1608 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1609 {
1610 	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1611 	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1612 	dma_cookie_t cookie;
1613 	unsigned long flags;
1614 	int err;
1615 
1616 	if (chan->cyclic) {
1617 		xilinx_dma_free_tx_descriptor(chan, desc);
1618 		return -EBUSY;
1619 	}
1620 
1621 	if (chan->err) {
1622 		/*
1623 		 * If reset fails, need to hard reset the system.
1624 		 * Channel is no longer functional
1625 		 */
1626 		err = xilinx_dma_chan_reset(chan);
1627 		if (err < 0)
1628 			return err;
1629 	}
1630 
1631 	spin_lock_irqsave(&chan->lock, flags);
1632 
1633 	cookie = dma_cookie_assign(tx);
1634 
1635 	/* Put this transaction onto the tail of the pending queue */
1636 	append_desc_queue(chan, desc);
1637 
1638 	if (desc->cyclic)
1639 		chan->cyclic = true;
1640 
1641 	spin_unlock_irqrestore(&chan->lock, flags);
1642 
1643 	return cookie;
1644 }
1645 
1646 /**
1647  * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1648  *	DMA_SLAVE transaction
1649  * @dchan: DMA channel
1650  * @xt: Interleaved template pointer
1651  * @flags: transfer ack flags
1652  *
1653  * Return: Async transaction descriptor on success and NULL on failure
1654  */
1655 static struct dma_async_tx_descriptor *
1656 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1657 				 struct dma_interleaved_template *xt,
1658 				 unsigned long flags)
1659 {
1660 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1661 	struct xilinx_dma_tx_descriptor *desc;
1662 	struct xilinx_vdma_tx_segment *segment;
1663 	struct xilinx_vdma_desc_hw *hw;
1664 
1665 	if (!is_slave_direction(xt->dir))
1666 		return NULL;
1667 
1668 	if (!xt->numf || !xt->sgl[0].size)
1669 		return NULL;
1670 
1671 	if (xt->frame_size != 1)
1672 		return NULL;
1673 
1674 	/* Allocate a transaction descriptor. */
1675 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1676 	if (!desc)
1677 		return NULL;
1678 
1679 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1680 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1681 	async_tx_ack(&desc->async_tx);
1682 
1683 	/* Allocate the link descriptor from DMA pool */
1684 	segment = xilinx_vdma_alloc_tx_segment(chan);
1685 	if (!segment)
1686 		goto error;
1687 
1688 	/* Fill in the hardware descriptor */
1689 	hw = &segment->hw;
1690 	hw->vsize = xt->numf;
1691 	hw->hsize = xt->sgl[0].size;
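	/* Pack the line stride and frame delay into the FRMDLY_STRIDE word */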
1692 	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1693 			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1694 	hw->stride |= chan->config.frm_dly <<
1695 			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1696 
1697 	if (xt->dir != DMA_MEM_TO_DEV) {
1698 		if (chan->ext_addr) {
1699 			hw->buf_addr = lower_32_bits(xt->dst_start);
1700 			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1701 		} else {
1702 			hw->buf_addr = xt->dst_start;
1703 		}
1704 	} else {
1705 		if (chan->ext_addr) {
1706 			hw->buf_addr = lower_32_bits(xt->src_start);
1707 			hw->buf_addr_msb = upper_32_bits(xt->src_start);
1708 		} else {
1709 			hw->buf_addr = xt->src_start;
1710 		}
1711 	}
1712 
1713 	/* Insert the segment into the descriptor segments list. */
1714 	list_add_tail(&segment->node, &desc->segments);
1715 
1716 	/* Link the last hardware descriptor with the first. */
1717 	segment = list_first_entry(&desc->segments,
1718 				   struct xilinx_vdma_tx_segment, node);
1719 	desc->async_tx.phys = segment->phys;
1720 
1721 	return &desc->async_tx;
1722 
1723 error:
1724 	xilinx_dma_free_tx_descriptor(chan, desc);
1725 	return NULL;
1726 }
1727 
1728 /**
1729  * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1730  * @dchan: DMA channel
1731  * @dma_dst: destination address
1732  * @dma_src: source address
1733  * @len: transfer length
1734  * @flags: transfer ack flags
1735  *
1736  * Return: Async transaction descriptor on success and NULL on failure
1737  */
1738 static struct dma_async_tx_descriptor *
1739 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1740 			dma_addr_t dma_src, size_t len, unsigned long flags)
1741 {
1742 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1743 	struct xilinx_dma_tx_descriptor *desc;
1744 	struct xilinx_cdma_tx_segment *segment;
1745 	struct xilinx_cdma_desc_hw *hw;
1746 
1747 	if (!len || len > chan->xdev->max_buffer_len)
1748 		return NULL;
1749 
1750 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1751 	if (!desc)
1752 		return NULL;
1753 
1754 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1755 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1756 
1757 	/* Allocate the link descriptor from DMA pool */
1758 	segment = xilinx_cdma_alloc_tx_segment(chan);
1759 	if (!segment)
1760 		goto error;
1761 
1762 	hw = &segment->hw;
1763 	hw->control = len;
1764 	hw->src_addr = dma_src;
1765 	hw->dest_addr = dma_dst;
1766 	if (chan->ext_addr) {
1767 		hw->src_addr_msb = upper_32_bits(dma_src);
1768 		hw->dest_addr_msb = upper_32_bits(dma_dst);
1769 	}
1770 
1771 	/* Insert the segment into the descriptor segments list. */
1772 	list_add_tail(&segment->node, &desc->segments);
1773 	chan->desc_pendingcount++;
1774 
1775 	desc->async_tx.phys = segment->phys;
1776 	hw->next_desc = segment->phys;
1777 
1778 	return &desc->async_tx;
1779 
1780 error:
1781 	xilinx_dma_free_tx_descriptor(chan, desc);
1782 	return NULL;
1783 }
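
/*
 * Usage sketch (illustrative): a client of the CDMA channel reaches this
 * prep callback through dmaengine_prep_dma_memcpy(); "dst_phys", "src_phys"
 * and "len" are placeholder DMA addresses/length obtained from the usual
 * DMA mapping API.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */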
1784 
1785 /**
1786  * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
1787  * @dchan: DMA channel
1788  * @dst_sg: Destination scatter list
1789  * @dst_sg_len: Number of entries in destination scatter list
1790  * @src_sg: Source scatter list
1791  * @src_sg_len: Number of entries in source scatter list
1792  * @flags: transfer ack flags
1793  *
1794  * Return: Async transaction descriptor on success and NULL on failure
1795  */
1796 static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
1797 			struct dma_chan *dchan, struct scatterlist *dst_sg,
1798 			unsigned int dst_sg_len, struct scatterlist *src_sg,
1799 			unsigned int src_sg_len, unsigned long flags)
1800 {
1801 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1802 	struct xilinx_dma_tx_descriptor *desc;
1803 	struct xilinx_cdma_tx_segment *segment, *prev = NULL;
1804 	struct xilinx_cdma_desc_hw *hw;
1805 	size_t len, dst_avail, src_avail;
1806 	dma_addr_t dma_dst, dma_src;
1807 
1808 	if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
1809 		return NULL;
1810 
1811 	if (unlikely(dst_sg == NULL || src_sg == NULL))
1812 		return NULL;
1813 
1814 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1815 	if (!desc)
1816 		return NULL;
1817 
1818 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1819 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1820 
1821 	dst_avail = sg_dma_len(dst_sg);
1822 	src_avail = sg_dma_len(src_sg);
1823 	/*
1824 	 * Loop until either the source or the destination scatterlist is
1825 	 * exhausted
1826 	 */
1827 	while (true) {
1828 		len = min_t(size_t, src_avail, dst_avail);
1829 		len = min_t(size_t, len, chan->xdev->max_buffer_len);
1830 		if (len == 0)
1831 			goto fetch;
1832 
1833 		/* Allocate the link descriptor from DMA pool */
1834 		segment = xilinx_cdma_alloc_tx_segment(chan);
1835 		if (!segment)
1836 			goto error;
1837 
1838 		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
1839 			dst_avail;
1840 		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
1841 			src_avail;
1842 		hw = &segment->hw;
1843 		hw->control = len;
1844 		hw->src_addr = dma_src;
1845 		hw->dest_addr = dma_dst;
1846 		if (chan->ext_addr) {
1847 			hw->src_addr_msb = upper_32_bits(dma_src);
1848 			hw->dest_addr_msb = upper_32_bits(dma_dst);
1849 		}
1850 
1851 		if (prev)
1852 			prev->hw.next_desc = segment->phys;
1853 
1854 		prev = segment;
1855 		dst_avail -= len;
1856 		src_avail -= len;
1857 		list_add_tail(&segment->node, &desc->segments);
1858 		chan->desc_pendingcount++;
1859 
1860 fetch:
1861 		/* Fetch the next dst scatterlist entry */
1862 		if (dst_avail == 0) {
1863 			if (dst_sg_len == 0)
1864 				break;
1865 			dst_sg = sg_next(dst_sg);
1866 			if (dst_sg == NULL)
1867 				break;
1868 			dst_sg_len--;
1869 			dst_avail = sg_dma_len(dst_sg);
1870 		}
1871 		/* Fetch the next src scatterlist entry */
1872 		if (src_avail == 0) {
1873 			if (src_sg_len == 0)
1874 				break;
1875 			src_sg = sg_next(src_sg);
1876 			if (src_sg == NULL)
1877 				break;
1878 			src_sg_len--;
1879 			src_avail = sg_dma_len(src_sg);
1880 		}
1881 	}
1882 
1883 	/* Link the last hardware descriptor with the first. */
1884 	segment = list_first_entry(&desc->segments,
1885 				struct xilinx_cdma_tx_segment, node);
1886 	desc->async_tx.phys = segment->phys;
1887 	prev->hw.next_desc = segment->phys;
1888 
1889 	return &desc->async_tx;
1890 
1891 error:
1892 	xilinx_dma_free_tx_descriptor(chan, desc);
1893 	return NULL;
1894 }
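
/*
 * Usage sketch (illustrative): on kernels that still expose the DMA_SG
 * capability, a client reaches this callback through dmaengine_prep_dma_sg()
 * with pre-mapped scatterlists; "dst_sgt" and "src_sgt" below are
 * placeholder sg_tables.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_sg(chan,
 *				   dst_sgt->sgl, dst_sgt->nents,
 *				   src_sgt->sgl, src_sgt->nents,
 *				   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 */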
1895 
1896 /**
1897  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1898  * @dchan: DMA channel
1899  * @sgl: scatterlist to transfer to/from
1900  * @sg_len: number of entries in @sgl
1901  * @direction: DMA direction
1902  * @flags: transfer ack flags
1903  * @context: APP words of the descriptor
1904  *
1905  * Return: Async transaction descriptor on success and NULL on failure
1906  */
1907 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1908 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1909 	enum dma_transfer_direction direction, unsigned long flags,
1910 	void *context)
1911 {
1912 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1913 	struct xilinx_dma_tx_descriptor *desc;
1914 	struct xilinx_axidma_tx_segment *segment = NULL;
1915 	u32 *app_w = (u32 *)context;
1916 	struct scatterlist *sg;
1917 	size_t copy;
1918 	size_t sg_used;
1919 	unsigned int i;
1920 
1921 	if (!is_slave_direction(direction))
1922 		return NULL;
1923 
1924 	/* Allocate a transaction descriptor. */
1925 	desc = xilinx_dma_alloc_tx_descriptor(chan);
1926 	if (!desc)
1927 		return NULL;
1928 
1929 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1930 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1931 
1932 	/* Build transactions using information in the scatter gather list */
1933 	for_each_sg(sgl, sg, sg_len, i) {
1934 		sg_used = 0;
1935 
1936 		/* Loop until the entire scatterlist entry is used */
1937 		while (sg_used < sg_dma_len(sg)) {
1938 			struct xilinx_axidma_desc_hw *hw;
1939 
1940 			/* Get a free segment */
1941 			segment = xilinx_axidma_alloc_tx_segment(chan);
1942 			if (!segment)
1943 				goto error;
1944 
1945 			/*
1946 			 * Calculate the maximum number of bytes to transfer,
1947 			 * making sure it is less than the hw limit
1948 			 */
1949 			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1950 				     chan->xdev->max_buffer_len);
1951 			hw = &segment->hw;
1952 
1953 			/* Fill in the descriptor */
1954 			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1955 					  sg_used, 0);
1956 
1957 			hw->control = copy;
1958 
1959 			if (chan->direction == DMA_MEM_TO_DEV) {
1960 				if (app_w)
1961 					memcpy(hw->app, app_w, sizeof(u32) *
1962 					       XILINX_DMA_NUM_APP_WORDS);
1963 			}
1964 
1965 			sg_used += copy;
1966 
1967 			/*
1968 			 * Insert the segment into the descriptor segments
1969 			 * list.
1970 			 */
1971 			list_add_tail(&segment->node, &desc->segments);
1972 		}
1973 	}
1974 
1975 	segment = list_first_entry(&desc->segments,
1976 				   struct xilinx_axidma_tx_segment, node);
1977 	desc->async_tx.phys = segment->phys;
1978 
1979 	/* For MEM_TO_DEV transfers, set SOP on the first and EOP on the last BD */
1980 	if (chan->direction == DMA_MEM_TO_DEV) {
1981 		segment->hw.control |= XILINX_DMA_BD_SOP;
1982 		segment = list_last_entry(&desc->segments,
1983 					  struct xilinx_axidma_tx_segment,
1984 					  node);
1985 		segment->hw.control |= XILINX_DMA_BD_EOP;
1986 	}
1987 
1988 	return &desc->async_tx;
1989 
1990 error:
1991 	xilinx_dma_free_tx_descriptor(chan, desc);
1992 	return NULL;
1993 }
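
/*
 * Usage sketch (illustrative): a slave client maps its buffer into a
 * scatterlist and reaches this callback through dmaengine_prep_slave_sg().
 * "dev", "buf" and "len" are placeholders; error handling is omitted.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct scatterlist sg;
 *	int nents;
 *
 *	sg_init_one(&sg, buf, len);
 *	nents = dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE);
 *	tx = dmaengine_prep_slave_sg(chan, &sg, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */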
1994 
1995 /**
1996  * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1997  * @dchan: DMA channel
1998  * @buf_addr: Physical address of the buffer
1999  * @buf_len: Total length of the cyclic buffers
2000  * @period_len: length of individual cyclic buffer
2001  * @direction: DMA direction
2002  * @flags: transfer ack flags
2003  *
2004  * Return: Async transaction descriptor on success and NULL on failure
2005  */
2006 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2007 	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2008 	size_t period_len, enum dma_transfer_direction direction,
2009 	unsigned long flags)
2010 {
2011 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2012 	struct xilinx_dma_tx_descriptor *desc;
2013 	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2014 	size_t copy, sg_used;
2015 	unsigned int num_periods;
2016 	int i;
2017 	u32 reg;
2018 
2019 	if (!period_len)
2020 		return NULL;
2021 
2022 	num_periods = buf_len / period_len;
2023 
2024 	if (!num_periods)
2025 		return NULL;
2026 
2027 	if (!is_slave_direction(direction))
2028 		return NULL;
2029 
2030 	/* Allocate a transaction descriptor. */
2031 	desc = xilinx_dma_alloc_tx_descriptor(chan);
2032 	if (!desc)
2033 		return NULL;
2034 
2035 	chan->direction = direction;
2036 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2037 	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2038 
2039 	chan->buf_idx = 0;
2040 
2041 	for (i = 0; i < num_periods; ++i) {
2042 		sg_used = 0;
2043 
2044 		while (sg_used < period_len) {
2045 			struct xilinx_axidma_desc_hw *hw;
2046 
2047 			/* Get a free segment */
2048 			segment = xilinx_axidma_alloc_tx_segment(chan);
2049 			if (!segment)
2050 				goto error;
2051 
2052 			/*
2053 			 * Calculate the maximum number of bytes to transfer,
2054 			 * making sure it is less than the hw limit
2055 			 */
2056 			copy = min_t(size_t, period_len - sg_used,
2057 				     chan->xdev->max_buffer_len);
2058 			hw = &segment->hw;
2059 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2060 					  period_len * i);
2061 			hw->control = copy;
2062 
2063 			if (prev)
2064 				prev->hw.next_desc = segment->phys;
2065 
2066 			prev = segment;
2067 			sg_used += copy;
2068 
2069 			/*
2070 			 * Insert the segment into the descriptor segments
2071 			 * list.
2072 			 */
2073 			list_add_tail(&segment->node, &desc->segments);
2074 		}
2075 	}
2076 
2077 	head_segment = list_first_entry(&desc->segments,
2078 				   struct xilinx_axidma_tx_segment, node);
2079 	desc->async_tx.phys = head_segment->phys;
2080 
2081 	desc->cyclic = true;
2082 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2083 	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2084 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2085 
2086 	segment = list_last_entry(&desc->segments,
2087 				  struct xilinx_axidma_tx_segment,
2088 				  node);
2089 	segment->hw.next_desc = (u32) head_segment->phys;
2090 
2091 	/* For MEM_TO_DEV transfers, set SOP on the first and EOP on the last BD */
2092 	if (direction == DMA_MEM_TO_DEV) {
2093 		head_segment->hw.control |= XILINX_DMA_BD_SOP;
2094 		segment->hw.control |= XILINX_DMA_BD_EOP;
2095 	}
2096 
2097 	return &desc->async_tx;
2098 
2099 error:
2100 	xilinx_dma_free_tx_descriptor(chan, desc);
2101 	return NULL;
2102 }
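
/*
 * Usage sketch (illustrative): cyclic transfers are typically used for
 * continuous streaming; the client provides one coherent buffer split into
 * periods and gets a callback per completed period. "buf_phys", "buf_len",
 * "period_len", "my_period_done" and "my_ctx" are placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_period_done;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */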
2103 
2104 /**
2105  * xilinx_dma_terminate_all - Halt the channel and free descriptors
2106  * @dchan: Driver specific DMA Channel pointer
2107  *
2108  * Return: '0' always.
2109  */
2110 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2111 {
2112 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2113 	u32 reg;
2114 	int err;
2115 
2116 	if (!chan->cyclic) {
2117 		err = chan->stop_transfer(chan);
2118 		if (err) {
2119 			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2120 				chan, dma_ctrl_read(chan,
2121 				XILINX_DMA_REG_DMASR));
2122 			chan->err = true;
2123 		}
2124 	}
2125 
2126 	xilinx_dma_chan_reset(chan);
2127 	/* Remove and free all of the descriptors in the lists */
2128 	xilinx_dma_free_descriptors(chan);
2129 	chan->idle = true;
2130 
2131 	if (chan->cyclic) {
2132 		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2133 		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2134 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2135 		chan->cyclic = false;
2136 	}
2137 
2138 	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2139 		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2140 			     XILINX_CDMA_CR_SGMODE);
2141 
2142 	return 0;
2143 }
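
/*
 * Usage sketch (illustrative): clients normally reach this callback through
 * dmaengine_terminate_all() (or dmaengine_terminate_sync() on newer kernels)
 * when tearing down a stream; all outstanding descriptors on the channel are
 * freed.
 *
 *	dmaengine_terminate_all(chan);
 *	dma_release_channel(chan);
 */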
2144 
2145 /**
2146  * xilinx_vdma_channel_set_config - Configure VDMA channel
2147  * Run-time configuration for AXI VDMA, supports:
2148  * . halt the channel
2149  * . configure interrupt coalescing and inter-packet delay threshold
2150  * . start/stop parking
2151  * . enable genlock
2152  *
2153  * @dchan: DMA channel
2154  * @cfg: VDMA device configuration pointer
2155  *
2156  * Return: '0' on success and failure value on error
2157  */
2158 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2159 					struct xilinx_vdma_config *cfg)
2160 {
2161 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2162 	u32 dmacr;
2163 
2164 	if (cfg->reset)
2165 		return xilinx_dma_chan_reset(chan);
2166 
2167 	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2168 
2169 	chan->config.frm_dly = cfg->frm_dly;
2170 	chan->config.park = cfg->park;
2171 
2172 	/* genlock settings */
2173 	chan->config.gen_lock = cfg->gen_lock;
2174 	chan->config.master = cfg->master;
2175 
2176 	dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2177 	if (cfg->gen_lock && chan->genlock) {
2178 		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2179 		dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2180 		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2181 	}
2182 
2183 	chan->config.frm_cnt_en = cfg->frm_cnt_en;
2184 	chan->config.vflip_en = cfg->vflip_en;
2185 
2186 	if (cfg->park)
2187 		chan->config.park_frm = cfg->park_frm;
2188 	else
2189 		chan->config.park_frm = -1;
2190 
2191 	chan->config.coalesc = cfg->coalesc;
2192 	chan->config.delay = cfg->delay;
2193 
2194 	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2195 		dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2196 		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2197 		chan->config.coalesc = cfg->coalesc;
2198 	}
2199 
2200 	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2201 		dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2202 		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2203 		chan->config.delay = cfg->delay;
2204 	}
2205 
2206 	/* FSync Source selection */
2207 	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2208 	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2209 
2210 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2211 
2212 	return 0;
2213 }
2214 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
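
/*
 * Usage sketch (illustrative): a VDMA client tunes the channel at run time
 * by filling a struct xilinx_vdma_config and calling the exported helper
 * above; the values below are placeholders.
 *
 *	struct xilinx_vdma_config cfg = { 0 };
 *	int err;
 *
 *	cfg.frm_dly = 0;
 *	cfg.park = 1;
 *	cfg.park_frm = 0;
 *	cfg.coalesc = 1;
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 */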
2215 
2216 /* -----------------------------------------------------------------------------
2217  * Probe and remove
2218  */
2219 
2220 /**
2221  * xilinx_dma_chan_remove - Per Channel remove function
2222  * @chan: Driver specific DMA channel
2223  */
2224 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2225 {
2226 	/* Disable all interrupts */
2227 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2228 		      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2229 
2230 	if (chan->irq > 0)
2231 		free_irq(chan->irq, chan);
2232 
2233 	tasklet_kill(&chan->tasklet);
2234 
2235 	list_del(&chan->common.device_node);
2236 }
2237 
2238 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2239 			    struct clk **tx_clk, struct clk **rx_clk,
2240 			    struct clk **sg_clk, struct clk **tmp_clk)
2241 {
2242 	int err;
2243 
2244 	*tmp_clk = NULL;
2245 
2246 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2247 	if (IS_ERR(*axi_clk)) {
2248 		err = PTR_ERR(*axi_clk);
2249 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2250 		return err;
2251 	}
2252 
2253 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2254 	if (IS_ERR(*tx_clk))
2255 		*tx_clk = NULL;
2256 
2257 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2258 	if (IS_ERR(*rx_clk))
2259 		*rx_clk = NULL;
2260 
2261 	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2262 	if (IS_ERR(*sg_clk))
2263 		*sg_clk = NULL;
2264 
2265 	err = clk_prepare_enable(*axi_clk);
2266 	if (err) {
2267 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2268 		return err;
2269 	}
2270 
2271 	err = clk_prepare_enable(*tx_clk);
2272 	if (err) {
2273 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2274 		goto err_disable_axiclk;
2275 	}
2276 
2277 	err = clk_prepare_enable(*rx_clk);
2278 	if (err) {
2279 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2280 		goto err_disable_txclk;
2281 	}
2282 
2283 	err = clk_prepare_enable(*sg_clk);
2284 	if (err) {
2285 		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2286 		goto err_disable_rxclk;
2287 	}
2288 
2289 	return 0;
2290 
2291 err_disable_rxclk:
2292 	clk_disable_unprepare(*rx_clk);
2293 err_disable_txclk:
2294 	clk_disable_unprepare(*tx_clk);
2295 err_disable_axiclk:
2296 	clk_disable_unprepare(*axi_clk);
2297 
2298 	return err;
2299 }
2300 
2301 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2302 			    struct clk **dev_clk, struct clk **tmp_clk,
2303 			    struct clk **tmp1_clk, struct clk **tmp2_clk)
2304 {
2305 	int err;
2306 
2307 	*tmp_clk = NULL;
2308 	*tmp1_clk = NULL;
2309 	*tmp2_clk = NULL;
2310 
2311 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2312 	if (IS_ERR(*axi_clk)) {
2313 		err = PTR_ERR(*axi_clk);
2314 		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2315 		return err;
2316 	}
2317 
2318 	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2319 	if (IS_ERR(*dev_clk)) {
2320 		err = PTR_ERR(*dev_clk);
2321 		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2322 		return err;
2323 	}
2324 
2325 	err = clk_prepare_enable(*axi_clk);
2326 	if (err) {
2327 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2328 		return err;
2329 	}
2330 
2331 	err = clk_prepare_enable(*dev_clk);
2332 	if (err) {
2333 		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2334 		goto err_disable_axiclk;
2335 	}
2336 
2337 	return 0;
2338 
2339 err_disable_axiclk:
2340 	clk_disable_unprepare(*axi_clk);
2341 
2342 	return err;
2343 }
2344 
2345 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2346 			    struct clk **tx_clk, struct clk **txs_clk,
2347 			    struct clk **rx_clk, struct clk **rxs_clk)
2348 {
2349 	int err;
2350 
2351 	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2352 	if (IS_ERR(*axi_clk)) {
2353 		err = PTR_ERR(*axi_clk);
2354 		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2355 		return err;
2356 	}
2357 
2358 	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2359 	if (IS_ERR(*tx_clk))
2360 		*tx_clk = NULL;
2361 
2362 	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2363 	if (IS_ERR(*txs_clk))
2364 		*txs_clk = NULL;
2365 
2366 	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2367 	if (IS_ERR(*rx_clk))
2368 		*rx_clk = NULL;
2369 
2370 	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2371 	if (IS_ERR(*rxs_clk))
2372 		*rxs_clk = NULL;
2373 
2374 	err = clk_prepare_enable(*axi_clk);
2375 	if (err) {
2376 		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2377 		return err;
2378 	}
2379 
2380 	err = clk_prepare_enable(*tx_clk);
2381 	if (err) {
2382 		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2383 		goto err_disable_axiclk;
2384 	}
2385 
2386 	err = clk_prepare_enable(*txs_clk);
2387 	if (err) {
2388 		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2389 		goto err_disable_txclk;
2390 	}
2391 
2392 	err = clk_prepare_enable(*rx_clk);
2393 	if (err) {
2394 		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2395 		goto err_disable_txsclk;
2396 	}
2397 
2398 	err = clk_prepare_enable(*rxs_clk);
2399 	if (err) {
2400 		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2401 		goto err_disable_rxclk;
2402 	}
2403 
2404 	return 0;
2405 
2406 err_disable_rxclk:
2407 	clk_disable_unprepare(*rx_clk);
2408 err_disable_txsclk:
2409 	clk_disable_unprepare(*txs_clk);
2410 err_disable_txclk:
2411 	clk_disable_unprepare(*tx_clk);
2412 err_disable_axiclk:
2413 	clk_disable_unprepare(*axi_clk);
2414 
2415 	return err;
2416 }
2417 
2418 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2419 {
2420 	clk_disable_unprepare(xdev->rxs_clk);
2421 	clk_disable_unprepare(xdev->rx_clk);
2422 	clk_disable_unprepare(xdev->txs_clk);
2423 	clk_disable_unprepare(xdev->tx_clk);
2424 	clk_disable_unprepare(xdev->axi_clk);
2425 }
2426 
2427 /**
2428  * xilinx_dma_chan_probe - Per Channel Probing
2429  * It gets channel features from the device tree entry and
2430  * initializes special channel handling routines
2431  *
2432  * @xdev: Driver specific device structure
2433  * @node: Device node
2434  * @chan_id: DMA Channel id
2435  *
2436  * Return: '0' on success and failure value on error
2437  */
2438 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2439 				  struct device_node *node, int chan_id)
2440 {
2441 	struct xilinx_dma_chan *chan;
2442 	bool has_dre = false;
2443 	u32 value, width;
2444 	int err;
2445 
2446 	/* Allocate and initialize the channel structure */
2447 	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2448 	if (!chan)
2449 		return -ENOMEM;
2450 
2451 	chan->dev = xdev->dev;
2452 	chan->xdev = xdev;
2453 	chan->has_sg = xdev->has_sg;
2454 	chan->desc_pendingcount = 0x0;
2455 	chan->ext_addr = xdev->ext_addr;
2456 	/* This flag ensures that descriptors are not submitted while the
2457 	 * DMA engine is busy with a transfer. It is used to avoid polling
2458 	 * a bit in the status register to learn the DMA state in the
2459 	 * driver hot path.
2460 	 */
2461 	chan->idle = true;
2462 
2463 	spin_lock_init(&chan->lock);
2464 	INIT_LIST_HEAD(&chan->pending_list);
2465 	INIT_LIST_HEAD(&chan->done_list);
2466 	INIT_LIST_HEAD(&chan->active_list);
2467 	INIT_LIST_HEAD(&chan->free_seg_list);
2468 
2469 	/* Retrieve the channel properties from the device tree */
2470 	has_dre = of_property_read_bool(node, "xlnx,include-dre");
2471 
2472 	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2473 
2474 	err = of_property_read_u32(node, "xlnx,datawidth", &value);
2475 	if (err) {
2476 		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2477 		return err;
2478 	}
2479 	width = value >> 3; /* Convert bits to bytes */
2480 
2481 	/* If data width is greater than 8 bytes, DRE is not in hw */
2482 	if (width > 8)
2483 		has_dre = false;
2484 
2485 	if (!has_dre)
2486 		xdev->common.copy_align = fls(width - 1);
2487 
2488 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2489 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2490 	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2491 		chan->direction = DMA_MEM_TO_DEV;
2492 		chan->id = chan_id;
2493 		xdev->common.directions = BIT(DMA_MEM_TO_DEV);
2494 
2495 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2496 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2497 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2498 			chan->config.park = 1;
2499 
2500 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2501 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2502 				chan->flush_on_fsync = true;
2503 		}
2504 	} else if (of_device_is_compatible(node,
2505 					   "xlnx,axi-vdma-s2mm-channel") ||
2506 		   of_device_is_compatible(node,
2507 					   "xlnx,axi-dma-s2mm-channel")) {
2508 		chan->direction = DMA_DEV_TO_MEM;
2509 		chan->id = chan_id;
2510 		xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
2511 		chan->has_vflip = of_property_read_bool(node,
2512 					"xlnx,enable-vert-flip");
2513 		if (chan->has_vflip) {
2514 			chan->config.vflip_en = dma_read(chan,
2515 				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2516 				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2517 		}
2518 
2519 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2520 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2521 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2522 			chan->config.park = 1;
2523 
2524 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2525 			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2526 				chan->flush_on_fsync = true;
2527 		}
2528 	} else {
2529 		dev_err(xdev->dev, "Invalid channel compatible node\n");
2530 		return -EINVAL;
2531 	}
2532 
2533 	/* Request the interrupt */
2534 	chan->irq = irq_of_parse_and_map(node, 0);
2535 	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2536 			  "xilinx-dma-controller", chan);
2537 	if (err) {
2538 		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2539 		return err;
2540 	}
2541 
2542 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2543 		chan->start_transfer = xilinx_dma_start_transfer;
2544 		chan->stop_transfer = xilinx_dma_stop_transfer;
2545 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2546 		chan->start_transfer = xilinx_cdma_start_transfer;
2547 		chan->stop_transfer = xilinx_cdma_stop_transfer;
2548 	} else {
2549 		chan->start_transfer = xilinx_vdma_start_transfer;
2550 		chan->stop_transfer = xilinx_dma_stop_transfer;
2551 	}
2552 
2553 	/* Initialize the tasklet */
2554 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2555 			(unsigned long)chan);
2556 
2557 	/*
2558 	 * Initialize the DMA channel and add it to the DMA engine channels
2559 	 * list.
2560 	 */
2561 	chan->common.device = &xdev->common;
2562 
2563 	list_add_tail(&chan->common.device_node, &xdev->common.channels);
2564 	xdev->chan[chan->id] = chan;
2565 
2566 	/* Reset the channel */
2567 	err = xilinx_dma_chan_reset(chan);
2568 	if (err < 0) {
2569 		dev_err(xdev->dev, "Reset channel failed\n");
2570 		return err;
2571 	}
2572 
2573 	return 0;
2574 }
2575 
2576 /**
2577  * xilinx_dma_child_probe - Per child node probe
2578  * It gets the number of dma-channels per child node from
2579  * the device tree and initializes all the channels.
2580  *
2581  * @xdev: Driver specific device structure
2582  * @node: Device node
2583  *
2584  * Return: 0 always.
2585  */
2586 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2587 				    struct device_node *node)
2588 {
2589 	int i, nr_channels = 1;
2590 
2591 	for (i = 0; i < nr_channels; i++)
2592 		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2593 
2594 	xdev->nr_channels += nr_channels;
2595 
2596 	return 0;
2597 }
2598 
2599 /**
2600  * of_dma_xilinx_xlate - Translation function
2601  * @dma_spec: Pointer to DMA specifier as found in the device tree
2602  * @ofdma: Pointer to DMA controller data
2603  *
2604  * Return: DMA channel pointer on success and NULL on error
2605  */
2606 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2607 						struct of_dma *ofdma)
2608 {
2609 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2610 	int chan_id = dma_spec->args[0];
2611 
2612 	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2613 		return NULL;
2614 
2615 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2616 }
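
/*
 * Usage sketch (illustrative): the single cell in the client's "dmas"
 * phandle is the channel id checked above, so a client node referencing
 * this controller can obtain the channel with the generic request helper;
 * the "rx" name is a placeholder matching a hypothetical dma-names entry.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */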
2617 
2618 static const struct xilinx_dma_config axidma_config = {
2619 	.dmatype = XDMA_TYPE_AXIDMA,
2620 	.clk_init = axidma_clk_init,
2621 };
2622 
2623 static const struct xilinx_dma_config axicdma_config = {
2624 	.dmatype = XDMA_TYPE_CDMA,
2625 	.clk_init = axicdma_clk_init,
2626 };
2627 
2628 static const struct xilinx_dma_config axivdma_config = {
2629 	.dmatype = XDMA_TYPE_VDMA,
2630 	.clk_init = axivdma_clk_init,
2631 };
2632 
2633 static const struct of_device_id xilinx_dma_of_ids[] = {
2634 	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2635 	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2636 	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2637 	{}
2638 };
2639 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2640 
2641 /**
2642  * xilinx_dma_probe - Driver probe function
2643  * @pdev: Pointer to the platform_device structure
2644  *
2645  * Return: '0' on success and failure value on error
2646  */
2647 static int xilinx_dma_probe(struct platform_device *pdev)
2648 {
2649 	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2650 			struct clk **, struct clk **, struct clk **)
2651 					= axivdma_clk_init;
2652 	struct device_node *node = pdev->dev.of_node;
2653 	struct xilinx_dma_device *xdev;
2654 	struct device_node *child, *np = pdev->dev.of_node;
2655 	struct resource *io;
2656 	u32 num_frames, addr_width, len_width;
2657 	int i, err;
2658 
2659 	/* Allocate and initialize the DMA engine structure */
2660 	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2661 	if (!xdev)
2662 		return -ENOMEM;
2663 
2664 	xdev->dev = &pdev->dev;
2665 	if (np) {
2666 		const struct of_device_id *match;
2667 
2668 		match = of_match_node(xilinx_dma_of_ids, np);
2669 		if (match && match->data) {
2670 			xdev->dma_config = match->data;
2671 			clk_init = xdev->dma_config->clk_init;
2672 		}
2673 	}
2674 
2675 	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2676 		       &xdev->rx_clk, &xdev->rxs_clk);
2677 	if (err)
2678 		return err;
2679 
2680 	/* Request and map I/O memory */
2681 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2682 	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2683 	if (IS_ERR(xdev->regs))
2684 		return PTR_ERR(xdev->regs);
2685 
2686 	/* Retrieve the DMA engine properties from the device tree */
2687 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2688 	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
2689 
2690 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2691 		if (!of_property_read_u32(node, "xlnx,sg-length-width",
2692 					  &len_width)) {
2693 			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
2694 			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
2695 				dev_warn(xdev->dev,
2696 					 "invalid xlnx,sg-length-width property value; using default width\n");
2697 			} else {
2698 				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
2699 					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
2700 
2701 				xdev->max_buffer_len = GENMASK(len_width - 1, 0);
2702 			}
2703 		}
2704 	}
2705 
2706 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2707 		err = of_property_read_u32(node, "xlnx,num-fstores",
2708 					   &num_frames);
2709 		if (err < 0) {
2710 			dev_err(xdev->dev,
2711 				"missing xlnx,num-fstores property\n");
2712 			return err;
2713 		}
2714 
2715 		err = of_property_read_u32(node, "xlnx,flush-fsync",
2716 					   &xdev->flush_on_fsync);
2717 		if (err < 0)
2718 			dev_warn(xdev->dev,
2719 				 "missing xlnx,flush-fsync property\n");
2720 	}
2721 
2722 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
2723 	if (err < 0)
2724 		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2725 
2726 	if (addr_width > 32)
2727 		xdev->ext_addr = true;
2728 	else
2729 		xdev->ext_addr = false;
2730 
2731 	/* Set the dma mask bits */
2732 	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
2733 
2734 	/* Initialize the DMA engine */
2735 	xdev->common.dev = &pdev->dev;
2736 
2737 	INIT_LIST_HEAD(&xdev->common.channels);
2738 	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
2739 		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2740 		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2741 	}
2742 
2743 	xdev->common.dst_addr_widths = BIT(addr_width / 8);
2744 	xdev->common.src_addr_widths = BIT(addr_width / 8);
2745 	xdev->common.device_alloc_chan_resources =
2746 				xilinx_dma_alloc_chan_resources;
2747 	xdev->common.device_free_chan_resources =
2748 				xilinx_dma_free_chan_resources;
2749 	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2750 	xdev->common.device_tx_status = xilinx_dma_tx_status;
2751 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2752 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2753 		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2754 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2755 		xdev->common.device_prep_dma_cyclic =
2756 					  xilinx_dma_prep_dma_cyclic;
2757 		/* Residue calculation is supported only by AXI DMA and CDMA */
2758 		xdev->common.residue_granularity =
2759 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
2760 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2761 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2762 		dma_cap_set(DMA_SG, xdev->common.cap_mask);
2763 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2764 		xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
2765 		/* Residue calculation is supported only by AXI DMA and CDMA */
2766 		xdev->common.residue_granularity =
2767 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
2768 	} else {
2769 		xdev->common.device_prep_interleaved_dma =
2770 				xilinx_vdma_dma_prep_interleaved;
2771 	}
2772 
2773 	platform_set_drvdata(pdev, xdev);
2774 
2775 	/* Initialize the channels */
2776 	for_each_child_of_node(node, child) {
2777 		err = xilinx_dma_child_probe(xdev, child);
2778 		if (err < 0)
2779 			goto disable_clks;
2780 	}
2781 
2782 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2783 		for (i = 0; i < xdev->nr_channels; i++)
2784 			if (xdev->chan[i])
2785 				xdev->chan[i]->num_frms = num_frames;
2786 	}
2787 
2788 	/* Register the DMA engine with the core */
2789 	dma_async_device_register(&xdev->common);
2790 
2791 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2792 					 xdev);
2793 	if (err < 0) {
2794 		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2795 		dma_async_device_unregister(&xdev->common);
2796 		goto error;
2797 	}
2798 
2799 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2800 		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
2801 	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2802 		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
2803 	else
2804 		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2805 
2806 	return 0;
2807 
2808 disable_clks:
2809 	xdma_disable_allclks(xdev);
2810 error:
2811 	for (i = 0; i < xdev->nr_channels; i++)
2812 		if (xdev->chan[i])
2813 			xilinx_dma_chan_remove(xdev->chan[i]);
2814 
2815 	return err;
2816 }
2817 
2818 /**
2819  * xilinx_dma_remove - Driver remove function
2820  * @pdev: Pointer to the platform_device structure
2821  *
2822  * Return: Always '0'
2823  */
2824 static int xilinx_dma_remove(struct platform_device *pdev)
2825 {
2826 	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2827 	int i;
2828 
2829 	of_dma_controller_free(pdev->dev.of_node);
2830 
2831 	dma_async_device_unregister(&xdev->common);
2832 
2833 	for (i = 0; i < xdev->nr_channels; i++)
2834 		if (xdev->chan[i])
2835 			xilinx_dma_chan_remove(xdev->chan[i]);
2836 
2837 	xdma_disable_allclks(xdev);
2838 
2839 	return 0;
2840 }
2841 
2842 static struct platform_driver xilinx_vdma_driver = {
2843 	.driver = {
2844 		.name = "xilinx-vdma",
2845 		.of_match_table = xilinx_dma_of_ids,
2846 	},
2847 	.probe = xilinx_dma_probe,
2848 	.remove = xilinx_dma_remove,
2849 };
2850 
2851 module_platform_driver(xilinx_vdma_driver);
2852 
2853 MODULE_AUTHOR("Xilinx and Xianjun Jiao");
2854 MODULE_DESCRIPTION("Xilinx VDMA driver");
2855 MODULE_LICENSE("GPL v2");
2856