// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <[email protected]>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros for normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 24)
#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 8)

#define SUN4I_MAX_BURST	8
#define SUNIV_MAX_BURST	4

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

#define SUNIV_NDMA_DRQ_TYPE_SDRAM		0x11
#define SUNIV_NDMA_DRQ_TYPE_LIMIT		(0x17 + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

#define SUNIV_NDMA_CFG_CONT_MODE		BIT(29)
#define SUNIV_NDMA_CFG_WAIT_STATE(n)		((n) << 26)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

#define SUNIV_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUNIV_DDMA_DRQ_TYPE_LIMIT		(0x9 + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, for
 * 16 channels in total. They expose 29 and 21 request endpoints,
 * respectively. Given that every Normal DMA endpoint other than
 * SDRAM can be used as tx or rx, we need 78 vchans in total.
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS					\
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS						\
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
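
/*
 * Worked example of the vchan arithmetic above: the 29 NDMA
 * endpoints would give 29 * 2 = 58 tx/rx combinations, but SDRAM
 * only counts once, so subtract 1 to get 57. Adding the 21 DDMA
 * endpoints then yields 57 + 21 = 78 vchans.
 */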

#define SUNIV_NDMA_NR_MAX_CHANNELS	4
#define SUNIV_DDMA_NR_MAX_CHANNELS	4
#define SUNIV_NDMA_NR_MAX_VCHANS	(24 * 2 - 1)
#define SUNIV_DDMA_NR_MAX_VCHANS	10

/*
 * This set of SUN4I_DDMA timing parameters was found experimentally
 * while working with the SPI driver and seems to make it behave
 * correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |			\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
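
/*
 * With the (n - 1) field encoding above, this value expands to
 * 0x00010001: a data block size of 1 and 2 wait cycles on both the
 * source and the destination side.
 */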

/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one size limit, so we have to use the smaller value.
 */
#define SUN4I_NDMA_MAX_SEG_SIZE		SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE		SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE		SUN4I_NDMA_MAX_SEG_SIZE

/*
 * Hardware channels / ports representation
 *
 * The hardware is used in several SoCs, with differing numbers
 * of channels and endpoints. This structure ties those numbers
 * to a certain compatible string.
 */
struct sun4i_dma_config {
	u32 ndma_nr_max_channels;
	u32 ndma_nr_max_vchans;

	u32 ddma_nr_max_channels;
	u32 ddma_nr_max_vchans;

	u32 dma_nr_max_channels;

	void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
	void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
	int (*convert_burst)(u32 maxburst);

	u8 ndma_drq_sdram;
	u8 ddma_drq_sdram;

	u8 max_burst;
	bool has_reset;
};

struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	bool				is_cyclic : 1;
	bool				use_half_int : 1;
};

struct sun4i_dma_dev {
	unsigned long			*pchans_used;
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	spinlock_t			lock;
	const struct sun4i_dma_config	*cfg;
	struct reset_control		*rst;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
{
	*p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
}

static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
{
	*p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
}

static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
{
	*p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
}

static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
{
	*p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
}

static int convert_burst_a10(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_burst_f1c100s(u32 maxburst)
{
	if (maxburst > 4)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 bits (1 byte) -> 0, 16 bits (2 bytes) -> 1, 32 bits (4 bytes) -> 2 */
	return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans [0..ndma_nr_max_channels) are normal, and
	 * [ndma_nr_max_channels..dma_nr_max_channels) are dedicated
	 * ones
	 */
	if (vchan->is_dedicated) {
		i = priv->cfg->ndma_nr_max_channels;
		max = priv->cfg->dma_nr_max_channels;
	} else {
		i = 0;
		max = priv->cfg->ndma_nr_max_channels;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type;
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

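	/*
	 * Each pchan owns two consecutive bits in the IRQ enable
	 * register: bit (2 * n) enables the half-done interrupt and
	 * bit (2 * n + 1) the end-of-transfer interrupt.
	 */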
	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found\n");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete\n");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * An NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	priv->cfg->set_src_data_width(&promise->cfg, ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	priv->cfg->set_dst_data_width(&promise->cfg, ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = priv->cfg->convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	priv->cfg->set_src_data_width(&promise->cfg, ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	priv->cfg->set_dst_data_width(&promise->cfg, ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus-aligned addresses, so choose
	 * the widest possible bus width to get decent performance. We
	 * also maximize the burst size for the same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = priv->cfg->max_burst;
	sconfig->dst_maxburst = priv->cfg->max_burst;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |=
			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
	} else {
		promise->cfg |=
			SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
			SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;
	u8 ram_type, io_mode, linear_mode;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = priv->cfg->ddma_drq_sdram;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = priv->cfg->ndma_drq_sdram;
	}

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
	}

	/*
	 * We will be using half done interrupts to make two periods
	 * out of a promise, so we need to program the DMA engine less
	 * often
	 */

	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to program the engine half as often as if we
	 * didn't use it (keep in mind the hardware doesn't support
	 * linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *  |---|---|---|---| (periods / promises)
	 *  P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *  |-------|-------| (promises as configured on hw)
	 *  |---|---|---|---| (periods)
	 *  P   I  I,P  I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 *
	 * This only works if two periods fit in a single promise. That will
	 * always be the case for dedicated DMA, where the hardware has a much
	 * larger maximum transfer size than advertised to clients.
	 */
	if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
		period_len *= 2;
		contract->use_half_int = 1;
	}
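
	/*
	 * For example (hypothetical numbers): a 32 KiB cyclic buffer
	 * with 4 KiB periods is programmed below as four 8 KiB
	 * promises, with the half-done interrupt marking the period
	 * boundary in the middle of each promise.
	 */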

	nr_periods = DIV_ROUND_UP(len, period_len);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len;
		plength = min((len - offset), period_len);
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, src, dest,
							plength, sconfig);
		else
			promise = generate_ndma_promise(chan, src, dest,
							plength, sconfig, dir);

		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = priv->cfg->ddma_drq_sdram;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = priv->cfg->ndma_drq_sdram;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}

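/*
 * An illustrative (hypothetical) consumer node, assuming the two-cell
 * binding handled below: the first cell selects Normal (0) or
 * Dedicated (1) DMA, and the second cell is the endpoint/DRQ number.
 *
 *	spi0: spi@1c05000 {
 *		...
 *		dmas = <&dma 1 27>, <&dma 1 26>;
 *		dma-names = "rx", "tx";
 *	};
 */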
static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * quantity. If possible, replace the first listed element's
	 * full size with the actual remaining amount
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
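		/* Two IRQ bits per pchan, so bit / 2 is the pchan index */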
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_move_tail(&vchan->processing->list,
				       &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}

	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid too long a loop
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->cfg = of_device_get_match_data(&pdev->dev);
	if (!priv->cfg)
		return -ENODEV;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	if (priv->cfg->has_reset) {
		priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
		if (IS_ERR(priv->rst))
			return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
					     "Failed to get reset control\n");
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status		= sun4i_dma_tx_status;
	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config		= sun4i_dma_config;
	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
	priv->slave.copy_align			= 2;
	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
						  BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	priv->pchans_used = devm_kcalloc(&pdev->dev,
					 BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
					 sizeof(unsigned long), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans || !priv->pchans_used)
		return -ENOMEM;

	/*
	 * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
	 * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
	 * dedicated ones
	 */
	for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);
}

static struct sun4i_dma_config sun4i_a10_dma_cfg = {
	.ndma_nr_max_channels	= SUN4I_NDMA_NR_MAX_CHANNELS,
	.ndma_nr_max_vchans	= SUN4I_NDMA_NR_MAX_VCHANS,

	.ddma_nr_max_channels	= SUN4I_DDMA_NR_MAX_CHANNELS,
	.ddma_nr_max_vchans	= SUN4I_DDMA_NR_MAX_VCHANS,

	.dma_nr_max_channels	= SUN4I_DMA_NR_MAX_CHANNELS,

	.set_dst_data_width	= set_dst_data_width_a10,
	.set_src_data_width	= set_src_data_width_a10,
	.convert_burst		= convert_burst_a10,

	.ndma_drq_sdram		= SUN4I_NDMA_DRQ_TYPE_SDRAM,
	.ddma_drq_sdram		= SUN4I_DDMA_DRQ_TYPE_SDRAM,

	.max_burst		= SUN4I_MAX_BURST,
	.has_reset		= false,
};

static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
	.ndma_nr_max_channels	= SUNIV_NDMA_NR_MAX_CHANNELS,
	.ndma_nr_max_vchans	= SUNIV_NDMA_NR_MAX_VCHANS,

	.ddma_nr_max_channels	= SUNIV_DDMA_NR_MAX_CHANNELS,
	.ddma_nr_max_vchans	= SUNIV_DDMA_NR_MAX_VCHANS,

	.dma_nr_max_channels	= SUNIV_NDMA_NR_MAX_CHANNELS +
		SUNIV_DDMA_NR_MAX_CHANNELS,

	.set_dst_data_width	= set_dst_data_width_f1c100s,
	.set_src_data_width	= set_src_data_width_f1c100s,
	.convert_burst		= convert_burst_f1c100s,

	.ndma_drq_sdram		= SUNIV_NDMA_DRQ_TYPE_SDRAM,
	.ddma_drq_sdram		= SUNIV_DDMA_DRQ_TYPE_SDRAM,

	.max_burst		= SUNIV_MAX_BURST,
	.has_reset		= true,
};

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
	{ .compatible = "allwinner,suniv-f1c100s-dma",
		.data = &suniv_f1c100s_dma_cfg },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove = sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 DMA Controller Driver");
MODULE_AUTHOR("Emilio López <[email protected]>");
MODULE_LICENSE("GPL");