/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

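/**
 * dma_init - Initialize the DMA channel hardware
 * @fsl_chan : Freescale DMA channel
 *
 * Reset the channel's mode register, then enable the interrupt
 * sources the driver relies on: error and end-of-segments/links
 * events on the 85xx, end-of-transfer events on the 83xx.
 */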
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following mode:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

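/*
 * The helpers below fill in one hardware link descriptor field each.
 * On the 85xx, the upper 32 bits of the 64-bit source/destination
 * fields carry the transfer attributes, which is where the snoop
 * read/write types are OR'ed in; the 83xx instead uses a snoop enable
 * bit in the next link descriptor address field.
 */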
static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

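/**
 * dma_is_idle - Check whether the channel can accept a new transfer
 * @fsl_chan : Freescale DMA channel
 *
 * The channel is considered idle when the busy (CB) status bit is
 * clear or the channel has halted (CH).
 */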
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);

	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

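/**
 * dma_start - Start the transfer on a DMA channel
 * @fsl_chan : Freescale DMA channel
 *
 * Program the external-master-pause mode when external pause control
 * is enabled (clearing BCR first), then either arm the external-master
 * start or set the Channel Start bit to begin the transfer directly.
 */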
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);
	}

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

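/**
 * dma_halt - Stop a DMA channel
 * @fsl_chan : Freescale DMA channel
 *
 * Issue a channel abort, clear the start bits, then poll for up to
 * 1 ms for the channel to go idle.
 */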
static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the queue tail to the new descriptor's physical address
	 * and enable the End-of-segment interrupt on the last link
	 * descriptor (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold transfer size. The source address hold
 * or loop transfer size determines how the DMA reads from the source
 * address (SA): if the loop size is 4, the DMA will read data from
 * SA, SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1 ... and
 * so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold transfer size. The destination
 * address hold or loop transfer size determines how the DMA writes to
 * the destination address (TA): if the loop size is 4, the DMA will
 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size : Pause control size, 0 to disable external pause control.
 *         The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal
 * DREQ#. The pause control size is how many bytes are allowed to
 * transfer before pausing the channel, after which a new assertion of
 * DREQ# resumes channel operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else {
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
	}
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately; the DMA channel waits for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

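/**
 * fsl_dma_tx_submit - Queue a prepared transaction on the channel
 * @tx : The prepared async_tx descriptor
 *
 * Assign the next cookie to the transaction and splice its link
 * descriptors onto the channel's ld_queue; the hardware is kicked
 * later by fsl_dma_memcpy_issue_pending().
 *
 * Return - The cookie assigned to the transaction.
 */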
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a DMA pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* The descriptor must be aligned to 32 bytes to meet the
	 * FSL DMA specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);
}

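/**
 * fsl_dma_prep_interrupt - Prepare a DMA_INTERRUPT transaction
 * @chan : DMA channel
 *
 * A DMA_INTERRUPT transaction is a NULL transfer that only raises an
 * interrupt; the interrupt handler recognizes it by its zero byte
 * count (see the Programming Error handling in
 * fsl_dma_chan_do_interrupt()).
 *
 * Return - The prepared descriptor. NULL on failure.
 */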
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.ack = 0;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}

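/**
 * fsl_dma_prep_memcpy - Prepare a memcpy transaction
 * @chan : DMA channel
 * @dma_dest : Destination DMA address
 * @dma_src : Source DMA address
 * @len : Length of the transfer in bytes
 * @flags : Transfer flags (currently unused)
 *
 * Build a chain of link descriptors covering @len bytes, each moving
 * at most FSL_DMA_BCR_MAX_CNT bytes.
 *
 * Return - The first descriptor of the chain. NULL on failure.
 */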
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: every
 * completed link descriptor is removed from the queue, its callback
 * is run, and it is freed back to the descriptor pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer the link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start the transfer.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next)
		;

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from the ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;

		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}

static void fsl_dma_dependency_added(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	fsl_chan_ld_cleanup(fsl_chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

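/**
 * fsl_dma_chan_do_interrupt - Per-channel interrupt handler
 * @irq : The IRQ number
 * @data : The fsl_dma_chan pointer
 *
 * Acknowledge and decode the channel events, update the completed
 * cookie and restart the queue as required, then defer descriptor
 * cleanup to the channel tasklet.
 */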
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* The BCR register is 0: this is a DMA_INTERRUPT
			 * async_tx. Update the completed cookie and
			 * continue with the next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* When a link descriptor segment transfer finishes, we recycle
	 * the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}

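/**
 * fsl_dma_do_interrupt - Controller-level interrupt handler
 * @irq : The IRQ number
 * @data : The fsl_dma_device pointer
 *
 * Each channel's events occupy one byte of the general status
 * register, channel 0 in the most significant byte; decode the
 * channel number and dispatch to its channel interrupt handler.
 */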
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;

	fsl_chan_ld_cleanup(fsl_chan);
}

static void fsl_dma_callback_test(void *param)
{
	struct fsl_dma_chan *fsl_chan = param;

	if (fsl_chan)
		dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n");
}

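/**
 * fsl_dma_self_test - Sanity-check a freshly probed channel
 * @fsl_chan : Freescale DMA channel
 *
 * Run a few memcpy transactions from one half of a test buffer to the
 * other, including an interrupt transaction and out-of-order
 * submission, then verify the copied data.
 *
 * Return - 0 on success, nonzero on failure.
 */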
static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
{
	struct dma_chan *chan;
	int err = 0;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	u8 *src, *dest;
	int i;
	size_t test_size;
	struct dma_async_tx_descriptor *tx1, *tx2, *tx3;

	test_size = 4096;

	src = kmalloc(test_size * 2, GFP_KERNEL);
	if (!src) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc memory for test!\n");
		err = -ENOMEM;
		goto out;
	}

	dest = src + test_size;

	for (i = 0; i < test_size; i++)
		src[i] = (u8) i;

	chan = &fsl_chan->common;

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto out;
	}

	/* TX 1 */
	dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
				  DMA_FROM_DEVICE);
	tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
	async_tx_ack(tx1);

	cookie = fsl_dma_tx_submit(tx1);
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto out;
	}

	/* Test freeing and re-allocating the channel resources */
	fsl_dma_free_chan_resources(chan);

	if (fsl_dma_alloc_chan_resources(chan) < 1) {
		dev_err(fsl_chan->dev,
				"selftest: Cannot alloc resources for DMA\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* Continue the test
	 * TX 2
	 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
					test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
					test_size / 4, DMA_FROM_DEVICE);
	tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx2);

	/* TX 3 */
	dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
					test_size / 4, DMA_TO_DEVICE);
	dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
					test_size / 4, DMA_FROM_DEVICE);
	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
	async_tx_ack(tx3);

	/* Interrupt tx test */
	tx1 = fsl_dma_prep_interrupt(chan);
	async_tx_ack(tx1);
	cookie = fsl_dma_tx_submit(tx1);

	/* Test submitting the prepared txs out of order */
	cookie = fsl_dma_tx_submit(tx3);
	cookie = fsl_dma_tx_submit(tx2);

	if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
	    dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
		tx3->callback = fsl_dma_callback_test;
		tx3->callback_param = fsl_chan;
	}
	fsl_dma_memcpy_issue_pending(chan);
	msleep(2);

	if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(fsl_chan->dev, "selftest: Time out!\n");
		err = -ENODEV;
		goto free_resources;
	}

	err = memcmp(src, dest, test_size);
	if (err) {
		for (i = 0; (i < test_size) && (*(src + i) == *(dest + i));
				i++)
			;
		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
				"wrong! src 0x%x, dest 0x%x\n",
				i, (long)test_size, *(src + i), *(dest + i));
	}

free_resources:
	fsl_dma_free_chan_resources(chan);
out:
	kfree(src);
	return err;
}

static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
					const struct of_device_id *match)
{
	struct fsl_dma_device *fdev;
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	fdev = dev_get_drvdata(dev->dev.parent);
	BUG_ON(!fdev);

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(&dev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		kfree(new_fsl_chan);
		return err;
	}

	new_fsl_chan->feature = *(u32 *)match->data;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &dev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(&dev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also support address loops */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
				&fsl_dma_chan_do_interrupt, IRQF_SHARED,
				"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(&dev->dev, "DMA channel %s request_irq error "
				"with return %d\n", dev->node->full_name, err);
			goto err;
		}
	}

	err = fsl_dma_self_test(new_fsl_chan);
	if (err)
		goto err;

	dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				match->compatible, new_fsl_chan->irq);

	return 0;
err:
	dma_halt(new_fsl_chan);
	iounmap(new_fsl_chan->reg_base);
	free_irq(new_fsl_chan->irq, new_fsl_chan);
	list_del(&new_fsl_chan->common.device_node);
	kfree(new_fsl_chan);
	return err;
}

const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;

static struct of_device_id of_fsl_dma_chan_ids[] = {
	{
		.compatible = "fsl,eloplus-dma-channel",
		.data = (void *)&mpc8540_dma_ip_feature,
	},
	{
		.compatible = "fsl,elo-dma-channel",
		.data = (void *)&mpc8349_dma_ip_feature,
	},
	{}
};

static struct of_platform_driver of_fsl_dma_chan_driver = {
	.name = "of-fsl-dma-channel",
	.match_table = of_fsl_dma_chan_ids,
	.probe = of_fsl_dma_chan_probe,
};

static __init int of_fsl_dma_chan_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_chan_driver);
}

static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	unsigned int irq;
	struct fsl_dma_device *fdev;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err;
	}

	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_dependency_added = fsl_dma_dependency_added;
	fdev->common.dev = &dev->dev;

	irq = irq_of_parse_and_map(dev->node, 0);
	if (irq != NO_IRQ) {
		err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);
	of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
	kfree(fdev);
	return err;
}

static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "of-fsl-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
};

static __init int of_fsl_dma_init(void)
{
	return of_register_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_chan_init);
subsys_initcall(of_fsl_dma_init);
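
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");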