/*
 * Copyright 2015 Linaro.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"zx-dma"
#define DMA_ALIGN		4
#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR		0x00
#define REG_ZX_DST_ADDR		0x04
#define REG_ZX_TX_X_COUNT	0x08
#define REG_ZX_TX_ZY_COUNT	0x0c
#define REG_ZX_SRC_ZY_STEP	0x10
#define REG_ZX_DST_ZY_STEP	0x14
#define REG_ZX_LLI_ADDR		0x1c
#define REG_ZX_CTRL		0x20
#define REG_ZX_TC_IRQ		0x800
#define REG_ZX_SRC_ERR_IRQ	0x804
#define REG_ZX_DST_ERR_IRQ	0x808
#define REG_ZX_CFG_ERR_IRQ	0x80c
#define REG_ZX_TC_IRQ_RAW	0x810
#define REG_ZX_SRC_ERR_IRQ_RAW	0x814
#define REG_ZX_DST_ERR_IRQ_RAW	0x818
#define REG_ZX_CFG_ERR_IRQ_RAW	0x81c
#define REG_ZX_STATUS		0x820
#define REG_ZX_DMA_GRP_PRIO	0x824
#define REG_ZX_DMA_ARB		0x828

#define ZX_FORCE_CLOSE		BIT(31)
#define ZX_DST_BURST_WIDTH(x)	(((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN	16
#define ZX_SRC_BURST_LEN(x)	(((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x)	(((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL	(3 << 4)
#define ZX_DST_FIFO_MODE	BIT(3)
#define ZX_SRC_FIFO_MODE	BIT(2)
#define ZX_SOFT_REQ		BIT(1)
#define ZX_CH_ENABLE		BIT(0)

#define ZX_DMA_BUSWIDTHS				\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |		\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |		\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |		\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |		\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum zx_dma_burst_width {
	ZX_DMA_WIDTH_8BIT	= 0,
	ZX_DMA_WIDTH_16BIT	= 1,
	ZX_DMA_WIDTH_32BIT	= 2,
	ZX_DMA_WIDTH_64BIT	= 3,
};

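/*
 * Hardware link-list item (LLI). The first nine words mirror the per-channel
 * register block (REG_ZX_SRC_ADDR..REG_ZX_CTRL); the lli field holds the bus
 * address of the next item so the controller walks the chain by itself. The
 * trailing padding keeps each item the same size as the 64-byte (0x40)
 * per-channel register region, and the 32-byte alignment matches the DMA
 * pool created in probe.
 */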
struct zx_desc_hw {
	u32 saddr;
	u32 daddr;
	u32 src_x;
	u32 src_zy;
	u32 src_zy_step;
	u32 dst_zy_step;
	u32 reserved1;
	u32 lli;
	u32 ctr;
	u32 reserved[7]; /* pad to the size of the hardware register region */
} __aligned(32);

struct zx_dma_desc_sw {
	struct virt_dma_desc vd;
	dma_addr_t desc_hw_lli;
	size_t desc_num;
	size_t size;
	struct zx_desc_hw *desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
	struct dma_slave_config slave_cfg;
	int id; /* Request phy chan id */
	u32 ccfg;
	struct virt_dma_chan vc;
	struct zx_dma_phy *phy;
	struct list_head node;
	dma_addr_t dev_addr;
	enum dma_status status;
};

struct zx_dma_phy {
	u32 idx;
	void __iomem *base;
	struct zx_dma_chan *vchan;
	struct zx_dma_desc_sw *ds_run;
	struct zx_dma_desc_sw *ds_done;
};

struct zx_dma_dev {
	struct dma_device slave;
	void __iomem *base;
	spinlock_t lock; /* lock for ch and phy */
	struct list_head chan_pending;
	struct zx_dma_phy *phy;
	struct zx_dma_chan *chans;
	struct clk *clk;
	struct dma_pool *pool;
	u32 dma_channels;
	u32 dma_requests;
	int irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
	return container_of(chan, struct zx_dma_chan, vc.chan);
}

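/* Disable a physical channel and clear its raw interrupt status bits. */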
static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
	u32 val = 0;

	val = readl_relaxed(phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	writel_relaxed(val, phy->base + REG_ZX_CTRL);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
	return readl_relaxed(d->base + REG_ZX_STATUS);
}

static void zx_dma_init_state(struct zx_dma_dev *d)
{
	/* set same priority */
	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
	/* clear all irq */
	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

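/*
 * Program the next issued descriptor onto the channel's physical channel.
 * Returns -EAGAIN if no physical channel is bound, if the hardware still
 * reports the channel as busy, or if nothing is waiting to be issued.
 * Called with the virtual channel lock held.
 */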
static int zx_dma_start_txd(struct zx_dma_chan *c)
{
	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct zx_dma_desc_sw *ds =
			container_of(vd, struct zx_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);
		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		zx_dma_set_desc(c->phy, ds->desc_hw);
		return 0;
	}
	c->phy->ds_done = NULL;
	c->phy->ds_run = NULL;
	return -EAGAIN;
}

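/*
 * Channel scheduler: release physical channels whose running descriptor has
 * completed and that could not be restarted with new work, then bind pending
 * virtual channels to their requested physical channel (d->phy[c->id]) and
 * start the first descriptor on each newly bound channel.
 */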
static void zx_dma_task(struct zx_dma_dev *d)
{
	struct zx_dma_phy *p;
	struct zx_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;
	unsigned long flags;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irqsave(&c->vc.lock, flags);
		p = c->phy;
		if (p && p->ds_done && zx_dma_start_txd(c)) {
			/* No current txd associated with this channel */
			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
			/* Mark this channel free */
			c->phy = NULL;
			p->vchan = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irqsave(&d->lock, flags);
	while (!list_empty(&d->chan_pending)) {
		c = list_first_entry(&d->chan_pending,
				     struct zx_dma_chan, node);
		p = &d->phy[c->id];
		if (!p->vchan) {
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << c->id;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
		} else {
			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
		}
	}
	spin_unlock_irqrestore(&d->lock, flags);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irqsave(&c->vc.lock, flags);
				zx_dma_start_txd(c);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
		}
	}
}

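/*
 * Interrupt handler: read the transfer-complete and error interrupt status,
 * complete the cookie of the running descriptor on every channel that
 * signalled completion, acknowledge the status by writing the raw registers
 * and reschedule pending work.
 */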
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
	struct zx_dma_phy *p;
	struct zx_dma_chan *c;
	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
	u32 i, irq_chan = 0;

	while (tc) {
		i = __ffs(tc);
		tc &= ~BIT(i);
		p = &d->phy[i];
		c = p->vchan;
		if (c) {
			unsigned long flags;

			spin_lock_irqsave(&c->vc.lock, flags);
			vchan_cookie_complete(&p->ds_run->vd);
			p->ds_done = p->ds_run;
			spin_unlock_irqrestore(&c->vc.lock, flags);
		}
		irq_chan |= BIT(i);
	}

	if (serr || derr || cfg)
		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
			 serr, derr, cfg);

	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

	if (irq_chan) {
		zx_dma_task(d);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

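/*
 * Report transfer status and residue. For a descriptor still sitting on the
 * issue queue the residue is its full size; for the descriptor that is
 * currently running, the residue is summed from the LLI the hardware points
 * at (REG_ZX_LLI_ADDR) up to the end of the chain.
 */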
static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *state)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct zx_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = 0;
		clli = zx_dma_get_curr_lli(p);
		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].src_x;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void zx_dma_issue_pending(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;
	int issue = 0;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			/* if new channel, add chan_pending */
			list_add_tail(&c->node, &d->chan_pending);
			issue = 1;
			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
		}
		spin_unlock(&d->lock);
	} else {
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	if (issue)
		zx_dma_task(d);
}

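/*
 * Fill one LLI: source, destination, transfer length and control word.
 * Every item except the last is linked to the next one in the block so the
 * hardware walks the chain on its own; the caller clears the final lli
 * field and sets the IRQ enable bits on the last item.
 */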
static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if ((num + 1) < ds->desc_num)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct zx_desc_hw);
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].src_x = len;
	ds->desc_hw[num].ctr = ccfg;
}

static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
						     struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
	ds->desc_num = num;
	return ds;
}

static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		return ZX_DMA_WIDTH_32BIT;
	}
}

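/*
 * Build the channel control word (ccfg) for one transfer direction from the
 * slave configuration. Memory-to-memory uses the software request mode with
 * 32-bit accesses and the maximum burst length. For memory-to-device the
 * memory side is read 64 bits wide, so the programmed burst length is
 * rescaled from dst_maxburst and dst_addr_width and clamped to
 * ZX_MAX_BURST_LEN; for device-to-memory the peripheral's src_maxburst is
 * used directly.
 */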
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
	struct dma_slave_config *cfg = &c->slave_cfg;
	enum zx_dma_burst_width src_width;
	enum zx_dma_burst_width dst_width;
	u32 maxburst = 0;

	switch (dir) {
	case DMA_MEM_TO_MEM:
		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
		break;
	case DMA_MEM_TO_DEV:
		c->dev_addr = cfg->dst_addr;
		/* dst len is calculated from src width, len and dst width.
		 * We need to make sure dst len does not exceed MAX LEN.
		 */
		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
		maxburst = cfg->dst_maxburst * cfg->dst_addr_width
			/ DMA_SLAVE_BUSWIDTH_8_BYTES;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_64BIT)
			| ZX_DST_BURST_WIDTH(dst_width);
		break;
	case DMA_DEV_TO_MEM:
		c->dev_addr = cfg->src_addr;
		src_width = zx_dma_burst_width(cfg->src_addr_width);
		maxburst = cfg->src_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(src_width)
			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_64BIT);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

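/*
 * Prepare a memory-to-memory copy. The transfer is split into LLIs of at
 * most DMA_MAX_SIZE bytes each; only the last item raises an interrupt.
 */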
static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	if (zx_pre_config(c, DMA_MEM_TO_MEM))
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	ds->size = len;
	num = 0;

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

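/*
 * Prepare a slave scatter/gather transfer. Each scatterlist entry is split
 * into chunks of at most DMA_MAX_SIZE bytes, with the device address taken
 * from the slave configuration for the requested direction; only the last
 * LLI in the chain raises an interrupt.
 */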
static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (!sgl)
		return NULL;

	if (zx_pre_config(c, dir))
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	num = 0;
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int zx_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct zx_dma_chan *c = to_zx_chan(chan);

	if (!cfg)
		return -EINVAL;

	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

	return 0;
}

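/*
 * Abort everything on a channel: take it off the pending list, stop the
 * physical channel if one is bound, and free all queued descriptors.
 */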
static int zx_dma_terminate_all(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	struct zx_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		zx_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		p->ds_run = NULL;
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
	struct zx_dma_desc_sw *ds =
		container_of(vd, struct zx_dma_desc_sw, vd);
	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

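/*
 * Illustrative device tree node (all values below are made-up examples, not
 * taken from the binding document): the driver expects a register window,
 * one clock, one interrupt, "dma-channels"/"dma-requests" counts, and a
 * single DMA specifier cell carrying the request line, which is consumed by
 * zx_of_dma_simple_xlate() below.
 *
 *	dma: dma-controller@9c00000 {
 *		compatible = "zte,zx296702-dma";
 *		reg = <0x09c00000 0x1000>;
 *		clocks = <&topclk 1>;
 *		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 *		#dma-cells = <1>;
 *		dma-channels = <24>;
 *		dma-requests = <24>;
 *	};
 */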
static const struct of_device_id zx6702_dma_dt_ids[] = {
	{ .compatible = "zte,zx296702-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
					       struct of_dma *ofdma)
{
	struct zx_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];
	struct dma_chan *chan;
	struct zx_dma_chan *c;

	if (request > d->dma_requests)
		return NULL;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan) {
		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
		return NULL;
	}
	c = to_zx_chan(chan);
	c->id = request;
	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
		 c->id, &c->vc);
	return chan;
}

static int zx_dma_probe(struct platform_device *op)
{
	struct zx_dma_dev *d;
	struct resource *iores;
	int i, ret = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32((&op->dev)->of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32((&op->dev)->of_node,
			     "dma-requests", &d->dma_requests);
	if (!d->dma_requests || !d->dma_channels)
		return -EINVAL;

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	d->irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
			       0, DRIVER_NAME, d);
	if (ret)
		return ret;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
				   LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
	if (!d->phy)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct zx_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
	d->slave.device_tx_status = zx_dma_tx_status;
	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
	d->slave.device_issue_pending = zx_dma_issue_pending;
	d->slave.device_config = zx_dma_config;
	d->slave.device_terminate_all = zx_dma_terminate_all;
	d->slave.copy_align = DMA_ALIGN;
	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
			| BIT(DMA_DEV_TO_MEM);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
	if (!d->chans)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct zx_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = zx_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		goto zx_dma_out;
	}

	zx_dma_init_state(d);

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	platform_set_drvdata(op, d);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto clk_dis;

	ret = of_dma_controller_register((&op->dev)->of_node,
					 zx_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	dev_info(&op->dev, "initialized\n");
	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
clk_dis:
	clk_disable_unprepare(d->clk);
zx_dma_out:
	return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
	struct zx_dma_chan *c, *cn;
	struct zx_dma_dev *d = platform_get_drvdata(op);

	/* explicitly free the irq */
	devm_free_irq(&op->dev, d->irq, d);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
	}
	clk_disable_unprepare(d->clk);
	dmam_pool_destroy(d->pool);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = zx_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan 0x%x is running, fail to suspend\n", stat);
		return -1;
	}
	clk_disable_unprepare(d->clk);
	return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	zx_dma_init_state(d);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &zx_dma_pmops,
		.of_match_table = zx6702_dma_dt_ids,
	},
	.probe = zx_dma_probe,
	.remove = zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
MODULE_LICENSE("GPL v2");