/*
 * Copyright (c) 2013 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c
#define AXI_CFG			0x820
#define AXI_CFG_DEFAULT		0x201201

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

struct k3_desc_hw {
        u32 lli;
        u32 reserved[3];
        u32 count;
        u32 saddr;
        u32 daddr;
        u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
        struct virt_dma_desc	vd;
        dma_addr_t		desc_hw_lli;
        size_t			desc_num;
        size_t			size;
        struct k3_desc_hw	desc_hw[0];
};

struct k3_dma_phy;

struct k3_dma_chan {
        u32			ccfg;
        struct virt_dma_chan	vc;
        struct k3_dma_phy	*phy;
        struct list_head	node;
        enum dma_transfer_direction dir;
        dma_addr_t		dev_addr;
        enum dma_status		status;
};

struct k3_dma_phy {
        u32			idx;
        void __iomem		*base;
        struct k3_dma_chan	*vchan;
        struct k3_dma_desc_sw	*ds_run;
        struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
        struct dma_device	slave;
        void __iomem		*base;
        struct tasklet_struct	task;
        spinlock_t		lock;
        struct list_head	chan_pending;
        struct k3_dma_phy	*phy;
        struct k3_dma_chan	*chans;
        struct clk		*clk;
        u32			dma_channels;
        u32			dma_requests;
        unsigned int		irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
        return container_of(chan, struct k3_dma_chan, vc.chan);
}

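/* Set or clear CX_CFG_EN so the physical channel runs (on) or pauses */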
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
        u32 val = 0;

        if (on) {
                val = readl_relaxed(phy->base + CX_CFG);
                val |= CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        } else {
                val = readl_relaxed(phy->base + CX_CFG);
                val &= ~CX_CFG_EN;
                writel_relaxed(val, phy->base + CX_CFG);
        }
}

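/* Stop the physical channel and clear its raw TC/error interrupt status */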
static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
        u32 val = 0;

        k3_dma_pause_dma(phy, false);

        val = 0x1 << phy->idx;
        writel_relaxed(val, d->base + INT_TC1_RAW);
        writel_relaxed(val, d->base + INT_ERR1_RAW);
        writel_relaxed(val, d->base + INT_ERR2_RAW);
}

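/*
 * Program one hardware descriptor into the channel registers; CX_CFG is
 * written last, and since the config value carries CX_CFG_EN this kicks
 * off the transfer.
 */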
static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
        writel_relaxed(hw->lli, phy->base + CX_LLI);
        writel_relaxed(hw->count, phy->base + CX_CNT);
        writel_relaxed(hw->saddr, phy->base + CX_SRC);
        writel_relaxed(hw->daddr, phy->base + CX_DST);
        writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
        writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
        u32 cnt = 0;

        cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
        cnt &= 0xffff;
        return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
        return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
        return readl_relaxed(d->base + CH_STAT);
}

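/* Controller-level enable: equal channel priority and TC/error irq (un)masking */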
static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
        if (on) {
                /* set same priority */
                writel_relaxed(0x0, d->base + CH_PRI);

                /* unmask irq */
                writel_relaxed(0xffff, d->base + INT_TC1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
                writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
        } else {
                /* mask irq */
                writel_relaxed(0x0, d->base + INT_TC1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR1_MASK);
                writel_relaxed(0x0, d->base + INT_ERR2_MASK);
        }
}

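/*
 * Interrupt handler: for every channel flagged in INT_STAT, complete the
 * descriptor that hit terminal count, warn on error interrupts, ack the
 * raw status and kick the tasklet to schedule further work.
 */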
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c;
        u32 stat = readl_relaxed(d->base + INT_STAT);
        u32 tc1 = readl_relaxed(d->base + INT_TC1);
        u32 err1 = readl_relaxed(d->base + INT_ERR1);
        u32 err2 = readl_relaxed(d->base + INT_ERR2);
        u32 i, irq_chan = 0;

        while (stat) {
                i = __ffs(stat);
                stat &= (stat - 1);
                if (likely(tc1 & BIT(i))) {
                        p = &d->phy[i];
                        c = p->vchan;
                        if (c) {
                                unsigned long flags;

                                spin_lock_irqsave(&c->vc.lock, flags);
                                vchan_cookie_complete(&p->ds_run->vd);
                                p->ds_done = p->ds_run;
                                spin_unlock_irqrestore(&c->vc.lock, flags);
                        }
                        irq_chan |= BIT(i);
                }
                if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
                        dev_warn(d->slave.dev, "DMA ERR\n");
        }

        writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
        writel_relaxed(err1, d->base + INT_ERR1_RAW);
        writel_relaxed(err2, d->base + INT_ERR2_RAW);

        if (irq_chan)
                tasklet_schedule(&d->task);

        if (irq_chan || err1 || err2)
                return IRQ_HANDLED;

        return IRQ_NONE;
}

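/*
 * Load the next issued descriptor of this virtual channel onto its physical
 * channel; returns -EAGAIN if no pchan is attached, the pchan is still busy,
 * or there is nothing left to issue.
 */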
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
        struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        if (!c->phy)
                return -EAGAIN;

        if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
                return -EAGAIN;

        if (vd) {
                struct k3_dma_desc_sw *ds =
                        container_of(vd, struct k3_dma_desc_sw, vd);
                /*
                 * fetch and remove request from vc->desc_issued
                 * so vc->desc_issued only contains desc pending
                 */
                list_del(&ds->vd.node);
                c->phy->ds_run = ds;
                c->phy->ds_done = NULL;
                /* start dma */
                k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
                return 0;
        }
        c->phy->ds_done = NULL;
        c->phy->ds_run = NULL;
        return -EAGAIN;
}

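/*
 * Deferred scheduler: restart channels whose descriptor completed (or release
 * their pchan), then hand free physical channels to virtual channels waiting
 * on d->chan_pending and start them.
 */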
static void k3_dma_tasklet(unsigned long arg)
{
        struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
        struct k3_dma_phy *p;
        struct k3_dma_chan *c, *cn;
        unsigned pch, pch_alloc = 0;

        /* check new dma request of running channel in vc->desc_issued */
        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && p->ds_done) {
                        if (k3_dma_start_txd(c)) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        /* check new channel request in d->chan_pending */
        spin_lock_irq(&d->lock);
        for (pch = 0; pch < d->dma_channels; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct k3_dma_chan, node);
                        /* remove from d->chan_pending */
                        list_del_init(&c->node);
                        pch_alloc |= 1 << pch;
                        /* Mark this channel allocated */
                        p->vchan = c;
                        c->phy = p;
                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < d->dma_channels; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;
                        if (c) {
                                spin_lock_irq(&c->vc.lock);
                                k3_dma_start_txd(c);
                                spin_unlock_irq(&c->vc.lock);
                        }
                }
        }
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
        c->ccfg = 0;
}

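/*
 * Report cookie status and residue: either the full size of a still-issued
 * descriptor, or the remaining byte count reported by the hardware plus the
 * counts of the LLI entries that have not run yet.
 */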
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;
        ret = c->status;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
        } else if ((!p) || (!p->ds_run)) {
                bytes = 0;
        } else {
                struct k3_dma_desc_sw *ds = p->ds_run;
                u32 clli = 0, index = 0;

                bytes = k3_dma_get_curr_cnt(d, p);
                clli = k3_dma_get_curr_lli(p);
                index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw);
                for (; index < ds->desc_num; index++) {
                        bytes += ds->desc_hw[index].count;
                        /* end of lli */
                        if (!ds->desc_hw[index].lli)
                                break;
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        dma_set_residue(state, bytes);
        return ret;
}

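/*
 * Move submitted descriptors to the issued list; if the channel has no
 * physical channel yet, queue it on chan_pending and let the tasklet
 * assign one.
 */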
static void k3_dma_issue_pending(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        /* add request to vc->desc_issued */
        if (vchan_issue_pending(&c->vc)) {
                spin_lock(&d->lock);
                if (!c->phy) {
                        if (list_empty(&c->node)) {
                                /* if new channel, add chan_pending */
                                list_add_tail(&c->node, &d->chan_pending);
                                /* check in tasklet */
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                }
                spin_unlock(&d->lock);
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

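/*
 * Fill hardware LLI entry 'num'; entries are chained to the following one,
 * and the caller clears the link field of the final entry.
 */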
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
        dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
        if ((num + 1) < ds->desc_num)
                ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
                        sizeof(struct k3_desc_hw);
        ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
        ds->desc_hw[num].count = len;
        ds->desc_hw[num].saddr = src;
        ds->desc_hw[num].daddr = dst;
        ds->desc_hw[num].config = ccfg;
}

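/*
 * Build a memcpy descriptor: the copy is split into DMA_MAX_SIZE sized
 * chunks, one LLI entry per chunk.
 */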
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t copy = 0;
        int num = 0;

        if (!len)
                return NULL;

        num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds)
                return NULL;

        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->size = len;
        ds->desc_num = num;
        num = 0;

        if (!c->ccfg) {
                /* default is memtomem, without calling device_config */
                c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
                c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
                c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
        }

        do {
                copy = min_t(size_t, len, DMA_MAX_SIZE);
                k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

                if (c->dir == DMA_MEM_TO_DEV) {
                        src += copy;
                } else if (c->dir == DMA_DEV_TO_MEM) {
                        dst += copy;
                } else {
                        src += copy;
                        dst += copy;
                }
                len -= copy;
        } while (len);

        ds->desc_hw[num-1].lli = 0;	/* end of link */
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

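/*
 * Build a slave transfer from a scatterlist: every segment larger than
 * DMA_MAX_SIZE is split over several LLI entries.
 */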
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_desc_sw *ds;
        size_t len, avail, total = 0;
        struct scatterlist *sg;
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;

        if (sgl == NULL)
                return NULL;

        for_each_sg(sgl, sg, sglen, i) {
                avail = sg_dma_len(sg);
                if (avail > DMA_MAX_SIZE)
                        num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
        }

        ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
        if (!ds)
                return NULL;

        ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
        ds->desc_num = num;
        num = 0;

        for_each_sg(sgl, sg, sglen, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);
                total += avail;

                do {
                        len = min_t(size_t, avail, DMA_MAX_SIZE);

                        if (dir == DMA_MEM_TO_DEV) {
                                src = addr;
                                dst = c->dev_addr;
                        } else if (dir == DMA_DEV_TO_MEM) {
                                src = c->dev_addr;
                                dst = addr;
                        }

                        k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

                        addr += len;
                        avail -= len;
                } while (avail);
        }

        ds->desc_hw[num-1].lli = 0;	/* end of link */
        ds->size = total;
        return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

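/*
 * Translate the dma_slave_config (direction, device address, bus width,
 * burst length) into the CX_CFG value used for subsequent transfers.
 */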
static int k3_dma_config(struct dma_chan *chan,
        struct dma_slave_config *cfg)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        u32 maxburst = 0, val = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (cfg == NULL)
                return -EINVAL;
        c->dir = cfg->direction;
        if (c->dir == DMA_DEV_TO_MEM) {
                c->ccfg = CX_CFG_DSTINCR;
                c->dev_addr = cfg->src_addr;
                maxburst = cfg->src_maxburst;
                width = cfg->src_addr_width;
        } else if (c->dir == DMA_MEM_TO_DEV) {
                c->ccfg = CX_CFG_SRCINCR;
                c->dev_addr = cfg->dst_addr;
                maxburst = cfg->dst_maxburst;
                width = cfg->dst_addr_width;
        }
        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                val = __ffs(width);
                break;
        default:
                val = 3;
                break;
        }
        c->ccfg |= (val << 12) | (val << 16);

        if ((maxburst == 0) || (maxburst > 16))
                val = 15;
        else
                val = maxburst - 1;
        c->ccfg |= (val << 20) | (val << 24);
        c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

        /* specific request line */
        c->ccfg |= c->vc.chan.chan_id << 4;

        return 0;
}

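/* Stop the hardware channel, detach it from the vchan and free all queued descriptors */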
static int k3_dma_terminate_all(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;
        LIST_HEAD(head);

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);
        if (p) {
                /* vchan is assigned to a pchan - stop the channel */
                k3_dma_terminate_chan(p, d);
                c->phy = NULL;
                p->vchan = NULL;
                p->ds_run = p->ds_done = NULL;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;
                if (p) {
                        k3_dma_pause_dma(p, false);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }

        return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
        struct k3_dma_chan *c = to_k3_chan(chan);
        struct k3_dma_dev *d = to_k3_dma(chan->device);
        struct k3_dma_phy *p = c->phy;
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;
                if (p) {
                        k3_dma_pause_dma(p, true);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
        struct k3_dma_desc_sw *ds =
                container_of(vd, struct k3_dma_desc_sw, vd);

        kfree(ds);
}

static const struct of_device_id k3_pdma_dt_ids[] = {
        { .compatible = "hisilicon,k3-dma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

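/* #dma-cells translation: map a DT request number onto its pre-allocated virtual channel */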
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
{
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request >= d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

static int k3_dma_probe(struct platform_device *op)
{
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
        struct resource *iores;
        int i, ret, irq = 0;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->base = devm_ioremap_resource(&op->dev, iores);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);

        of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
        if (of_id) {
                of_property_read_u32((&op->dev)->of_node,
                                "dma-channels", &d->dma_channels);
                of_property_read_u32((&op->dev)->of_node,
                                "dma-requests", &d->dma_requests);
        }

        d->clk = devm_clk_get(&op->dev, NULL);
        if (IS_ERR(d->clk)) {
                dev_err(&op->dev, "no dma clk\n");
                return PTR_ERR(d->clk);
        }

        irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, irq,
                        k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;

        d->irq = irq;

        /* init phy channel */
        d->phy = devm_kzalloc(&op->dev,
                d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
        if (d->phy == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_channels; i++) {
                struct k3_dma_phy *p = &d->phy[i];

                p->idx = i;
                p->base = d->base + i * 0x40;
        }

        INIT_LIST_HEAD(&d->slave.channels);
        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
        d->slave.dev = &op->dev;
        d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
        d->slave.device_tx_status = k3_dma_tx_status;
        d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
        d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
        d->slave.device_issue_pending = k3_dma_issue_pending;
        d->slave.device_config = k3_dma_config;
        d->slave.device_pause = k3_dma_transfer_pause;
        d->slave.device_resume = k3_dma_transfer_resume;
        d->slave.device_terminate_all = k3_dma_terminate_all;
        d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

        /* init virtual channel */
        d->chans = devm_kzalloc(&op->dev,
                d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
        if (d->chans == NULL)
                return -ENOMEM;

        for (i = 0; i < d->dma_requests; i++) {
                struct k3_dma_chan *c = &d->chans[i];

                c->status = DMA_IN_PROGRESS;
                INIT_LIST_HEAD(&c->node);
                c->vc.desc_free = k3_dma_free_desc;
                vchan_init(&c->vc, &d->slave);
        }

        /* Enable clock before accessing registers */
        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }

        k3_dma_enable_dma(d, true);

        ret = dma_async_device_register(&d->slave);
        if (ret)
                goto dma_async_register_fail;

        ret = of_dma_controller_register((&op->dev)->of_node,
                                        k3_of_dma_simple_xlate, d);
        if (ret)
                goto of_dma_register_fail;

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);
        tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
        platform_set_drvdata(op, d);
        dev_info(&op->dev, "initialized\n");

        return 0;

of_dma_register_fail:
        dma_async_device_unregister(&d->slave);
dma_async_register_fail:
        clk_disable_unprepare(d->clk);
        return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
        struct k3_dma_chan *c, *cn;
        struct k3_dma_dev *d = platform_get_drvdata(op);

        dma_async_device_unregister(&d->slave);
        of_dma_controller_free((&op->dev)->of_node);

        devm_free_irq(&op->dev, d->irq, d);

        list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
        }
        tasklet_kill(&d->task);
        clk_disable_unprepare(d->clk);
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        u32 stat = 0;

        stat = k3_dma_get_chan_stat(d);
        if (stat) {
                dev_warn(d->slave.dev,
                        "chan %d is running, fail to suspend\n", stat);
                return -1;
        }
        k3_dma_enable_dma(d, false);
        clk_disable_unprepare(d->clk);
        return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
        struct k3_dma_dev *d = dev_get_drvdata(dev);
        int ret = 0;

        ret = clk_prepare_enable(d->clk);
        if (ret < 0) {
                dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
                return ret;
        }
        k3_dma_enable_dma(d, true);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
        .driver		= {
                .name	= DRIVER_NAME,
                .pm	= &k3_dma_pmops,
                .of_match_table = k3_pdma_dt_ids,
        },
        .probe		= k3_dma_probe,
        .remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");