#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"

#define DESC_TYPE	27
#define DESC_TYPE_HOST	0x10
#define DESC_TYPE_TEARD	0x13

#define TD_DESC_IS_RX	(1 << 16)
#define TD_DESC_DMA_NUM	10

#define DESC_LENGTH_BITS_NUM	21

#define DESC_TYPE_USB	(5 << 26)
#define DESC_PD_COMPLETE	(1 << 31)

/* DMA engine */
#define DMA_TDFDQ	4
#define DMA_TXGCR(x)	(0x800 + (x) * 0x20)
#define DMA_RXGCR(x)	(0x808 + (x) * 0x20)
#define RXHPCRA0	4

#define GCR_CHAN_ENABLE		(1 << 31)
#define GCR_TEARDOWN		(1 << 30)
#define GCR_STARV_RETRY		(1 << 24)
#define GCR_DESC_TYPE_HOST	(1 << 14)

/* DMA scheduler */
#define DMA_SCHED_CTRL		0
#define DMA_SCHED_CTRL_EN	(1 << 31)
#define DMA_SCHED_WORD(x)	((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)	((x) << 0)
#define SCHED_ENTRY0_IS_RX	(1 << 7)

#define SCHED_ENTRY1_CHAN(x)	((x) << 8)
#define SCHED_ENTRY1_IS_RX	(1 << 15)

#define SCHED_ENTRY2_CHAN(x)	((x) << 16)
#define SCHED_ENTRY2_IS_RX	(1 << 23)

#define SCHED_ENTRY3_CHAN(x)	((x) << 24)
#define SCHED_ENTRY3_IS_RX	(1 << 31)

/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM		128
#define DESCS_AREAS		1
#define TOTAL_DESCS_NUM		(ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE	(TOTAL_DESCS_NUM * 4)
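
/*
 * Arithmetic behind the comment above: 128 descriptors of 32 bytes each
 * (struct cppi41_desc is __aligned(32)) is exactly 4 KiB. The scratch area
 * sized here appears to hold one 4-byte linking-RAM entry per descriptor;
 * init_cppi41() programs it into QMGR_LRAM0_BASE.
 */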

#define QMGR_LRAM0_BASE		0x80
#define QMGR_LRAM_SIZE		0x84
#define QMGR_LRAM1_BASE		0x88
#define QMGR_MEMBASE(x)		(0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)		(0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH	16
#define QMGR_MEMCTRL_DESC_SH	8

#define QMGR_NUM_PEND	5
#define QMGR_PEND(x)	(0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)	(x / 32)
#define QMGR_PENDING_BIT_Q(x)	(x % 32)

#define QMGR_QUEUE_A(n)	(0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)	(0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)	(0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)	(0x200c + (n) * 0x10)

/* Glue layer specific */
/* USBSS / USB AM335x */
#define USBSS_IRQ_STATUS	0x28
#define USBSS_IRQ_ENABLER	0x2c
#define USBSS_IRQ_CLEARR	0x30

#define USBSS_IRQ_PD_COMP	(1 << 2)

/* Packet Descriptor */
#define PD2_ZERO_LENGTH		(1 << 19)

struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_retry;
	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;

	struct list_head node;		/* Node for pending list */
};

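/*
 * One host-mode packet descriptor: eight 32-bit words, filled in by the
 * get_host_pd*() helpers further down. pd0 carries the descriptor type and
 * packet length, pd2 the completion queue, pd3 the packet size, pd4 and pd7
 * the buffer address, and pd6 the buffer length plus the completion bit.
 */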
struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

struct chan_queues {
	u16 submit;
	u16 complete;
};

struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *usbss_mem;
	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;

	struct list_head pending;	/* Pending queued transfers */
	spinlock_t lock;		/* Lock for pending list */

	/* context for suspend/resume */
	unsigned int dma_tdfdq;

	bool is_suspended;
};

#define FIRST_COMPLETION_QUEUE	93
static struct chan_queues usb_queues_tx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 32, .complete = 93},
	[ 1] = { .submit = 34, .complete = 94},
	[ 2] = { .submit = 36, .complete = 95},
	[ 3] = { .submit = 38, .complete = 96},
	[ 4] = { .submit = 40, .complete = 97},
	[ 5] = { .submit = 42, .complete = 98},
	[ 6] = { .submit = 44, .complete = 99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 ENDP1 */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};

static const struct chan_queues usb_queues_rx[] = {
	/* USB0 ENDP 1 */
	[ 0] = { .submit = 1, .complete = 109},
	[ 1] = { .submit = 2, .complete = 110},
	[ 2] = { .submit = 3, .complete = 111},
	[ 3] = { .submit = 4, .complete = 112},
	[ 4] = { .submit = 5, .complete = 113},
	[ 5] = { .submit = 6, .complete = 114},
	[ 6] = { .submit = 7, .complete = 115},
	[ 7] = { .submit = 8, .complete = 116},
	[ 8] = { .submit = 9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 ENDP 1 */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};

struct cppi_glue_infos {
	irqreturn_t (*isr)(int irq, void *data);
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
};

static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}

static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return c;
}

static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}

static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}

static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

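	/*
	 * A register D read pops one descriptor off the queue; the low five
	 * bits carry the size encoding (see push_desc_queue()), so mask them
	 * off to recover the 32-byte aligned descriptor address.
	 */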
	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;
	return desc;
}

static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	struct cppi41_channel *c;
	u32 status;
	int i;

	status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
	if (!(status & USBSS_IRQ_PD_COMP))
		return IRQ_NONE;
	cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);

	for (i = QMGR_PENDING_SLOT_Q(FIRST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
	     i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(FIRST_COMPLETION_QUEUE) && val) {
			u32 mask;
			/* set the bit corresponding to completion Q 93 */
			mask = 1 << QMGR_PENDING_BIT_Q(FIRST_COMPLETION_QUEUE);
			/* mask now has all bits for queues below Q 93 set */
			mask--;
			/* invert and keep only Q 93 and above set */
			val &= ~mask;
		}
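		/*
		 * Worked example for the masking above: queue 93 sits in
		 * pending slot 93 / 32 = 2 at bit 93 % 32 = 29. mask starts
		 * as 1 << 29; mask-- turns that into bits 0..28, i.e. queues
		 * 64..92 of this slot; val &= ~mask therefore keeps only the
		 * completion queues 93 and up.
		 */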

		if (val)
			__iormb();

		while (val) {
			u32 desc, len;
			int error;

			error = pm_runtime_get(cdd->ddev.dev);
			if (error < 0)
				dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
					__func__, error);

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
						q_num, desc);
				continue;
			}

			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			dmaengine_desc_get_callback_invoke(&c->txd, NULL);

			pm_runtime_mark_last_busy(cdd->ddev.dev);
			pm_runtime_put_autosuspend(cdd->ddev.dev);
		}
	}
	return IRQ_HANDLED;
}

static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);

	return cookie;
}

static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
			__func__, error);
		pm_runtime_put_noidle(cdd->ddev.dev);

		return error;
	}

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return 0;
}

static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);

		return;
	}

	WARN_ON(!list_empty(&cdd->pending));

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	/* lock */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (txstate && ret == DMA_COMPLETE)
		txstate->residue = c->residue;
	/* unlock */

	return ret;
}

static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel(), so we have to make sure
	 * that the DMA descriptor in coherent memory has made it to main
	 * memory before starting the DMA engine.
	 */
	__iowmb();

	/*
	 * DMA transfers can take at least 200ms to complete with USB mass
	 * storage connected. To prevent autosuspend timeouts, we must use
	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
	 * outcome of the transfer.
	 */
	pm_runtime_get(cdd->ddev.dev);

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

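	/*
	 * Queue register D takes the descriptor size encoded as
	 * (size - 24) / 4 in its low five bits, OR'd with the 32-byte
	 * aligned descriptor address; cppi41_tear_down_chan() queues its
	 * teardown descriptor with the same encoding.
	 */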
	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}

/*
 * Caller must hold cdd->lock to prevent push_desc_queue()
 * getting called out of order. We have both cppi41_dma_issue_pending()
 * and cppi41_runtime_resume() call this function.
 */
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
	struct cppi41_channel *c, *_c;

	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
		push_desc_queue(c);
		list_del(&c->node);
	}
}

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if ((error != -EINPROGRESS) && error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
			error);

		return;
	}

	spin_lock_irqsave(&cdd->lock, flags);
	list_add_tail(&c->node, &cdd->pending);
	if (!cdd->is_suspended)
		cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}

static u32 get_host_pd0(u32 length)
{
	u32 reg;

	reg = DESC_TYPE_HOST << DESC_TYPE;
	reg |= length;

	return reg;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	u32 reg;

	reg = 0;

	return reg;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	u32 reg;

	reg = DESC_TYPE_USB;
	reg |= c->q_comp_num;

	return reg;
}

static u32 get_host_pd3(u32 length)
{
	u32 reg;

	/* PD3 = packet size */
	reg = length;

	return reg;
}

static u32 get_host_pd6(u32 length)
{
	u32 reg;

	/* PD6 buffer size */
	reg = DESC_PD_COMPLETE;
	reg |= length;

	return reg;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	u32 reg;

	reg = addr;

	return reg;
}

static u32 get_host_pd5(void)
{
	u32 reg;

	reg = 0;

	return reg;
}

static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	return &c->txd;
}

Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200617static void cppi41_compute_td_desc(struct cppi41_desc *d)
618{
619 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
620}
621
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200622static int cppi41_tear_down_chan(struct cppi41_channel *c)
623{
624 struct cppi41_dd *cdd = c->cdd;
625 struct cppi41_desc *td;
626 u32 reg;
627 u32 desc_phys;
628 u32 td_desc_phys;
629
630 td = cdd->cd;
631 td += cdd->first_td_desc;
632
633 td_desc_phys = cdd->descs_phys;
634 td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
635
636 if (!c->td_queued) {
637 cppi41_compute_td_desc(td);
638 __iowmb();
639
640 reg = (sizeof(struct cppi41_desc) - 24) / 4;
641 reg |= td_desc_phys;
642 cppi_writel(reg, cdd->qmgr_mem +
643 QMGR_QUEUE_D(cdd->td_queue.submit));
644
645 reg = GCR_CHAN_ENABLE;
646 if (!c->is_tx) {
647 reg |= GCR_STARV_RETRY;
648 reg |= GCR_DESC_TYPE_HOST;
649 reg |= c->q_comp_num;
650 }
651 reg |= GCR_TEARDOWN;
652 cppi_writel(reg, c->gcr_reg);
653 c->td_queued = 1;
Sebastian Andrzej Siewior6f9d7052014-12-03 15:09:49 +0100654 c->td_retry = 500;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200655 }
656
Sebastian Andrzej Siewior1e378a62013-10-22 12:14:05 +0200657 if (!c->td_seen || !c->td_desc_seen) {
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200658
Sebastian Andrzej Siewior1e378a62013-10-22 12:14:05 +0200659 desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
660 if (!desc_phys)
661 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200662
Sebastian Andrzej Siewior1e378a62013-10-22 12:14:05 +0200663 if (desc_phys == c->desc_phys) {
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200664 c->td_desc_seen = 1;
Sebastian Andrzej Siewior1e378a62013-10-22 12:14:05 +0200665
666 } else if (desc_phys == td_desc_phys) {
667 u32 pd0;
668
669 __iormb();
670 pd0 = td->pd0;
671 WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
672 WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
673 WARN_ON((pd0 & 0x1f) != c->port_num);
674 c->td_seen = 1;
675 } else if (desc_phys) {
676 WARN_ON_ONCE(1);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200677 }
678 }
679 c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poll
	 * its TD bit multiple times. After that the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting in
	 * between does not seem to make any difference.
	 * RX seems to be thrown out right away. However, once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD, we fetch it from the submit queue; it
	 * has to be there waiting for us.
	 */
Sebastian Andrzej Siewior754416e2014-12-03 15:09:50 +0100690 if (!c->td_seen && c->td_retry) {
691 udelay(1);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200692 return -EAGAIN;
Sebastian Andrzej Siewior754416e2014-12-03 15:09:50 +0100693 }
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200694 WARN_ON(!c->td_retry);
Sebastian Andrzej Siewior754416e2014-12-03 15:09:50 +0100695
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200696 if (!c->td_desc_seen) {
Daniel Mack706ff622013-10-22 12:14:04 +0200697 desc_phys = cppi41_pop_desc(cdd, c->q_num);
Sebastian Andrzej Siewior754416e2014-12-03 15:09:50 +0100698 if (!desc_phys)
699 desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200700 WARN_ON(!desc_phys);
701 }
702
703 c->td_queued = 0;
704 c->td_seen = 0;
705 c->td_desc_seen = 0;
706 cppi_writel(0, c->gcr_reg);
707 return 0;
708}
709
static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num])
		return 0;

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return 0;
}

static void cleanup_chans(struct cppi41_dd *cdd)
{
	while (!list_empty(&cdd->ddev.channels)) {
		struct cppi41_channel *cchan;

		cchan = list_first_entry(&cdd->ddev.channels,
				struct cppi41_channel, chan.device_node);
		list_del(&cchan->chan.device_node);
		kfree(cchan);
	}
}

Daniel Macke327e212013-09-22 16:50:00 +0200748static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200749{
750 struct cppi41_channel *cchan;
751 int i;
752 int ret;
753 u32 n_chans;
754
Daniel Macke327e212013-09-22 16:50:00 +0200755 ret = of_property_read_u32(dev->of_node, "#dma-channels",
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200756 &n_chans);
757 if (ret)
758 return ret;
	/*
	 * Each channel can only be used for either RX or TX, so register
	 * twice as many DMA channels: one TX and one RX channel per USB
	 * endpoint.
	 */
	n_chans *= 2;

	for (i = 0; i < n_chans; i++) {
		cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
		if (!cchan)
			goto err;

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
err:
	cleanup_chans(cdd);
	return -ENOMEM;
}

Daniel Macke327e212013-09-22 16:50:00 +0200793static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200794{
795 unsigned int mem_decs;
796 int i;
797
798 mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
799
800 for (i = 0; i < DESCS_AREAS; i++) {
801
802 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
803 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
804
Daniel Macke327e212013-09-22 16:50:00 +0200805 dma_free_coherent(dev, mem_decs, cdd->cd,
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200806 cdd->descs_phys);
807 }
808}
809
810static void disable_sched(struct cppi41_dd *cdd)
811{
812 cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
813}
814
Daniel Mackb46ce4d2013-09-22 16:50:01 +0200815static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200816{
817 disable_sched(cdd);
818
Daniel Macke327e212013-09-22 16:50:00 +0200819 purge_descs(dev, cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200820
821 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
822 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
Daniel Macke327e212013-09-22 16:50:00 +0200823 dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200824 cdd->scratch_phys);
825}
826
Daniel Macke327e212013-09-22 16:50:00 +0200827static int init_descs(struct device *dev, struct cppi41_dd *cdd)
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200828{
829 unsigned int desc_size;
830 unsigned int mem_decs;
831 int i;
832 u32 reg;
833 u32 idx;
834
835 BUILD_BUG_ON(sizeof(struct cppi41_desc) &
836 (sizeof(struct cppi41_desc) - 1));
837 BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
838 BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
839
840 desc_size = sizeof(struct cppi41_desc);
841 mem_decs = ALLOC_DECS_NUM * desc_size;
842
843 idx = 0;
844 for (i = 0; i < DESCS_AREAS; i++) {
845
846 reg = idx << QMGR_MEMCTRL_IDX_SH;
847 reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
848 reg |= ilog2(ALLOC_DECS_NUM) - 5;
849
850 BUILD_BUG_ON(DESCS_AREAS != 1);
Daniel Macke327e212013-09-22 16:50:00 +0200851 cdd->cd = dma_alloc_coherent(dev, mem_decs,
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +0200852 &cdd->descs_phys, GFP_KERNEL);
853 if (!cdd->cd)
854 return -ENOMEM;
855
856 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
857 cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
858
859 idx += ALLOC_DECS_NUM;
860 }
861 return 0;
862}
863
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < 15 * 2; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
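	/*
	 * Each of the 15 * 2 channels was given one TX and one RX entry
	 * above, so program the last-entry index (15 * 2 * 2 - 1) together
	 * with the enable bit.
	 */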
	reg = 15 * 2 * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}

static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);
	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}

static struct platform_driver cpp41_dma_driver;
/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
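/*
 * Illustrative only: cppi41_dma_xlate() below insists on exactly two
 * specifier cells, so a client node in a board dts would reference a
 * channel roughly like this (names and numbers are made up):
 *
 *	dmas = <&cppi41dma 0 0>, <&cppi41dma 0 1>;
 *	dma-names = "rx1", "tx1";
 */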
#define INFO_PORT	0
#define INFO_IS_TX	1

static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
	if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}

static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};

static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}
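
/*
 * Minimal consumer sketch (illustrative, not from an in-tree user): grab
 * the TX channel of port 0 straight through the filter function.
 *
 *	u32 param[2] = { [INFO_PORT] = 0, [INFO_IS_TX] = 1 };
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, cpp41_dma_filter_fn, param);
 *	if (chan)
 *		...prep, submit, issue_pending, dma_release_channel()...
 */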

static const struct cppi_glue_infos usb_infos = {
	.isr = cppi41_irq,
	.queues_rx = usb_queues_rx,
	.queues_tx = usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
};

static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);

static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	if (!of_id)
		return NULL;
	return of_id->data;
}

#define CPPI41_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001003static int cppi41_dma_probe(struct platform_device *pdev)
1004{
1005 struct cppi41_dd *cdd;
Daniel Mack717d8182013-09-22 16:50:02 +02001006 struct device *dev = &pdev->dev;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001007 const struct cppi_glue_infos *glue_info;
1008 int irq;
1009 int ret;
1010
Daniel Mack717d8182013-09-22 16:50:02 +02001011 glue_info = get_glue_info(dev);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001012 if (!glue_info)
1013 return -EINVAL;
1014
Kiran Padwalf0f3b5f2014-09-24 15:53:46 +05301015 cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001016 if (!cdd)
1017 return -ENOMEM;
1018
1019 dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
1020 cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
1021 cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
1022 cdd->ddev.device_tx_status = cppi41_dma_tx_status;
1023 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
1024 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
Maxime Ripard3b5a03a2014-11-17 14:42:10 +01001025 cdd->ddev.device_terminate_all = cppi41_stop_chan;
Felipe Balbiffeb13a2015-04-08 11:45:42 -05001026 cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1027 cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
1028 cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
1029 cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
Daniel Mack717d8182013-09-22 16:50:02 +02001030 cdd->ddev.dev = dev;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001031 INIT_LIST_HEAD(&cdd->ddev.channels);
1032 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
1033
Daniel Mack717d8182013-09-22 16:50:02 +02001034 cdd->usbss_mem = of_iomap(dev->of_node, 0);
1035 cdd->ctrl_mem = of_iomap(dev->of_node, 1);
1036 cdd->sched_mem = of_iomap(dev->of_node, 2);
1037 cdd->qmgr_mem = of_iomap(dev->of_node, 3);
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001038 spin_lock_init(&cdd->lock);
1039 INIT_LIST_HEAD(&cdd->pending);
1040
1041 platform_set_drvdata(pdev, cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001042
1043 if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
Kiran Padwalf0f3b5f2014-09-24 15:53:46 +05301044 !cdd->qmgr_mem)
1045 return -ENXIO;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001046
Daniel Mack717d8182013-09-22 16:50:02 +02001047 pm_runtime_enable(dev);
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001048 pm_runtime_set_autosuspend_delay(dev, 100);
1049 pm_runtime_use_autosuspend(dev);
Daniel Mack717d8182013-09-22 16:50:02 +02001050 ret = pm_runtime_get_sync(dev);
Sebastian Andrzej Siewiorcbf1e562013-10-22 12:14:06 +02001051 if (ret < 0)
Sebastian Andrzej Siewiord6aafa22013-08-20 18:35:53 +02001052 goto err_get_sync;
1053
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001054 cdd->queues_rx = glue_info->queues_rx;
1055 cdd->queues_tx = glue_info->queues_tx;
1056 cdd->td_queue = glue_info->td_queue;
1057
Daniel Mack717d8182013-09-22 16:50:02 +02001058 ret = init_cppi41(dev, cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001059 if (ret)
1060 goto err_init_cppi;
1061
Daniel Mack717d8182013-09-22 16:50:02 +02001062 ret = cppi41_add_chans(dev, cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001063 if (ret)
1064 goto err_chans;
1065
Daniel Mack717d8182013-09-22 16:50:02 +02001066 irq = irq_of_parse_and_map(dev->of_node, 0);
Julia Lawallf3b77722013-12-29 23:47:23 +01001067 if (!irq) {
1068 ret = -EINVAL;
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001069 goto err_irq;
Julia Lawallf3b77722013-12-29 23:47:23 +01001070 }
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001071
1072 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
1073
Kiran Padwalf0f3b5f2014-09-24 15:53:46 +05301074 ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
Daniel Mack717d8182013-09-22 16:50:02 +02001075 dev_name(dev), cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001076 if (ret)
1077 goto err_irq;
1078 cdd->irq = irq;
1079
1080 ret = dma_async_device_register(&cdd->ddev);
1081 if (ret)
1082 goto err_dma_reg;
1083
Daniel Mack717d8182013-09-22 16:50:02 +02001084 ret = of_dma_controller_register(dev->of_node,
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001085 cppi41_dma_xlate, &cpp41_dma_info);
1086 if (ret)
1087 goto err_of;
1088
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001089 pm_runtime_mark_last_busy(dev);
1090 pm_runtime_put_autosuspend(dev);
1091
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001092 return 0;
1093err_of:
1094 dma_async_device_unregister(&cdd->ddev);
1095err_dma_reg:
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001096err_irq:
1097 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1098 cleanup_chans(cdd);
1099err_chans:
Daniel Mack717d8182013-09-22 16:50:02 +02001100 deinit_cppi41(dev, cdd);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001101err_init_cppi:
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001102 pm_runtime_dont_use_autosuspend(dev);
Sebastian Andrzej Siewiord6aafa22013-08-20 18:35:53 +02001103err_get_sync:
Tony Lindgrend5afc1b2016-11-16 10:24:15 -08001104 pm_runtime_put_sync(dev);
Daniel Mack717d8182013-09-22 16:50:02 +02001105 pm_runtime_disable(dev);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001106 iounmap(cdd->usbss_mem);
1107 iounmap(cdd->ctrl_mem);
1108 iounmap(cdd->sched_mem);
1109 iounmap(cdd->qmgr_mem);
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001110 return ret;
1111}
1112
static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(&pdev->dev);
	if (error < 0)
		dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
			__func__, error);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
	devm_free_irq(&pdev->dev, cdd->irq, cdd);
	cleanup_chans(cdd);
	deinit_cppi41(&pdev->dev, cdd);
	iounmap(cdd->usbss_mem);
	iounmap(cdd->ctrl_mem);
	iounmap(cdd->sched_mem);
	iounmap(cdd->qmgr_mem);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

Arnd Bergmann522ef612016-09-06 15:20:05 +02001139static int __maybe_unused cppi41_suspend(struct device *dev)
Daniel Mackf97b98d2013-09-22 16:50:04 +02001140{
1141 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1142
Daniel Mackf8964962013-10-22 12:14:03 +02001143 cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
Daniel Mackf97b98d2013-09-22 16:50:04 +02001144 cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
1145 disable_sched(cdd);
1146
1147 return 0;
1148}
1149
Arnd Bergmann522ef612016-09-06 15:20:05 +02001150static int __maybe_unused cppi41_resume(struct device *dev)
Daniel Mackf97b98d2013-09-22 16:50:04 +02001151{
1152 struct cppi41_dd *cdd = dev_get_drvdata(dev);
Daniel Mackf8964962013-10-22 12:14:03 +02001153 struct cppi41_channel *c;
Daniel Mackf97b98d2013-09-22 16:50:04 +02001154 int i;
1155
1156 for (i = 0; i < DESCS_AREAS; i++)
1157 cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
1158
Daniel Mackf8964962013-10-22 12:14:03 +02001159 list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
1160 if (!c->is_tx)
1161 cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
1162
Daniel Mackf97b98d2013-09-22 16:50:04 +02001163 init_sched(cdd);
Daniel Mackf8964962013-10-22 12:14:03 +02001164
1165 cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
1166 cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
1167 cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
1168 cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
1169
Daniel Mackf97b98d2013-09-22 16:50:04 +02001170 cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
1171
1172 return 0;
1173}
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001174
Arnd Bergmann522ef612016-09-06 15:20:05 +02001175static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001176{
1177 struct cppi41_dd *cdd = dev_get_drvdata(dev);
Tony Lindgrenbc05a2e2017-01-19 08:49:08 -08001178 unsigned long flags;
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001179
Tony Lindgrenbc05a2e2017-01-19 08:49:08 -08001180 spin_lock_irqsave(&cdd->lock, flags);
1181 cdd->is_suspended = true;
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001182 WARN_ON(!list_empty(&cdd->pending));
Tony Lindgrenbc05a2e2017-01-19 08:49:08 -08001183 spin_unlock_irqrestore(&cdd->lock, flags);
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001184
1185 return 0;
1186}
1187
Arnd Bergmann522ef612016-09-06 15:20:05 +02001188static int __maybe_unused cppi41_runtime_resume(struct device *dev)
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001189{
1190 struct cppi41_dd *cdd = dev_get_drvdata(dev);
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001191 unsigned long flags;
1192
1193 spin_lock_irqsave(&cdd->lock, flags);
Tony Lindgrenbc05a2e2017-01-19 08:49:08 -08001194 cdd->is_suspended = false;
1195 cppi41_run_queue(cdd);
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001196 spin_unlock_irqrestore(&cdd->lock, flags);
1197
1198 return 0;
1199}
Daniel Mackf97b98d2013-09-22 16:50:04 +02001200
Tony Lindgrenfdea2d02016-08-31 07:19:59 -07001201static const struct dev_pm_ops cppi41_pm_ops = {
1202 SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
1203 SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
1204 cppi41_runtime_resume,
1205 NULL)
1206};
Daniel Mackf97b98d2013-09-22 16:50:04 +02001207
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001208static struct platform_driver cpp41_dma_driver = {
1209 .probe = cppi41_dma_probe,
1210 .remove = cppi41_dma_remove,
1211 .driver = {
1212 .name = "cppi41-dma-engine",
Daniel Mackf97b98d2013-09-22 16:50:04 +02001213 .pm = &cppi41_pm_ops,
Sebastian Andrzej Siewior9b3452d2013-06-20 12:13:04 +02001214 .of_match_table = of_match_ptr(cppi41_dma_ids),
1215 },
1216};
1217
1218module_platform_driver(cpp41_dma_driver);
1219MODULE_LICENSE("GPL");
1220MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");