blob: f63be92cac40ea1c63cd23056b374c809b1993f9 [file] [log] [blame]
/* linux/arch/arm/mach-msm/dma.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
16
Arve Hjønnevågc5541072009-06-25 17:03:14 -070017#include <linux/clk.h>
18#include <linux/err.h>
Russell Kingfced80c2008-09-06 12:10:45 +010019#include <linux/io.h>
Arve Hjønnevågbfe645a2007-11-26 04:12:29 -080020#include <linux/interrupt.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/spinlock.h>
24#include <linux/pm_runtime.h>
Russell Kinga09e64f2008-08-05 16:14:15 +010025#include <mach/dma.h>
Arve Hjønnevågbfe645a2007-11-26 04:12:29 -080026
#define MODULE_NAME "msm_dmov"

#define MSM_DMOV_CHANNEL_COUNT 16
#define MSM_DMOV_CRCI_COUNT 16

/*
 * Clock state machine for an ADM core:
 *   CLK_DIS       - clocks off
 *   CLK_TO_BE_DIS - idle, disable timer armed (see msm_dmov_clock_timer)
 *   CLK_EN        - clocks on
 */
enum {
	CLK_DIS,
	CLK_TO_BE_DIS,
	CLK_EN
};

/* NOTE(review): not referenced anywhere in this file -- confirm other users. */
struct msm_dmov_ci_conf {
	int start;
	int end;
	int burst;
};

/* Per-CRCI configuration: security domain and burst block size. */
struct msm_dmov_crci_conf {
	int sd;
	int blk_size;
};

/* Per-channel configuration: security domain, block, priority. */
struct msm_dmov_chan_conf {
	int sd;
	int block;
	int priority;
};

/* State for one ADM (Application Data Mover) core. */
struct msm_dmov_conf {
	void *base;		/* register base for this ADM */
	struct msm_dmov_crci_conf *crci_conf;
	struct msm_dmov_chan_conf *chan_conf;
	int channel_active;	/* bitmask of channels with active commands */
	struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
	struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
	unsigned int crci_mask;	/* 2 bits per CRCI: current mux/usage state */
	spinlock_t lock;	/* protects lists, masks and clk_ctl */
	unsigned int irq;
	struct clk *clk;	/* core clock (mandatory) */
	struct clk *pclk;	/* pbus clock (optional, may be NULL) */
	struct clk *ebiclk;	/* EBI1 clock (optional, may be NULL) */
	unsigned int clk_ctl;	/* CLK_DIS / CLK_TO_BE_DIS / CLK_EN */
	struct timer_list timer; /* deferred clock-disable timer */
};

static void msm_dmov_clock_timer(unsigned long);
static int msm_dmov_clk_toggle(int, int);
74
#ifdef CONFIG_ARCH_MSM8X60

/* Channel configuration shorthands; .sd selects the owning security domain
 * (1 = apps/scorpion, 3 = modem -- inferred from usage, confirm with HW docs).
 */
#define DMOV_CHANNEL_DEFAULT_CONF { .sd = 1, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_MODEM_CONF { .sd = 3, .block = 0, .priority = 0 }
#define DMOV_CHANNEL_CONF(secd, blk, pri) \
	{ .sd = secd, .block = blk, .priority = pri }

/* ADM0: channels 10-14 belong to the modem. */
static struct msm_dmov_chan_conf adm0_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
};

/* ADM1: channels 10-15 belong to the modem. */
static struct msm_dmov_chan_conf adm1_chan_conf[] = {
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_DEFAULT_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
	DMOV_CHANNEL_MODEM_CONF,
};

#define DMOV_CRCI_DEFAULT_CONF { .sd = 1, .blk_size = 0 }
#define DMOV_CRCI_CONF(secd, blk) { .sd = secd, .blk_size = blk }

/* ADM0: CRCI 6 uses a non-default burst block size of 4. */
static struct msm_dmov_crci_conf adm0_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 4),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
};

/* ADM1: CRCIs 1, 2, 4, 5 and 14 use block size 1. */
static struct msm_dmov_crci_conf adm1_crci_conf[] = {
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_DEFAULT_CONF,
	DMOV_CRCI_CONF(1, 1),
	DMOV_CRCI_DEFAULT_CONF,
};

/* MSM8x60 has two ADM cores; the timer callback gets the ADM index. */
static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = adm0_crci_conf,
		.chan_conf = adm0_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}, {
		.crci_conf = adm1_crci_conf,
		.chan_conf = adm1_chan_conf,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 1),
	}
};
#else
/* Other targets: a single ADM with no static channel/CRCI configuration. */
static struct msm_dmov_conf dmov_conf[] = {
	{
		.crci_conf = NULL,
		.chan_conf = NULL,
		.lock = __SPIN_LOCK_UNLOCKED(dmov_lock),
		.clk_ctl = CLK_DIS,
		.timer = TIMER_INITIALIZER(msm_dmov_clock_timer, 0, 0),
	}
};
#endif
187
/* A global "id" encodes (adm, channel); these macros convert between them. */
#define MSM_DMOV_ID_COUNT (MSM_DMOV_CHANNEL_COUNT * ARRAY_SIZE(dmov_conf))
#define DMOV_REG(name, adm)    ((name) + (dmov_conf[adm].base))
#define DMOV_ID_TO_ADM(id)   ((id) / MSM_DMOV_CHANNEL_COUNT)
#define DMOV_ID_TO_CHAN(id)   ((id) % MSM_DMOV_CHANNEL_COUNT)
#define DMOV_CHAN_ADM_TO_ID(ch, adm) ((ch) + (adm) * MSM_DMOV_CHANNEL_COUNT)

#ifdef CONFIG_MSM_ADM3
/* ADM1's IRQs map to adm index 1; everything else is adm 0. */
#define DMOV_IRQ_TO_ADM(irq)   \
({ \
	typeof(irq) _irq = irq; \
	((_irq == INT_ADM1_MASTER) || (_irq == INT_ADM1_AARM)); \
})
#else
#define DMOV_IRQ_TO_ADM(irq) 0
#endif

/* Debug print categories, OR-ed into msm_dmov_print_mask. */
enum {
	MSM_DMOV_PRINT_ERRORS = 1,
	MSM_DMOV_PRINT_IO = 2,
	MSM_DMOV_PRINT_FLOW = 4
};

unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;

#define MSM_DMOV_DPRINTF(mask, format, args...) \
	do { \
		if ((mask) & msm_dmov_print_mask) \
			printk(KERN_ERR format, args); \
	} while (0)
#define PRINT_ERROR(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_ERRORS, format, args);
#define PRINT_IO(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_IO, format, args);
#define PRINT_FLOW(format, args...) \
	MSM_DMOV_DPRINTF(MSM_DMOV_PRINT_FLOW, format, args);
223
/*
 * Enable (on != 0) or disable (on == 0) the clocks of ADM @adm.
 *
 * Enable order is clk -> pclk -> ebiclk, with rollback of the already
 * enabled clocks if a later one fails.  pclk/ebiclk are optional (NULL on
 * SoCs that lack them).  Returns 0 on success or the clk_enable() error.
 * Note: an ebiclk enable failure rolls back but still returns the error
 * without going through the disable branch -- callers treat any non-zero
 * return as "nothing left enabled".
 */
static int msm_dmov_clk_toggle(int adm, int on)
{
	int ret = 0;

	if (on) {
		ret = clk_enable(dmov_conf[adm].clk);
		if (ret)
			goto err;
		if (dmov_conf[adm].pclk) {
			ret = clk_enable(dmov_conf[adm].pclk);
			if (ret) {
				clk_disable(dmov_conf[adm].clk);
				goto err;
			}
		}
		if (dmov_conf[adm].ebiclk) {
			ret = clk_enable(dmov_conf[adm].ebiclk);
			if (ret) {
				if (dmov_conf[adm].pclk)
					clk_disable(dmov_conf[adm].pclk);
				clk_disable(dmov_conf[adm].clk);
			}
		}
	} else {
		clk_disable(dmov_conf[adm].clk);
		if (dmov_conf[adm].pclk)
			clk_disable(dmov_conf[adm].pclk);
		if (dmov_conf[adm].ebiclk)
			clk_disable(dmov_conf[adm].ebiclk);
	}
err:
	return ret;
}
257
/*
 * Deferred clock-disable timer callback for ADM @adm.
 *
 * Armed when the ADM goes idle (clk_ctl == CLK_TO_BE_DIS); if no new work
 * arrived in the meantime, turn the clocks off.  A busy channel while in
 * CLK_TO_BE_DIS indicates a state-machine bug, hence the BUG_ON.
 */
static void msm_dmov_clock_timer(unsigned long adm)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
269
/*
 * Stop the command running on channel @id by writing the flush register.
 * @graceful selects a graceful flush (bit 31 of FLUSH0); @cmd is unused.
 *
 * Fix: shift on an unsigned value -- `graceful << 31` with int operands
 * left-shifts into the sign bit, which is undefined behavior in C.
 */
void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
{
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);
	writel_relaxed(((unsigned int)graceful << 31),
		       DMOV_REG(DMOV_FLUSH0(ch), adm));
	wmb();
}
EXPORT_SYMBOL(msm_dmov_stop_cmd);
278
/* Per-CRCI 2-bit states packed into a mask. */
#define CRCI_UNUSED   0
#define CRCI_CONFLICT 1
#define CRCI_MUX_OFF  2
#define CRCI_MUX_ON   3

#ifdef CONFIG_MSM_ADM3
/*
 * Compare two packed CRCI masks.  Returns 1 if any CRCI's 2-bit fields
 * differ in exactly the CRCI_CONFLICT pattern (low bit differs, high bit
 * equal), 0 otherwise.
 */
static int crci_mask_compare(unsigned int x, unsigned int y)
{
	unsigned int diff = x ^ y;
	int i;

	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		if (((diff >> (2 * i)) & 3) == CRCI_CONFLICT)
			return 1;
	}
	return 0;
}
#endif
297
/*
 * Check whether @cmd's CRCI usage conflicts with any command currently
 * active on ADM @adm.  Collects the OR of all active commands' crci_mask
 * values and compares it against the new command's mask.
 *
 * Returns non-zero on conflict.  Without CONFIG_MSM_ADM3 there is no CRCI
 * tracking and this always returns 0.  Caller must hold dmov_conf[adm].lock.
 */
static int check_crci_conflict(struct msm_dmov_cmd *cmd, int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	struct msm_dmov_cmd *iter;
	struct list_head *cmd_list;
	unsigned int active_crci_mask = 0;

	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		cmd_list = &dmov_conf[adm].active_commands[i];
		list_for_each_entry(iter, cmd_list, list) {
			active_crci_mask |= iter->crci_mask;
		}
	}
	return crci_mask_compare(cmd->crci_mask, active_crci_mask);
#endif
	/* Unreachable when CONFIG_MSM_ADM3 is set. */
	return 0;
}
316
/* A CRCI argument encodes the mux selector in bit 4 and the CRCI number
 * in bits 0-3. */
#define CRCI_MUXSEL(n) (((n) >> 4) & 1)
#define CRCI_NUM(n) ((n) & 0xF)

/*
 * Build a packed 2-bits-per-CRCI mask from @n CRCI arguments.  For each
 * CRCI the high bit marks it as used and the low bit carries its mux
 * selector.  Returns 0 when CONFIG_MSM_ADM3 is not set.
 */
unsigned int msm_dmov_build_crci_mask(int n, ...)
{
	unsigned int mask = 0;
#ifdef CONFIG_MSM_ADM3
	va_list args;
	int i;

	va_start(args, n);
	for (i = 0; i < n; i++) {
		int crci = va_arg(args, int);
		int num = CRCI_NUM(crci);
		unsigned int muxsel = CRCI_MUXSEL(crci);

		mask |= 1U << (2 * num + 1);	/* "in use" bit */
		mask |= muxsel << (2 * num);	/* mux selector bit */
	}
	va_end(args);
#endif
	return mask;
}
EXPORT_SYMBOL(msm_dmov_build_crci_mask);
342
343
/*
 * Program the CRCI control registers of ADM @adm so its recorded crci_mask
 * matches @crci_mask.  For each CRCI whose 2-bit state conflicts with the
 * current one, rewrite DMOV_CRCI_CTL with the configured block size and
 * (for CRCI_MUX_ON) the mux bit, then fold the new state into the cached
 * mask.  No-op without CONFIG_MSM_ADM3.  Caller must hold the ADM lock.
 */
static void set_crci_mask(int crci_mask, int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	int blk_size;
	unsigned int crci_ctl;
	unsigned int tmp_crci_mask;
	unsigned int blank_mask;

	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		tmp_crci_mask = (crci_mask >> (2*i)) & 3;
		/* Only touch CRCIs whose state actually changes. */
		if (crci_mask_compare(dmov_conf[adm].crci_mask,
				      tmp_crci_mask << (2*i))) {
			blank_mask = ~(3 << (2*i));
			blk_size = dmov_conf[adm].crci_conf[i].blk_size;
			crci_ctl = DMOV_CRCI_CTL_BLK_SZ(blk_size);
			if (tmp_crci_mask == CRCI_MUX_ON)
				crci_ctl |= DMOV_CRCI_MUX;

			writel_relaxed(crci_ctl, DMOV_REG(DMOV_CRCI_CTL(i),
							  adm));
			/* Update the cached per-ADM CRCI state. */
			dmov_conf[adm].crci_mask &= blank_mask;
			dmov_conf[adm].crci_mask |= (tmp_crci_mask << (2*i));
		}
	}
	wmb();
#endif
}
372
/*
 * Kick off queued commands on ADM @adm: for every channel whose ready list
 * is non-empty, start the head command if it has no CRCI conflict and the
 * channel's command pointer is ready.  No-op without CONFIG_MSM_ADM3.
 * Caller must hold the ADM lock.
 *
 * Fix: the original computed list_entry(rdy->next, ...) before testing
 * list_empty(rdy), deriving a bogus command pointer from the list head of
 * an empty list (never dereferenced thanks to short-circuiting, but
 * fragile).  Check for emptiness first.
 */
static void start_ready_cmds(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	unsigned int status;
	struct list_head *rdy;
	struct list_head *act;
	struct msm_dmov_cmd *cmd;

	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		rdy = &dmov_conf[adm].ready_commands[i];
		act = &dmov_conf[adm].active_commands[i];
		if (list_empty(rdy))
			continue;
		cmd = list_entry(rdy->next, typeof(*cmd), list);
		if (!check_crci_conflict(cmd, adm)) {
			status = readl_relaxed(DMOV_REG(DMOV_STATUS(i), adm));
			if (status & DMOV_STATUS_CMD_PTR_RDY) {
				/* Move to the active list and start it. */
				list_del(&cmd->list);
				list_add_tail(&cmd->list, act);
				dmov_conf[adm].channel_active |= (1 << i);
				set_crci_mask(cmd->crci_mask, adm);
				writel_relaxed(cmd->cmdptr,
					       DMOV_REG(DMOV_CMD_PTR(i), adm));
			}
		}
	}
#endif
}
399
/*
 * Queue @cmd on channel @id and start it immediately if possible.
 *
 * Ensures the ADM clocks are on (cancelling a pending deferred disable).
 * If the channel's command pointer is ready and there is no CRCI conflict,
 * the command goes straight to the active list and the hardware; otherwise
 * it is appended to the ready list for the IRQ handler / start_ready_cmds
 * to pick up later.  The first active command on an otherwise idle ADM
 * re-enables the ADM IRQ.
 *
 * NOTE(review): the msm_dmov_clk_toggle(adm, 1) return value is ignored;
 * a clock-enable failure would fall through with clocks off -- confirm
 * whether that is acceptable here.
 */
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd)
{
	unsigned long irq_flags;
	unsigned int status;
	int conflict;
	int adm = DMOV_ID_TO_ADM(id);
	int ch = DMOV_ID_TO_CHAN(id);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_DIS)
		msm_dmov_clk_toggle(adm, 1);
	else if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS)
		del_timer(&dmov_conf[adm].timer);
	dmov_conf[adm].clk_ctl = CLK_EN;

	status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
	conflict = check_crci_conflict(cmd, adm);
	if ((status & DMOV_STATUS_CMD_PTR_RDY) && !conflict) {
		PRINT_IO("msm_dmov_enqueue_cmd(%d), start command, status %x\n",
			id, status);
		if (cmd->exec_func)
			cmd->exec_func(cmd);
		list_add_tail(&cmd->list, &dmov_conf[adm].active_commands[ch]);
		if (!dmov_conf[adm].channel_active)
			enable_irq(dmov_conf[adm].irq);
		dmov_conf[adm].channel_active |= 1U << ch;
		PRINT_IO("Writing %x exactly to register", cmd->cmdptr);
		set_crci_mask(cmd->crci_mask, adm);
		writel_relaxed(cmd->cmdptr, DMOV_REG(DMOV_CMD_PTR(ch), adm));
	} else {
		/* Nothing running: arm the deferred clock-disable timer. */
		if (!dmov_conf[adm].channel_active) {
			dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
			mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
		}
		/* Channel idle yet not ready and no conflict: likely stalled. */
		if (list_empty(&dmov_conf[adm].active_commands[ch])
			&& !conflict)
			PRINT_ERROR("msm_dmov_enqueue_cmd_ext(%d), stalled, "
				"status %x\n", id, status);
		PRINT_IO("msm_dmov_enqueue_cmd(%d), enqueue command, status "
			"%x\n", id, status);
		list_add_tail(&cmd->list, &dmov_conf[adm].ready_commands[ch]);
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_enqueue_cmd_ext);
445
446void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
447{
448 /* Disable callback function (for backwards compatibility) */
449 cmd->exec_func = NULL;
450
451 msm_dmov_enqueue_cmd_ext(id, cmd);
452}
453EXPORT_SYMBOL(msm_dmov_enqueue_cmd);
454
/*
 * Flush channel @id by writing DMOV_FLUSH_TYPE to its FLUSH0 register,
 * but only if the channel has active commands.  Completion of the flush
 * is reported through the normal IRQ/result path.
 */
void msm_dmov_flush(unsigned int id)
{
	unsigned long irq_flags;
	int ch = DMOV_ID_TO_CHAN(id);
	int adm = DMOV_ID_TO_ADM(id);
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* XXX not checking if flush cmd sent already */
	if (!list_empty(&dmov_conf[adm].active_commands[ch])) {
		PRINT_IO("msm_dmov_flush(%d), send flush cmd\n", id);
		writel_relaxed(DMOV_FLUSH_TYPE, DMOV_REG(DMOV_FLUSH0(ch), adm));
	}
	/* spin_unlock_irqrestore has the necessary barrier */
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
}
EXPORT_SYMBOL(msm_dmov_flush);
Arve Hjønnevågbfe645a2007-11-26 04:12:29 -0800470
/* Wrapper used by msm_dmov_exec_cmd() to run a command synchronously. */
struct msm_dmov_exec_cmdptr_cmd {
	struct msm_dmov_cmd dmov_cmd;
	struct completion complete;	/* signalled from the completion callback */
	unsigned id;
	unsigned int result;		/* raw channel result word */
	struct msm_dmov_errdata err;	/* flush registers on failure */
};

/*
 * Completion callback for synchronous execution: record the result, copy
 * the error data on anything but the normal-completion code 0x80000002,
 * and wake the waiter.
 */
static void
dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
			       unsigned int result,
			       struct msm_dmov_errdata *err)
{
	struct msm_dmov_exec_cmdptr_cmd *cmd = container_of(_cmd, struct msm_dmov_exec_cmdptr_cmd, dmov_cmd);
	cmd->result = result;
	if (result != 0x80000002 && err)
		memcpy(&cmd->err, err, sizeof(struct msm_dmov_errdata));

	complete(&cmd->complete);
}
491
/*
 * Execute the command list at @cmdptr on channel @id synchronously.
 *
 * Builds a temporary command with a completion, enqueues it, and blocks
 * (I/O wait) until the completion callback fires.  Returns 0 when the
 * result word is the normal-completion code 0x80000002, otherwise logs
 * the result plus the first four flush registers and returns -EIO.
 * Must be called from sleepable context.
 */
int msm_dmov_exec_cmd(unsigned id, unsigned int crci_mask, unsigned int cmdptr)
{
	struct msm_dmov_exec_cmdptr_cmd cmd;

	PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);

	cmd.dmov_cmd.cmdptr = cmdptr;
	cmd.dmov_cmd.crci_mask = crci_mask;
	cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
	cmd.dmov_cmd.exec_func = NULL;
	cmd.id = id;
	cmd.result = 0;
	init_completion(&cmd.complete);

	msm_dmov_enqueue_cmd(id, &cmd.dmov_cmd);
	wait_for_completion_io(&cmd.complete);

	if (cmd.result != 0x80000002) {
		PRINT_ERROR("dmov_exec_cmdptr(%d): ERROR, result: %x\n", id, cmd.result);
		PRINT_ERROR("dmov_exec_cmdptr(%d): flush: %x %x %x %x\n",
			id, cmd.err.flush[0], cmd.err.flush[1], cmd.err.flush[2], cmd.err.flush[3]);
		return -EIO;
	}
	PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
	return 0;
}
EXPORT_SYMBOL(msm_dmov_exec_cmd);
Arve Hjønnevågbfe645a2007-11-26 04:12:29 -0800519
/*
 * Snapshot the six FLUSH registers of channel @ch on ADM @adm into
 * @errdata for error/flush reporting.  Each FLUSH register has its own
 * macro, so the reads cannot be looped.
 */
static void fill_errdata(struct msm_dmov_errdata *errdata, int ch, int adm)
{
	errdata->flush[0] = readl_relaxed(DMOV_REG(DMOV_FLUSH0(ch), adm));
	errdata->flush[1] = readl_relaxed(DMOV_REG(DMOV_FLUSH1(ch), adm));
	errdata->flush[2] = readl_relaxed(DMOV_REG(DMOV_FLUSH2(ch), adm));
	errdata->flush[3] = readl_relaxed(DMOV_REG(DMOV_FLUSH3(ch), adm));
	errdata->flush[4] = readl_relaxed(DMOV_REG(DMOV_FLUSH4(ch), adm));
	errdata->flush[5] = readl_relaxed(DMOV_REG(DMOV_FLUSH5(ch), adm));
}
Arve Hjønnevågbfe645a2007-11-26 04:12:29 -0800529
/*
 * ADM interrupt handler.
 *
 * Walks the ISR bitmask channel by channel; for each channel with a valid
 * result, drains the result FIFO: completes the head active command on
 * DONE, FLUSH (with flush-register error data) or ERROR (additionally
 * poking FLUSH0, with a note that recovery does not actually work).
 * Without CONFIG_MSM_ADM3 it also starts the next ready command inline;
 * with it, start_ready_cmds() handles that across all channels at the
 * end.  When the ADM goes fully idle the IRQ is masked and the deferred
 * clock-disable timer armed.
 */
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
{
	unsigned int int_status;
	unsigned int mask;
	unsigned int id;
	unsigned int ch;
	unsigned long irq_flags;
	unsigned int ch_status;
	unsigned int ch_result;
	struct msm_dmov_cmd *cmd;
	int adm = DMOV_IRQ_TO_ADM(irq);

	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	/* read and clear isr */
	int_status = readl_relaxed(DMOV_REG(DMOV_ISR, adm));
	PRINT_FLOW("msm_datamover_irq_handler: DMOV_ISR %x\n", int_status);

	while (int_status) {
		/* Isolate and clear the lowest pending channel bit. */
		mask = int_status & -int_status;
		ch = fls(mask) - 1;
		id = DMOV_CHAN_ADM_TO_ID(ch, adm);
		PRINT_FLOW("msm_datamover_irq_handler %08x %08x id %d\n", int_status, mask, id);
		int_status &= ~mask;
		ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch), adm));
		if (!(ch_status & DMOV_STATUS_RSLT_VALID)) {
			PRINT_FLOW("msm_datamover_irq_handler id %d, "
				"result not valid %x\n", id, ch_status);
			continue;
		}
		/* Drain every queued result for this channel. */
		do {
			ch_result = readl_relaxed(DMOV_REG(DMOV_RSLT(ch), adm));
			if (list_empty(&dmov_conf[adm].active_commands[ch])) {
				PRINT_ERROR("msm_datamover_irq_handler id %d, got result "
					"with no active command, status %x, result %x\n",
					id, ch_status, ch_result);
				cmd = NULL;
			} else {
				cmd = list_entry(dmov_conf[adm].
					active_commands[ch].next, typeof(*cmd),
					list);
			}
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x, result %x\n", id, ch_status, ch_result);
			if (ch_result & DMOV_RSLT_DONE) {
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n",
					id, ch_status);
				PRINT_IO("msm_datamover_irq_handler id %d, got result "
					"for %p, result %x\n", id, cmd, ch_result);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, NULL);
				}
			}
			if (ch_result & DMOV_RSLT_FLUSH) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);
				PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_FLOW("msm_datamover_irq_handler id %d, flush, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
			}
			if (ch_result & DMOV_RSLT_ERROR) {
				struct msm_dmov_errdata errdata;

				fill_errdata(&errdata, ch, adm);

				PRINT_ERROR("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
				PRINT_ERROR("msm_datamover_irq_handler id %d, error, result %x, flush0 %x\n", id, ch_result, errdata.flush[0]);
				if (cmd) {
					list_del(&cmd->list);
					cmd->complete_func(cmd, ch_result, &errdata);
				}
				/* this does not seem to work, once we get an error */
				/* the datamover will no longer accept commands */
				writel_relaxed(0, DMOV_REG(DMOV_FLUSH0(ch),
					       adm));
			}
			rmb();
			ch_status = readl_relaxed(DMOV_REG(DMOV_STATUS(ch),
							   adm));
#ifndef CONFIG_MSM_ADM3
			PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
			/* Start the next ready command inline (pre-ADM3). */
			if ((ch_status & DMOV_STATUS_CMD_PTR_RDY) &&
			    !list_empty(&dmov_conf[adm].ready_commands[ch])) {
				cmd = list_entry(dmov_conf[adm].
					ready_commands[ch].next, typeof(*cmd),
					list);
				list_del(&cmd->list);
				if (cmd->exec_func)
					cmd->exec_func(cmd);
				list_add_tail(&cmd->list,
					&dmov_conf[adm].active_commands[ch]);
				PRINT_FLOW("msm_datamover_irq_handler id %d, start command\n", id);
				writel_relaxed(cmd->cmdptr,
					DMOV_REG(DMOV_CMD_PTR(ch), adm));
			}
#endif
		} while (ch_status & DMOV_STATUS_RSLT_VALID);
		if (list_empty(&dmov_conf[adm].active_commands[ch]) &&
				list_empty(&dmov_conf[adm].ready_commands[ch]))
			dmov_conf[adm].channel_active &= ~(1U << ch);
		PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
	}

	start_ready_cmds(adm);
	if (!dmov_conf[adm].channel_active) {
		disable_irq_nosync(dmov_conf[adm].irq);
		dmov_conf[adm].clk_ctl = CLK_TO_BE_DIS;
		mod_timer(&dmov_conf[adm].timer, jiffies + HZ);
	}

	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return IRQ_HANDLED;
}
646
/*
 * System-suspend hook: if a deferred clock disable is pending, perform it
 * immediately (cancelling the timer) so the ADM clocks are off before
 * suspend.  A busy channel in CLK_TO_BE_DIS state is a bug.
 */
static int msm_dmov_suspend_late(struct device *dev)
{
	unsigned long irq_flags;
	struct platform_device *pdev = to_platform_device(dev);
	int adm = (pdev->id >= 0) ? pdev->id : 0;
	spin_lock_irqsave(&dmov_conf[adm].lock, irq_flags);
	if (dmov_conf[adm].clk_ctl == CLK_TO_BE_DIS) {
		BUG_ON(dmov_conf[adm].channel_active);
		del_timer(&dmov_conf[adm].timer);
		msm_dmov_clk_toggle(adm, 0);
		dmov_conf[adm].clk_ctl = CLK_DIS;
	}
	spin_unlock_irqrestore(&dmov_conf[adm].lock, irq_flags);
	return 0;
}
662
/* Runtime-PM callbacks are logging-only stubs; clock gating is handled by
 * the driver's own timer-based state machine instead. */
static int msm_dmov_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int msm_dmov_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static int msm_dmov_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: idling...\n");
	return 0;
}

static struct dev_pm_ops msm_dmov_dev_pm_ops = {
	.runtime_suspend = msm_dmov_runtime_suspend,
	.runtime_resume = msm_dmov_runtime_resume,
	.runtime_idle = msm_dmov_runtime_idle,
	.suspend = msm_dmov_suspend_late,
};
687
688static int msm_dmov_init_clocks(struct platform_device *pdev)
689{
690 int adm = (pdev->id >= 0) ? pdev->id : 0;
691 int ret;
692
693 dmov_conf[adm].clk = clk_get(&pdev->dev, "adm_clk");
694 if (IS_ERR(dmov_conf[adm].clk)) {
695 printk(KERN_ERR "%s: Error getting adm_clk\n", __func__);
696 dmov_conf[adm].clk = NULL;
697 return -ENOENT;
698 }
699
700 dmov_conf[adm].pclk = clk_get(&pdev->dev, "adm_pclk");
701 if (IS_ERR(dmov_conf[adm].pclk)) {
702 dmov_conf[adm].pclk = NULL;
703 /* pclk not present on all SoCs, don't bail on failure */
704 }
705
706 dmov_conf[adm].ebiclk = clk_get(&pdev->dev, "ebi1_clk");
707 if (IS_ERR(dmov_conf[adm].ebiclk)) {
708 dmov_conf[adm].ebiclk = NULL;
709 /* ebiclk not present on all SoCs, don't bail on failure */
710 } else {
711 ret = clk_set_rate(dmov_conf[adm].ebiclk, 27000000);
712 if (ret)
713 return -ENOENT;
714 }
715
716 return 0;
717}
718
/*
 * Apply the static channel and CRCI configuration tables to ADM @adm.
 * Channels owned by the apps processor (sd <= 1) get their security
 * domain and shadow-enable bit programmed; every CRCI gets its block
 * size.  No-op without CONFIG_MSM_ADM3.
 */
static void config_datamover(int adm)
{
#ifdef CONFIG_MSM_ADM3
	int i;
	for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
		struct msm_dmov_chan_conf *chan_conf =
			dmov_conf[adm].chan_conf;
		unsigned conf;
		/* Only configure scorpion channels */
		if (chan_conf[i].sd <= 1) {
			conf = readl_relaxed(DMOV_REG(DMOV_CONF(i), adm));
			conf &= ~DMOV_CONF_SD(7);
			conf |= DMOV_CONF_SD(chan_conf[i].sd);
			writel_relaxed(conf | DMOV_CONF_SHADOW_EN,
				       DMOV_REG(DMOV_CONF(i), adm));
		}
	}
	for (i = 0; i < MSM_DMOV_CRCI_COUNT; i++) {
		struct msm_dmov_crci_conf *crci_conf =
			dmov_conf[adm].crci_conf;

		writel_relaxed(DMOV_CRCI_CTL_BLK_SZ(crci_conf[i].blk_size),
			       DMOV_REG(DMOV_CRCI_CTL(i), adm));
	}
#endif
}
745
746static int msm_dmov_probe(struct platform_device *pdev)
747{
748 int adm = (pdev->id >= 0) ? pdev->id : 0;
749 int i;
750 int ret;
751 struct resource *res =
752 platform_get_resource(pdev, IORESOURCE_IRQ, 0);
753
754 if (res) {
755 dmov_conf[adm].irq = res->start;
756 dmov_conf[adm].base = (void *)res->end;
757 }
758 if (!dmov_conf[adm].base || !dmov_conf[adm].irq)
759 return -ENXIO;
760
761 ret = request_irq(dmov_conf[adm].irq, msm_datamover_irq_handler,
762 0, "msmdatamover", NULL);
763 if (ret) {
764 PRINT_ERROR("Requesting ADM%d irq %d failed\n", adm,
765 dmov_conf[adm].irq);
766 return ret;
767 }
768 disable_irq(dmov_conf[adm].irq);
769 ret = msm_dmov_init_clocks(pdev);
770 if (ret) {
771 PRINT_ERROR("Requesting ADM%d clocks failed\n", adm);
772 return -ENOENT;
773 }
Jeff Ohlsteincdbfc182011-08-10 12:08:28 -0700774 ret = msm_dmov_clk_toggle(adm, 1);
775 if (ret) {
776 PRINT_ERROR("Enabling ADM%d clocks failed\n", adm);
777 return -ENOENT;
778 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700779
780 config_datamover(adm);
781 for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
782 INIT_LIST_HEAD(&dmov_conf[adm].ready_commands[i]);
783 INIT_LIST_HEAD(&dmov_conf[adm].active_commands[i]);
784
785 writel_relaxed(DMOV_RSLT_CONF_IRQ_EN
786 | DMOV_RSLT_CONF_FORCE_FLUSH_RSLT,
787 DMOV_REG(DMOV_RSLT_CONF(i), adm));
788 }
789 wmb();
Jeff Ohlsteincdbfc182011-08-10 12:08:28 -0700790 msm_dmov_clk_toggle(adm, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700791 return ret;
792}
793
/* Platform driver glue; matched by device name MODULE_NAME ("msm_dmov"). */
static struct platform_driver msm_dmov_driver = {
	.probe = msm_dmov_probe,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_dmov_dev_pm_ops,
	},
};
802
803/* static int __init */
804static int __init msm_init_datamover(void)
805{
806 int ret;
807 ret = platform_driver_register(&msm_dmov_driver);
808 if (ret)
809 return ret;
810 return 0;
811}
812arch_initcall(msm_init_datamover);