/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE 0x10000

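/*
 * Added note (derived from the lookup code in tegra_dma_update_hw below):
 * the index into each of these tables is the value programmed into the
 * corresponding WRAP or BUS_WIDTH sequencer field.  Wrap sizes are in
 * 32-bit words (byte addresses are shifted right by two before the
 * lookup); bus widths are in bits.
 */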
const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 *
	 * FIXME: There can be a race here. What if the request to
	 * dequeue happens at the same time as the DMA just moved to
	 * the new buffer and SW hasn't received the interrupt yet?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;
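	/*
	 * Illustrative arithmetic (a sketch, not from the original source):
	 * for a one-shot request of 4096 bytes, req_transfer_count is 1023,
	 * i.e. 1024 programmed words.  If the request is dequeued while the
	 * channel is still busy and the status count reads 599 (600 words
	 * still pending), the computation above yields
	 * (1024 - 600) * 4 = 1696 bytes already transferred.
	 */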

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
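
/*
 * Illustrative client usage (a sketch only, not part of the original
 * driver).  A client allocates a channel, fills in a struct tegra_dma_req
 * and enqueues it; req->complete() runs from the DMA IRQ thread once the
 * transfer finishes.  The request selector value comes from the
 * peripheral's entry in <mach/dma.h>; the selector name below is
 * hypothetical, and the bus widths must match an entry in
 * bus_width_table.
 *
 *	static void xfer_done(struct tegra_dma_req *req)
 *	{
 *		pr_debug("done, %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	struct tegra_dma_channel *ch;
 *	struct tegra_dma_req req = {
 *		.to_memory		= 1,
 *		.source_addr		= fifo_phys_addr,  // APB side, word aligned
 *		.dest_addr		= buf_phys_addr,   // AHB side, word aligned
 *		.size			= 4096,            // bytes, multiple of 4
 *		.source_bus_width	= 32,
 *		.dest_bus_width		= 32,
 *		.req_sel		= TEGRA_DMA_REQ_SEL_FOO,  // hypothetical
 *		.complete		= xfer_done,
 *	};
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	if (!ch)
 *		return -EBUSY;
 *	tegra_dma_enqueue_req(ch, &req);
 *	...
 *	tegra_dma_free_channel(ch);
 */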

static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

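	/*
	 * Worked example (a sketch, not from the original source): the
	 * WCOUNT field is programmed in 32-bit words minus one.  A 4096
	 * byte one-shot request gives req_transfer_count = (4096 >> 2) - 1
	 * = 1023, i.e. 1024 words; in continuous (double buffered) mode
	 * the same request gives (4096 >> 3) - 1 = 511, i.e. 512 words per
	 * half buffer, with an interrupt raised at each half.
	 */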
	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case dma has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
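		/*
		 * Added commentary (derived from the logic below): in
		 * continuous mode the channel is double buffered and
		 * interrupts at each half.  A request starts as
		 * BUF_STATUS_EMPTY, becomes HALF_FULL on the first (ping)
		 * interrupt and FULL on the second (pong) interrupt; the
		 * STA_PING_PONG check handles the out-of-sync case where
		 * the hardware has already moved on to the other half.
		 */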
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt) */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;
	struct clk *c;

	c = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR(c)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(c);
		goto fail;
	}
	ret = clk_enable(c);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
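/* Saved context: 3 global registers (GEN, CNTRL, IRQ_MASK) followed by
 * 5 per-channel registers (CSR, AHB_PTR, AHB_SEQ, APB_PTR, APB_SEQ) for
 * each of the TEGRA_SYSTEM_DMA_CH_NR channels, in the save/restore order
 * used below. */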
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif