/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

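/*
 * Register map (see tegra_dma_init() and tegra_dma_update_hw()): the
 * APB_DMA_* offsets below are controller-wide registers at
 * TEGRA_APB_DMA_BASE, while the APB_DMA_CHAN_* offsets are per-channel
 * registers at TEGRA_APB_DMA_CH0_BASE + channel * TEGRA_APB_DMA_CH0_SIZE.
 */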
#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRASFER_SIZE		0x10000

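/*
 * Address-wrap and bus-width encodings: tegra_dma_update_hw() looks up the
 * requested value in these tables and programs the matching index into the
 * AHB/APB_SEQ wrap and bus-width fields.
 */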
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {
	0, 1, 2, 4, 8, 16, 32, 64
};

static const unsigned int bus_width_table[5] = {
	8, 16, 32, 64, 128
};

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static bool tegra_dma_initialized;
static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
	return;
}

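/*
 * Halt a channel: mask its EOC interrupt, clear the enable bit, and
 * acknowledge any EOC status that is already pending so a stale interrupt
 * is not delivered after the channel has been stopped.
 */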
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

static int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

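	/*
	 * Both counts are in 32-bit words and the hardware fields hold
	 * (count - 1), hence the +1 adjustments above and the *4 byte
	 * scaling below.
	 */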
	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, the DMA only tracks the count for
	 * half of the buffer. So if the DMA has already finished the first
	 * half, add that half buffer to the completed count.
	 *
	 * FIXME: There can be a race here. What if the request is dequeued
	 * at the same time as the DMA just moved to the new buffer and
	 * software hasn't yet received the interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

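/*
 * A minimal sketch of how a client driver might queue a one-shot transfer.
 * Field and constant names are the ones used elsewhere in this file and in
 * <mach/dma.h>; fifo_phys_addr, buf_phys_addr, len, peripheral_req_sel and
 * my_complete_cb are placeholders, and error handling is omitted:
 *
 *	struct tegra_dma_channel *ch;
 *	struct tegra_dma_req req = { 0 };
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.to_memory = 1;
 *	req.source_addr = fifo_phys_addr;
 *	req.dest_addr = buf_phys_addr;
 *	req.size = len;
 *	req.source_bus_width = 32;
 *	req.dest_bus_width = 32;
 *	req.req_sel = peripheral_req_sel;
 *	req.complete = my_complete_cb;
 *	tegra_dma_enqueue_req(ch, &req);
 *
 * len must be a multiple of 4 and both addresses word aligned, or
 * tegra_dma_enqueue_req() rejects the request with -EINVAL.
 */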
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRASFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	if (WARN_ON(!tegra_dma_initialized))
		return NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

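/*
 * Re-program only the APB and AHB pointers for the next buffer while the
 * channel keeps running; used from the continuous-mode interrupt handler
 * when chaining to the next queued request.
 */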
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
	return;
}

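/*
 * Fully program the channel for a new request (direction, request selector,
 * word count, address wrap, bus widths) and then set CSR_ENB to start the
 * transfer.
 */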
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered;
	 * continuous mode is always double buffered.
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		   another req, in which case the DMA has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

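/*
 * Continuous (double-buffered) mode: the channel interrupts once per half
 * buffer. On the first interrupt the request is marked half-full and the
 * optional threshold() callback runs; on the second it is marked full,
 * removed from the list, and complete() runs. The STA_PING_PONG check below
 * detects the case where software has fallen behind the hardware and
 * releases the current buffer immediately.
 */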
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if
			 * available */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e. on
			 * the second interrupt) */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

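/*
 * Interrupt handling is split: the hard handler below only acknowledges the
 * channel's EOC status, while the per-request work (list manipulation and
 * client callbacks) runs in the threaded handler dma_thread_fn().
 */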
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	/* the fail: path below writes to addr, so compute it up front */
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	struct clk *c;

	bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);

	c = clk_get_sys("tegra-dma", NULL);
	if (IS_ERR(c)) {
		pr_err("Unable to get clock for APB DMA\n");
		ret = PTR_ERR(c);
		goto fail;
	}
	ret = clk_enable(c);
	if (ret != 0) {
		pr_err("Unable to enable clock for APB DMA\n");
		goto fail;
	}

	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
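	/*
	 * Unmask interrupts only for the channels the kernel owns
	 * (TEGRA_SYSTEM_DMA_CH_MIN..TEGRA_SYSTEM_DMA_CH_MAX), leaving the
	 * last TEGRA_SYSTEM_DMA_AVP_CH_NUM channels masked.
	 */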
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
	       addr + APB_DMA_IRQ_MASK_SET);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;

		__clear_bit(i, channel_usage);
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	tegra_dma_initialized = true;

	return 0;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
postcore_initcall(tegra_dma_init);

#ifdef CONFIG_PM
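/* Saved register context across suspend: 3 controller-wide registers plus
 * 5 per-channel registers for each of the TEGRA_SYSTEM_DMA_CH_NR channels. */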
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
				  TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif