/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/*
 * If a burst is in flight when the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

/* DMA channel register space size */
#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE	0x20

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int max_dma_count;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
};

/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware.
 * This contains the details for one transfer to program the DMA hw.
 * The client's request for data transfer can be broken into multiple
 * sub-transfers as per the requester details and hw support.
 * These sub-transfers get added to the list of transfers and point to the
 * Tegra DMA descriptor which manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	bool				half_done;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	unsigned long		chan_base_offset;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;
	dma_async_tx_callback	callback;
	void			*callback_param;

	/* Channel-slave specific configuration */
	struct dma_slave_config dma_sconfig;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}
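
/*
 * Note on addressing: channel registers are banked after the global
 * registers, so channel i's window starts at
 * base + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
 * i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE (set up as chan_base_offset in
 * probe). For example, channel 3's CSR is read from
 * base + 0x1000 + 3 * 0x20 + 0x00 = base + 0x1060.
 */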

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a desc which is still waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	tdc->config_init = true;
	return 0;
}
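
/*
 * For illustration only (not part of this driver): a slave client
 * typically reaches tegra_dma_slave_config() through the generic
 * dmaengine wrapper, which routes DMA_SLAVE_CONFIG through
 * tegra_dma_device_control(). The address, width, burst and requester
 * values below are made-up placeholders:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *		.slave_id = req_sel,
 *	};
 *	int ret = dmaengine_slave_config(chan, &cfg);
 */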

static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}
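
/*
 * This driver implements pause through the global enable bit, so
 * tegra_dma_global_pause() stops all channels, not just the caller's.
 * That is why the pause/resume pair is bracketed by tdma->global_lock:
 * no other channel may be paused or reprogrammed on the fly while the
 * controller is globally disabled.
 */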

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no ISE_EOC status then this makes sure that the last
	 * burst has not completed. There may be a case where the last burst
	 * is in flight and completes, but because the DMA is paused it will
	 * neither generate an interrupt nor reload the new configuration.
	 * If the ISE_EOC status is already set then the interrupt handler
	 * needs to load the new configuration.
	 */
	tegra_dma_global_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing, as the ISR will
	 * handle the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_global_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_global_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}
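
/*
 * Worked example for the arithmetic above: the word count is programmed
 * as (req_len - 4) and STATUS appears to count the remainder in the same
 * encoding, hence the extra "- 4". For a 64-byte request with a STATUS
 * count field of 28, the bytes transferred so far are 64 - 28 - 4 = 32.
 */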

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without any req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * looping of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_SUCCESS;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
	return;
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
	return;
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time so that the DMA can
			 * be configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}

static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_global_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, status);
	}
	tegra_dma_global_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	spin_lock_irqsave(&tdc->lock, flags);

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return ret;
	}

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}
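
/*
 * Example of the selection above: a client burst of 8 words on a 32-bit
 * slave bus gives burst_byte = 8 * 4 = 32 and burst_ahb_width = 8, so
 * TEGRA_APBDMA_AHBSEQ_BURST_8 is returned. With no burst hint and a
 * 24-byte transfer (24 & 0xF != 0), the fallback picks
 * TEGRA_APBDMA_AHBSEQ_BURST_1.
 */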

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{

	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"Dma length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
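
/*
 * Illustrative client-side use of the slave-sg path above (not part of
 * this driver); chan, sgl, nents, xfer_done and ctx are assumed to be
 * set up by the caller:
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		desc->callback = xfer_done;
 *		desc->callback_param = ctx;
 *		cookie = dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */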

struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can queue more requests as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA
	 * is started, new requests can be queued only after terminating
	 * the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}
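
/*
 * Illustrative cyclic use (not part of this driver): an audio-style
 * client with a DMA-mapped ring buffer would queue one descriptor
 * covering the whole buffer; buf, buf_len, period_len, period_elapsed
 * and ctx are placeholders:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = period_elapsed;
 *		desc->callback_param = ctx;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */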

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.max_dma_count		= 1024UL * 64,
};

#if defined(CONFIG_OF)
/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.max_dma_count		= 1024UL * 64,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
#endif

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_device(of_match_ptr(tegra_dma_of_match),
					&pdev->dev);
		if (!match) {
			dev_err(&pdev->dev, "Error: No device match found\n");
			return -ENODEV;
		}
		cdata = match->data;
	} else {
		/* If there is no device tree then fall back to Tegra20 */
		cdata = &tegra20_dma_chip_data;
	}

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "No mem resource for DMA\n");
		return -EINVAL;
	}

	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset DMA controller */
	tegra_periph_reset_assert(tdma->dma_clk);
	udelay(2);
	tegra_periph_reset_deassert(tdma->dma_clk);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.owner = THIS_MODULE,
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(tegra_dma_of_match),
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");