/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight when the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from the APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
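/*
 * Each channel's registers sit at a fixed stride from the controller
 * base, i.e. at base_addr + TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
 * id * channel_reg_size, where channel_reg_size is 0x20 on
 * Tegra20/30/114 and 0x40 on Tegra148 (see the chip data below).
 */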

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip-specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the controller.
 * @support_channel_pause: Support channel-wise pause of DMA.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
	unsigned long wcount;
};

/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware
 * for one transfer. A client's data-transfer request can be broken into
 * multiple sub-transfers, as per the requester's details and hardware
 * support. Each sub-transfer is added to the list of transfers and points
 * to the Tegra DMA descriptor that manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	int req_len;
	bool configured;
	bool last_sg;
	bool half_done;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages the client's requests.
 * It keeps track of transfer status, callbacks, request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	int bytes_requested;
	int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel-specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[30];
	bool config_init;
	int id;
	int irq;
	unsigned long chan_base_offset;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for the bottom half of ISR handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;
	dma_async_tx_callback callback;
	void *callback_param;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is available, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Reuse a free descriptor only if the client has acked it */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate a DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (!tdc->slave_id)
		tdc->slave_id = sconfig->slave_id;
	tdc->config_init = true;
	return 0;
}

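/*
 * Global pause stops the whole controller: the global lock is taken in
 * tegra_dma_global_pause() and released only by the matching
 * tegra_dma_global_resume(), so the two must always be used as a pair.
 */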
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
	if (wait_for_burst_complete)
		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no EOC status, the last burst has not completed yet.
	 * The last burst may also be in flight: it can then complete while
	 * the DMA is paused, but in that case it neither generates an
	 * interrupt nor reloads the new configuration.
	 * If the EOC status is already set, the interrupt handler has to
	 * load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If an interrupt is pending then do nothing, as the ISR will
	 * handle the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
						nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

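/*
 * Bytes already transferred for the in-flight request: the STATUS count
 * field holds the remaining transfer in the same (length - 4) encoding
 * that tegra_dma_prep_wcount() programs, hence the extra -4 below. For
 * example, an untouched 64-byte request reads back 60 in the count
 * field, giving 64 - 60 - 4 = 0 bytes transferred.
 */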
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without any request\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * transfer loop cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
	return;
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
	return;
}

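/*
 * Bottom half: drain the callback list and invoke the client callbacks
 * with the channel lock dropped, so that a callback may safely queue
 * further work against this channel.
 */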
static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
	return;
}

static void tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check among the descriptors waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in the pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return tegra_dma_slave_config(dc,
				(struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		tegra_dma_terminate_all(dc);
		return 0;

	default:
		break;
	}

	return -ENXIO;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

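/*
 * Worked example: a client asking for maxburst = 8 words on a 16-bit
 * bus requests 8 * 2 = 16 bytes per burst, i.e. 4 words on the 32-bit
 * AHB side, which maps to TEGRA_APBDMA_AHBSEQ_BURST_4.
 */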
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If the burst size is 0, calculate it based on the length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

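/*
 * The hardware word count is encoded as (len - 4) with the low two bits
 * clear: a 64-byte transfer is programmed as 0x3C. Chips with a separate
 * WCOUNT register (Tegra148) take the value there; earlier chips pack it
 * into the CSR word-count field.
 */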
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;
	int ret;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We accept additional requests until the DMA is started; the
	 * driver then loops over all of them. Once the DMA is started,
	 * new requests can be queued only after terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA is running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw);
	if (ret < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into period-size chunks */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->half_done = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure this transfer mode does not conflict with the
	 * currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0)
		dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	clk_disable_unprepare(tdma->dma_clk);

	tdc->slave_id = 0;
}

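/*
 * DT translation: the client's single "dmas" argument cell selects the
 * APB requester. It is stored as the channel's slave ID and later
 * programmed into the CSR REQ_SEL field.
 */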
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels = 16,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x40,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = true,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(tegra_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

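	/*
	 * A single allocation covers the controller structure plus the
	 * trailing per-channel array (the channels[] member of struct
	 * tegra_dma).
	 */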
	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = tegra_dma_runtime_resume(&pdev->dev);
		if (ret) {
			dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
				ret);
			goto err_pm_disable;
		}
	}

	/* Enable the clock before accessing registers */
	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		goto err_pm_disable;
	}

	/* Reset the DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	clk_disable_unprepare(tdma->dma_clk);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
					i * cdata->channel_reg_size;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.device_control = tegra_dma_device_control;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tasklet_kill(&tdc->tasklet);
	}

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable the clock before accessing registers */
	ret = tegra_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
	}

	/* Disable clock */
	tegra_dma_runtime_suspend(dev);
	return 0;
}

static int tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable the clock before accessing registers */
	ret = tegra_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		/* Restore CSR with ENB cleared so the channel stays stopped */
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	/* Disable clock */
	tegra_dma_runtime_suspend(dev);
	return 0;
}
#endif

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
#ifdef CONFIG_PM
	.runtime_suspend = tegra_dma_runtime_suspend,
	.runtime_resume = tegra_dma_runtime_resume,
#endif
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");