/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		(1 << 31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight while the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from the APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

struct tegra_dma;

/*
 * tegra_dma_chip_data: Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by the DMA controller.
 * @support_channel_pause: Channel-wise pause of the DMA is supported.
 * @support_separate_wcount_reg: A separate word count register is supported.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long	csr;
	unsigned long	ahb_ptr;
	unsigned long	apb_ptr;
	unsigned long	ahb_seq;
	unsigned long	apb_seq;
	unsigned long	wcount;
};

/*
 * tegra_dma_sg_req: DMA request details needed to configure the hardware.
 * This contains the details for one transfer used to program the DMA.
 * A client's request for a data transfer can be broken into multiple
 * sub-transfers, depending on the requester details and hardware support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor that manages the overall transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs	ch_regs;
	int				req_len;
	bool				configured;
	bool				last_sg;
	struct list_head		node;
	struct tegra_dma_desc		*dma_desc;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor which manages a client request.
 * This descriptor keeps track of the transfer status, callbacks,
 * request counts, etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor	txd;
	int				bytes_requested;
	int				bytes_transferred;
	enum dma_status			dma_status;
	struct list_head		node;
	struct list_head		tx_list;
	struct list_head		cb_node;
	int				cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan		dma_chan;
	char			name[30];
	bool			config_init;
	int			id;
	int			irq;
	void __iomem		*chan_addr;
	spinlock_t		lock;
	bool			busy;
	struct tegra_dma	*tdma;
	bool			cyclic;

	/* Different lists for managing the requests */
	struct list_head	free_sg_req;
	struct list_head	pending_sg_req;
	struct list_head	free_dma_desc;
	struct list_head	cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler		isr_handler;
	struct tasklet_struct	tasklet;

	/* Channel-slave specific configuration */
	unsigned int		slave_id;
	struct dma_slave_config	dma_sconfig;
	struct tegra_dma_channel_regs	channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device		dma_dev;
	struct device			*dev;
	struct clk			*dma_clk;
	struct reset_control		*rst;
	spinlock_t			global_lock;
	void __iomem			*base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32				global_pause_count;

	/* Some registers need to be cached before suspend */
	u32				reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
		u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get a DMA desc from the free list; if none is there, allocate one. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not reuse a desc that is still waiting for an ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd)) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate a DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
		return NULL;
	}

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;
	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
	if (!sg_req)
		dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (!tdc->slave_id)
		tdc->slave_id = sconfig->slave_id;
	tdc->config_init = true;
	return 0;
}

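/*
 * On chips without per-channel pause support, the only way to pause one
 * channel is to clear the global enable bit, which stops every channel.
 * The global_pause_count refcount lets multiple channels request a global
 * pause concurrently; the controller is re-enabled only when the last
 * requester resumes.
 */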
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
				TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	} else {
		tegra_dma_global_resume(tdc);
	}
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
				ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

464static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
465 struct tegra_dma_sg_req *nsg_req)
466{
467 unsigned long status;
468
469 /*
470 * The DMA controller reloads the new configuration for next transfer
471 * after last burst of current transfer completes.
472 * If there is no IEC status then this makes sure that last burst
473 * has not be completed. There may be case that last burst is on
474 * flight and so it can complete but because DMA is paused, it
475 * will not generates interrupt as well as not reload the new
476 * configuration.
477 * If there is already IEC status then interrupt handler need to
478 * load new configuration.
479 */
Laxman Dewangan1b140902013-01-06 21:52:02 +0530480 tegra_dma_pause(tdc, false);
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530481 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
482
483 /*
484 * If interrupt is pending then do nothing as the ISR will handle
485 * the programing for new request.
486 */
487 if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
488 dev_err(tdc2dev(tdc),
489 "Skipping new configuration as interrupt is pending\n");
Laxman Dewangan1b140902013-01-06 21:52:02 +0530490 tegra_dma_resume(tdc);
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530491 return;
492 }
493
494 /* Safe to program new configuration */
495 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
496 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
Laxman Dewangan911dacc2014-01-06 11:16:45 -0700497 if (tdc->tdma->chip_data->support_separate_wcount_reg)
498 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
499 nsg_req->ch_regs.wcount);
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530500 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
501 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
502 nsg_req->configured = true;
503
Laxman Dewangan1b140902013-01-06 21:52:02 +0530504 tegra_dma_resume(tdc);
Laxman Dewanganec8a1582012-06-06 10:55:27 +0530505}
506
static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
					typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

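/*
 * The STATUS register's COUNT field is assumed to report the remaining
 * length in the same (bytes - 4) encoding used when programming the word
 * count (see tegra_dma_prep_wcount() below), so the bytes already
 * transferred work out to req_len - remainder - 4.
 */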
static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
	struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
						typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add to the cb list if it is not already there */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
							&tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without any request\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head request on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * looping of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure the next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);
	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
	bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start the DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

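/*
 * Completion handler for cyclic (continuous single-buffer) transfers:
 * each EOC interrupt marks one completed period. The finished request is
 * rotated to the tail of the pending list so the buffer keeps looping,
 * while the next request is pre-programmed behind the running one.
 */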
static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	/* The callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	/* If not the last req then put it at the end of the pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		callback = dma_desc->txd.callback;
		callback_param = dma_desc->txd.callback_param;
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
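		/*
		 * Drop the channel lock while invoking the client callback,
		 * so that the callback may legally submit new descriptors;
		 * the callback count was latched above.
		 */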
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count-- && callback)
			callback(callback_param);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
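		/*
		 * Ack the interrupt by writing the status value back (the
		 * EOC bit is assumed to be write-one-to-clear) before
		 * invoking the per-mode completion handler.
		 */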
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		"Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure the next req */
		if (tdc->cyclic) {
			/*
			 * Wait for one burst time to allow the DMA to be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		spin_unlock_irqrestore(&tdc->lock, flags);
		return 0;
	}

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
	return 0;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	/* Check in the pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			residual = dma_desc->bytes_requested -
					(dma_desc->bytes_transferred %
						dma_desc->bytes_requested);
			dma_set_residue(txstate, residual);
			ret = dma_desc->dma_status;
			spin_unlock_irqrestore(&tdc->lock, flags);
			return ret;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	spin_unlock_irqrestore(&tdc->lock, flags);
	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			"slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

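/*
 * Pick an AHB burst encoding for a transfer. A client-specified burst is
 * converted from bus-width units into 4-byte AHB words; when none is
 * given, fall back to the largest burst that evenly divides the transfer
 * length, inferred from the length's low bits as checked below.
 */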
static inline int get_burst_size(struct tegra_dma_channel *tdc,
	u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * burst_size from the client is in terms of the bus_width.
	 * Convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

static int get_transfer_param(struct tegra_dma_channel *tdc,
	enum dma_transfer_direction direction, unsigned long *apb_addr,
	unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
	enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

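/*
 * The hardware takes the transfer length as a word count encoded as
 * (len - 4) & 0xFFFC, placed either inside the CSR register or, on chips
 * with a separate word count register, in TEGRA_APBDMA_CHAN_WCOUNT.
 */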
static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
	struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
				(len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested transfer mode does not conflict
	 * with the currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * More requests can be taken in as long as the DMA has not been
	 * started. The driver will loop over all requests.
	 * Once the DMA is started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
			(len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
				&burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_FLOW;
	if (flags & DMA_PREP_INTERRUPT)
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split the transfer into periods of equal size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that the requested transfer mode does not conflict
	 * with the currently configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;

	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	if (tdc->busy)
		tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = 0;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels = 16,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x20,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels = 32,
	.channel_reg_size = 0x40,
	.max_dma_count = 1024UL * 64,
	.support_channel_pause = true,
	.support_separate_wcount_reg = true,
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata = NULL;
	const struct of_device_id *match;

	match = of_match_device(tegra_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
			sizeof(struct tegra_dma_channel), GFP_KERNEL);
	if (!tdma) {
		dev_err(&pdev->dev, "Error: memory allocation failed\n");
		return -ENOMEM;
	}

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset the DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = devm_request_irq(&pdev->dev, tdc->irq,
				tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
				&tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
				(unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	/*
	 * XXX The hardware appears to support
	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
	 * only used by this driver during tegra_dma_terminate_all()
	 */
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
			cdata->nr_channels);
	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);
	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);

	clk_disable_unprepare(tdma->dma_clk);
	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_dma_pm_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable the clock before accessing registers */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
	}

	/* Disable the clock */
	pm_runtime_put(dev);
	return 0;
}

static int tegra_dma_pm_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;
	int ret;

	/* Enable the clock before accessing registers */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	/* Disable the clock */
	pm_runtime_put(dev);
	return 0;
}
#endif

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");