/*
 *  intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  The driver design is based on dw_dmac driver
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#define MAX_CHAN	4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID		0x0814
#define INTEL_MID_DMAC2_ID		0x0813
#define INTEL_MID_GP_DMAC2_ID		0x0827
#define INTEL_MFLD_DMAC1_ID		0x0830
#define LNW_PERIPHRAL_MASK_BASE		0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE		0x10
#define LNW_PERIPHRAL_STATUS		0x0
#define LNW_PERIPHRAL_MASK		0x8

struct intel_mid_dma_probe_info {
	u8 max_chan;
	u8 ch_base;
	u16 block_size;
	u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
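
/*
 * Illustrative use (values taken from the pci_device_id table at the bottom
 * of this file): INFO(2, 6, 4095, 0x200020) describes DMAC1 as a controller
 * with two channels starting at channel 6, a maximum block size of 4095
 * items, and a peripheral-interrupt mask of 0x200020.
 */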

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index	-	convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
	int i;
	for (i = 0; i < MAX_CHAN; i++) {
		if (*status & (1 << (i + base))) {
			*status = *status & ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
			return i;
		}
	}
	return -1;
}
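
/*
 * Worked example (for illustration only): with base = 2 and *status = 0x0C
 * (channels 2 and 3 pending), a first call returns index 0 and leaves
 * *status = 0x08; calling again returns index 1 and leaves *status = 0.
 */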

/**
 * get_block_ts	-	calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width calculate the DMA transaction length in data items
 * return data items or 0xFFFF if exceeds max length for block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;

	switch (tx_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		byte_width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		byte_width = 2;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	default:
		byte_width = 4;
		break;
	}

	block_ts = len/byte_width;
	if (block_ts > block_size)
		block_ts = 0xFFFF;
	return block_ts;
}
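
/*
 * For example (illustrative numbers): a 4096 byte transfer at
 * DMA_SLAVE_BUSWIDTH_4_BYTES is 1024 data items, returned unchanged when
 * block_size is 4095; an 8192 byte transfer at the same width (2048 items)
 * exceeds a 2047-item block_size and yields the 0xFFFF sentinel instead.
 */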

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
	u32 pimr;

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr |= mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
	u32 pimr;
	struct middma_device *mid = to_middma_device(midc->chan.device);

	if (mid->pimr_mask) {
		pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
		pimr &= ~mid->pimr_mask;
		writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
	}
	return;
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which enable interrupt is required
 *
 * Enable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	dmac1_unmask_periphral_intr(midc);

	/*en ch interrupts*/
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which disable interrupt is required
 *
 * Disable the DMA peripheral interrupt,
 * this is valid for DMAC1 family controllers only
 * This controller should have peripheral mask registers already mapped
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
	/*Check LPE PISR, make sure fwd is disabled*/
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
	iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
	return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc, *_desc;
	struct intel_mid_dma_desc *ret = NULL;

	spin_lock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&midc->lock);
	return ret;
}

/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *desc)
{
	if (desc) {
		spin_lock_bh(&midc->lock);
		list_add_tail(&desc->desc_node, &midc->free_list);
		spin_unlock_bh(&midc->lock);
	}
}
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
			struct intel_mid_dma_desc *first)
{
	struct middma_device *mid = to_middma_device(midc->chan.device);

	/* channel is expected to be idle at this point */
	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
		/*error*/
		pr_err("ERR_MDMA: channel is busy in start\n");
		/* The tasklet will hopefully advance the queue... */
		return;
	}
	midc->busy = true;
	/*write registers and en*/
	iowrite32(first->sar, midc->ch_regs + SAR);
	iowrite32(first->dar, midc->ch_regs + DAR);
	iowrite32(first->lli_phys, midc->ch_regs + LLP);
	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
	iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
		(int)first->sar, (int)first->dar, first->cfg_hi,
		first->cfg_lo, first->ctl_hi, first->ctl_lo);
	first->status = DMA_IN_PROGRESS;

	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
		struct intel_mid_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback_txd = NULL;
	struct intel_mid_dma_lli *llitem;
	void *param_txd = NULL;

	midc->completed = txd->cookie;
	callback_txd = txd->callback;
	param_txd = txd->callback_param;

	if (desc->lli != NULL) {
		/*clear the DONE bit of completed LLI in memory*/
		llitem = desc->lli + desc->current_lli;
		llitem->ctl_hi &= CLEAR_DONE;
		if (desc->current_lli < desc->lli_length-1)
			(desc->current_lli)++;
		else
			desc->current_lli = 0;
	}
	spin_unlock_bh(&midc->lock);
	if (callback_txd) {
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	}
	if (midc->raw_tfr) {
		desc->status = DMA_SUCCESS;
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
		midc->busy = false;
	}
	spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors -		check the descriptors in channel
 *					mark completed when tx is completed
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
				struct intel_mid_dma_chan *midc)
{
	struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

	/*tx is complete*/
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->status == DMA_IN_PROGRESS)
			midc_descriptor_complete(midc, desc);
	}
	return;
}
/**
 * midc_lli_fill_sg - Helper function to convert
 *			SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
				struct intel_mid_dma_desc *desc,
				struct scatterlist *sglist,
				unsigned int sglen,
				unsigned int flags)
{
	struct intel_mid_dma_slave *mids;
	struct scatterlist *sg;
	dma_addr_t lli_next, sg_phy_addr;
	struct intel_mid_dma_lli *lli_bloc_desc;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	int i;

	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	mids = midc->mid_slave;

	lli_bloc_desc = desc->lli;
	lli_next = desc->lli_phys;

	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_hi.ctl_hi = desc->ctl_hi;
	for_each_sg(sglist, sg, sglen, i) {
		/*Populate CTL_LOW and LLI values*/
		if (i != sglen - 1) {
			lli_next = lli_next +
				sizeof(struct intel_mid_dma_lli);
		} else {
			/*Check for circular list, otherwise terminate LLI to ZERO*/
			if (flags & DMA_PREP_CIRCULAR_LIST) {
				pr_debug("MDMA: LLI is configured in circular mode\n");
				lli_next = desc->lli_phys;
			} else {
				lli_next = 0;
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
			}
		}
		/*Populate CTL_HI values*/
		ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
							desc->width,
							midc->dma->block_size);
		/*Populate SAR and DAR values*/
		sg_phy_addr = sg_phys(sg);
		if (desc->dirn == DMA_TO_DEVICE) {
			lli_bloc_desc->sar = sg_phy_addr;
			lli_bloc_desc->dar = mids->dma_slave.dst_addr;
		} else if (desc->dirn == DMA_FROM_DEVICE) {
			lli_bloc_desc->sar = mids->dma_slave.src_addr;
			lli_bloc_desc->dar = sg_phy_addr;
		}
		/*Copy values into block descriptor in system memory*/
		lli_bloc_desc->llp = lli_next;
		lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
		lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

		lli_bloc_desc++;
	}
	/*Copy very first LLI values to descriptor*/
	desc->ctl_lo = desc->lli->ctl_lo;
	desc->ctl_hi = desc->lli->ctl_hi;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;

	return 0;
}
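
/*
 * Illustrative shape of the resulting chain for a three-entry sg list:
 * LLI[0].llp and LLI[1].llp each hold the physical address of the next LLI,
 * while LLI[2].llp is either 0 (with llp_src_en/llp_dst_en cleared in its
 * CTL_LOW, ending the transfer) or points back to desc->lli_phys when
 * DMA_PREP_CIRCULAR_LIST was set.
 */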
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit -	callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor, start if ch idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct intel_mid_dma_desc	*desc = to_intel_mid_dma_desc(tx);
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&midc->lock);
	cookie = midc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	midc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	if (list_empty(&midc->active_list))
		list_add_tail(&desc->desc_node, &midc->active_list);
	else
		list_add_tail(&desc->desc_node, &midc->queue);

	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);

	return cookie;
}

/**
 * intel_mid_dma_issue_pending -	callback to issue pending txn
 * @chan: chan where pending transaction needs to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);

	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status -	Return status of txn
 * @chan: chan for where status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int				ret;

	last_complete = midc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		midc_scan_descriptors(to_middma_device(chan->device), midc);

		last_complete = midc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (txstate) {
		txstate->last = last_complete;
		txstate->used = last_used;
		txstate->residue = 0;
	}
	return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct dma_slave_config *slave = (struct dma_slave_config *)arg;
	struct intel_mid_dma_slave *mid_slave;

	BUG_ON(!midc);
	BUG_ON(!slave);
	pr_debug("MDMA: slave control called\n");

	mid_slave = to_intel_mid_dma_slave(slave);

	BUG_ON(!mid_slave);

	midc->mid_slave = mid_slave;
	return 0;
}
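
/*
 * A peripheral driver would typically reach dma_slave_control() through the
 * generic device_control hook with DMA_SLAVE_CONFIG. A minimal sketch,
 * assuming a caller-owned struct intel_mid_dma_slave named "cfg" (the name
 * is hypothetical; the embedded dma_slave member is not):
 *
 *	cfg.dma_slave.direction = DMA_TO_DEVICE;
 *	cfg.dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				(unsigned long)&cfg.dma_slave);
 *
 * The argument must be the dma_slave member embedded in an
 * intel_mid_dma_slave, since to_intel_mid_dma_slave() recovers the
 * enclosing structure from it.
 */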
/**
 * intel_mid_dma_device_control -	DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
	enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;
	union intel_mid_dma_cfg_lo cfg_lo;

	if (cmd == DMA_SLAVE_CONFIG)
		return dma_slave_control(chan, arg);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&midc->lock);
	if (midc->busy == false) {
		spin_unlock_bh(&midc->lock);
		return 0;
	}
	/*Suspend and disable the channel*/
	cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
	cfg_lo.cfgx.ch_susp = 1;
	iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
	iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
	midc->busy = false;
	/* Disable interrupts */
	disable_dma_interrupt(midc);
	midc->descs_allocated = 0;

	spin_unlock_bh(&midc->lock);
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		if (desc->lli != NULL) {
			pci_pool_free(desc->lli_pool, desc->lli,
						desc->lli_phys);
			pci_pool_destroy(desc->lli_pool);
		}
		list_move(&desc->desc_node, &midc->free_list);
	}
	return 0;
}


/**
 * intel_mid_dma_prep_memcpy -	Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destn address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only
 * The peripheral txn details should be filled in slave structure properly
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
			struct dma_chan *chan, dma_addr_t dest,
			dma_addr_t src, size_t len, unsigned long flags)
{
	struct intel_mid_dma_chan *midc;
	struct intel_mid_dma_desc *desc = NULL;
	struct intel_mid_dma_slave *mids;
	union intel_mid_dma_ctl_lo ctl_lo;
	union intel_mid_dma_ctl_hi ctl_hi;
	union intel_mid_dma_cfg_lo cfg_lo;
	union intel_mid_dma_cfg_hi cfg_hi;
	enum dma_slave_buswidth width;

	pr_debug("MDMA: Prep for memcpy\n");
	BUG_ON(!chan);
	if (!len)
		return NULL;

	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
				midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
			mids->cfg_mode, mids->dma_slave.direction,
			mids->hs_mode, mids->dma_slave.src_addr_width);

	/*calculate CFG_LO*/
	if (mids->hs_mode == LNW_DMA_SW_HS) {
		cfg_lo.cfg_lo = 0;
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
	} else if (mids->hs_mode == LNW_DMA_HW_HS)
		cfg_lo.cfg_lo = 0x00000;

	/*calculate CFG_HI*/
	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		/*SW HS only*/
		cfg_hi.cfg_hi = 0;
	} else {
		cfg_hi.cfg_hi = 0;
		if (midc->dma->pimr_mask) {
			cfg_hi.cfgx.protctl = 0x0; /*default value*/
			cfg_hi.cfgx.fifo_mode = 1;
			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
				cfg_hi.cfgx.src_per = 0;
				if (mids->device_instance == 0)
					cfg_hi.cfgx.dst_per = 3;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.dst_per = 1;
			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
				if (mids->device_instance == 0)
					cfg_hi.cfgx.src_per = 2;
				if (mids->device_instance == 1)
					cfg_hi.cfgx.src_per = 0;
				cfg_hi.cfgx.dst_per = 0;
			}
		} else {
			cfg_hi.cfgx.protctl = 0x1; /*default value*/
			cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
					midc->ch_id - midc->dma->chan_base;
		}
	}

	/*calculate CTL_HI*/
	ctl_hi.ctlx.reser = 0;
	ctl_hi.ctlx.done = 0;
	width = mids->dma_slave.src_addr_width;

	ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
	pr_debug("MDMA:calc len %d for block size %d\n",
				ctl_hi.ctlx.block_ts, midc->dma->block_size);
	/*calculate CTL_LO*/
	ctl_lo.ctl_lo = 0;
	ctl_lo.ctlx.int_en = 1;
	ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
	ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

	/*
	 * Here we need some translation from "enum dma_slave_buswidth"
	 * to the format for our dma controller
	 *		standard	intel_mid_dmac's format
	 *		 1 Byte			0b000
	 *		 2 Bytes		0b001
	 *		 4 Bytes		0b010
	 */
	ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
	ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
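
	/*
	 * Sanity check of the mapping above (illustrative): the buswidth
	 * enum values are the byte counts 1, 2 and 4, so 1/2 = 0b000,
	 * 2/2 = 0b001 and 4/2 = 0b010, matching the controller encoding.
	 */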

	if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
		ctl_lo.ctlx.tt_fc = 0;
		ctl_lo.ctlx.sinc = 0;
		ctl_lo.ctlx.dinc = 0;
	} else {
		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
			ctl_lo.ctlx.sinc = 0;
			ctl_lo.ctlx.dinc = 2;
			ctl_lo.ctlx.tt_fc = 1;
		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
			ctl_lo.ctlx.sinc = 2;
			ctl_lo.ctlx.dinc = 0;
			ctl_lo.ctlx.tt_fc = 2;
		}
	}

	pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
		ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

	enable_dma_interrupt(midc);

	desc = midc_desc_get(midc);
	if (desc == NULL)
		goto err_desc_get;
	desc->sar = src;
	desc->dar = dest;
	desc->len = len;
	desc->cfg_hi = cfg_hi.cfg_hi;
	desc->cfg_lo = cfg_lo.cfg_lo;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->ctl_hi = ctl_hi.ctl_hi;
	desc->width = width;
	desc->dirn = mids->dma_slave.direction;
	desc->lli_phys = 0;
	desc->lli = NULL;
	desc->lli_pool = NULL;
	return &desc->txd;

err_desc_get:
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
	return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg -	Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares LLI based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct intel_mid_dma_chan *midc = NULL;
	struct intel_mid_dma_slave *mids = NULL;
	struct intel_mid_dma_desc *desc = NULL;
	struct dma_async_tx_descriptor *txd = NULL;
	union intel_mid_dma_ctl_lo ctl_lo;

	pr_debug("MDMA: Prep for slave SG\n");

	if (!sg_len) {
		pr_err("MDMA: Invalid SG length\n");
		return NULL;
	}
	midc = to_intel_mid_dma_chan(chan);
	BUG_ON(!midc);

	mids = midc->mid_slave;
	BUG_ON(!mids);

	if (!midc->dma->pimr_mask) {
		/* We can still handle sg list with only one item */
		if (sg_len == 1) {
			txd = intel_mid_dma_prep_memcpy(chan,
						mids->dma_slave.dst_addr,
						mids->dma_slave.src_addr,
						sgl->length,
						flags);
			return txd;
		} else {
			pr_warn("MDMA: SG list is not supported by this controller\n");
			return NULL;
		}
	}

	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
			sg_len, direction, flags);

	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
	if (NULL == txd) {
		pr_err("MDMA: Prep memcpy failed\n");
		return NULL;
	}

	desc = to_intel_mid_dma_desc(txd);
	desc->dirn = direction;
	ctl_lo.ctl_lo = desc->ctl_lo;
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->ctl_lo = ctl_lo.ctl_lo;
	desc->lli_length = sg_len;
	desc->current_lli = 0;
	/* DMA coherent memory pool for LLI descriptors*/
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
					midc->dma->pdev,
					(sizeof(struct intel_mid_dma_lli)*sg_len),
					32, 0);
	if (NULL == desc->lli_pool) {
		pr_err("MID_DMA:LLI pool create failed\n");
		return NULL;
	}

	desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
	if (!desc->lli) {
		pr_err("MID_DMA: LLI alloc failed\n");
		pci_pool_destroy(desc->lli_pool);
		return NULL;
	}

	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	if (flags & DMA_PREP_INTERRUPT) {
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				midc->dma_base + MASK_BLOCK);
		pr_debug("MDMA:Enabled Block interrupt\n");
	}
	return &desc->txd;
}
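
/*
 * Typical client flow (a sketch of the standard dmaengine sequence, not
 * anything specific to this file): after DMA_SLAVE_CONFIG, the peripheral
 * driver calls device_prep_slave_sg(), submits the returned descriptor with
 * txd->tx_submit() (intel_mid_dma_tx_submit() above), and then calls
 * device_issue_pending(); the completion callback, if any, runs from
 * midc_descriptor_complete() in the tasklet path.
 */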

/**
 * intel_mid_dma_free_chan_resources -	Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc, *_desc;

	if (true == midc->busy) {
		/*trying to free ch in use!!!!!*/
		pr_err("ERR_MDMA: trying to free ch in use\n");
	}
	pm_runtime_put(&mid->pdev->dev);
	spin_lock_bh(&midc->lock);
	midc->descs_allocated = 0;
	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
		list_del(&desc->desc_node);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = false;
	midc->busy = false;
	/* Disable CH interrupts */
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
	iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
}

/**
 * intel_mid_dma_alloc_chan_resources -	Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Return the descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
	struct middma_device	*mid = to_middma_device(chan->device);
	struct intel_mid_dma_desc	*desc;
	dma_addr_t		phys;
	int	i = 0;

	pm_runtime_get_sync(&mid->pdev->dev);

	if (mid->state == SUSPENDED) {
		if (dma_resume(mid->pdev)) {
			pr_err("ERR_MDMA: resume failed");
			return -EFAULT;
		}
	}

	/* ASSERT: channel is idle */
	if (test_ch_en(mid->dma_base, midc->ch_id)) {
		/*ch is not idle*/
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
		return -EIO;
	}
	midc->completed = chan->cookie = 1;

	spin_lock_bh(&midc->lock);
	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
		spin_unlock_bh(&midc->lock);
		desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
		if (!desc) {
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
			return -ENOMEM;
			/*check*/
		}
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;
		spin_lock_bh(&midc->lock);
		i = ++midc->descs_allocated;
		list_add_tail(&desc->desc_node, &midc->free_list);
	}
	spin_unlock_bh(&midc->lock);
	midc->in_use = true;
	midc->busy = false;
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
	return i;
}

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
		struct intel_mid_dma_chan *midc)
{
	midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet -	DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
	struct middma_device	*mid = NULL;
	struct intel_mid_dma_chan *midc = NULL;
	u32 status, raw_tfr, raw_block;
	int i;

	mid = (struct middma_device *)data;
	if (mid == NULL) {
		pr_err("ERR_MDMA: tasklet Null param\n");
		return;
	}
	pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
	raw_tfr = ioread32(mid->dma_base + RAW_TFR);
	raw_block = ioread32(mid->dma_base + RAW_BLOCK);
	status = raw_tfr | raw_block;
	status &= mid->intr_mask;
	while (status) {
		/*txn interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);
		midc->raw_tfr = raw_tfr;
		midc->raw_block = raw_block;
		spin_lock_bh(&midc->lock);
		/*clearing this interrupts first*/
		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
		if (raw_block) {
			iowrite32((1 << midc->ch_id),
				mid->dma_base + CLEAR_BLOCK);
		}
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA:Scan of desc... complete, unmasking\n");
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_TFR);
		if (raw_block) {
			iowrite32(UNMASK_INTR_REG(midc->ch_id),
					mid->dma_base + MASK_BLOCK);
		}
		spin_unlock_bh(&midc->lock);
	}

	status = ioread32(mid->dma_base + RAW_ERR);
	status &= mid->intr_mask;
	while (status) {
		/*err interrupt*/
		i = get_ch_index(&status, mid->chan_base);
		if (i < 0) {
			pr_err("ERR_MDMA:Invalid ch index %x\n", i);
			return;
		}
		midc = &mid->ch[i];
		if (midc == NULL) {
			pr_err("ERR_MDMA:Null param midc\n");
			return;
		}
		pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
				status, midc->ch_id, i);

		iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		iowrite32(UNMASK_INTR_REG(midc->ch_id),
				mid->dma_base + MASK_ERR);
		spin_unlock_bh(&midc->lock);
	}
	pr_debug("MDMA:Exiting tasklet...\n");
	return;
}

static void dma_tasklet1(unsigned long data)
{
	pr_debug("MDMA:in tasklet1...\n");
	return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
	pr_debug("MDMA:in tasklet2...\n");
	return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt -	DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt if so then schedule the tasklet
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
	struct middma_device *mid = data;
	u32 tfr_status, err_status;
	int call_tasklet = 0;

	tfr_status = ioread32(mid->dma_base + RAW_TFR);
	err_status = ioread32(mid->dma_base + RAW_ERR);
	if (!tfr_status && !err_status)
		return IRQ_NONE;

	/*DMA Interrupt*/
	pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
	pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
	tfr_status &= mid->intr_mask;
	if (tfr_status) {
		/*need to disable intr*/
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
		iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		call_tasklet = 1;
	}
	err_status &= mid->intr_mask;
	if (err_status) {
		iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
		call_tasklet = 1;
	}
	if (call_tasklet)
		tasklet_schedule(&mid->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
	return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma -	Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
	struct middma_device *dma = pci_get_drvdata(pdev);
	int err, i;

	/* DMA coherent memory pool for DMA descriptor allocations */
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
					sizeof(struct intel_mid_dma_desc),
					32, 0);
	if (NULL == dma->dma_pool) {
		pr_err("ERR_MDMA:pci_pool_create failed\n");
		err = -ENOMEM;
		goto err_dma_pool;
	}

	INIT_LIST_HEAD(&dma->common.channels);
	dma->pci_id = pdev->device;
	if (dma->pimr_mask) {
		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
					LNW_PERIPHRAL_MASK_SIZE);
		if (dma->mask_reg == NULL) {
			pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
			err = -ENOMEM;
			goto err_dma_pool;
		}
	} else
		dma->mask_reg = NULL;

	pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
	/*init CH structures*/
	dma->intr_mask = 0;
	dma->state = RUNNING;
	for (i = 0; i < dma->max_chan; i++) {
		struct intel_mid_dma_chan *midch = &dma->ch[i];

		midch->chan.device = &dma->common;
		midch->chan.cookie = 1;
		midch->ch_id = dma->chan_base + i;
		pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

		midch->dma_base = dma->dma_base;
		midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
		midch->dma = dma;
		dma->intr_mask |= 1 << (dma->chan_base + i);
		spin_lock_init(&midch->lock);

		INIT_LIST_HEAD(&midch->active_list);
		INIT_LIST_HEAD(&midch->queue);
		INIT_LIST_HEAD(&midch->free_list);
		/*mask interrupts*/
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_BLOCK);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_SRC_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_DST_TRAN);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_ERR);
		iowrite32(MASK_INTR_REG(midch->ch_id),
			dma->dma_base + MASK_TFR);

		disable_dma_interrupt(midch);
		list_add_tail(&midch->chan.device_node, &dma->common.channels);
	}
	pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

	/*init dma structure*/
	dma_cap_zero(dma->common.cap_mask);
	dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
	dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
	dma->common.dev = &pdev->dev;

	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;

	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;

	/*enable dma cntrl*/
	iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

	/*register irq */
	if (dma->pimr_mask) {
		pr_debug("MDMA:Requesting irq shared for DMAC1\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
			IRQF_SHARED, "INTEL_MID_DMAC1", dma);
		if (0 != err)
			goto err_irq;
	} else {
		dma->intr_mask = 0x03;
		pr_debug("MDMA:Requesting irq for DMAC2\n");
		err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
			IRQF_SHARED, "INTEL_MID_DMAC2", dma);
		if (0 != err)
			goto err_irq;
	}
	/*register device w/ engine*/
	err = dma_async_device_register(&dma->common);
	if (0 != err) {
		pr_err("ERR_MDMA:device_register failed: %d\n", err);
		goto err_engine;
	}
	if (dma->pimr_mask) {
		pr_debug("setting up tasklet1 for DMAC1\n");
		tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
	} else {
		pr_debug("setting up tasklet2 for DMAC2\n");
		tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
	}
	return 0;

err_engine:
	free_irq(pdev->irq, dma);
err_irq:
	pci_pool_destroy(dma->dma_pool);
err_dma_pool:
	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
	return err;

}

/**
 * middma_shutdown -	Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove
 * Unregister DMA controller, clear all structures and free interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
	pci_pool_destroy(device->dma_pool);
	if (device->mask_reg)
		iounmap(device->mask_reg);
	if (device->dma_base)
		iounmap(device->dma_base);
	free_irq(pdev->irq, device);
	return;
}

/**
 * intel_mid_dma_probe -	PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call mid_setup_dma to complete controller and chan initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
					const struct pci_device_id *id)
{
	struct middma_device *device;
	u32 base_addr, bar_size;
	struct intel_mid_dma_probe_info *info;
	int err;

	pr_debug("MDMA: probe for %x\n", pdev->device);
	info = (void *)id->driver_data;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
				info->max_chan, info->ch_base,
				info->block_size, info->pimr_mask);

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_request_regions(pdev, "intel_mid_dmac");
	if (err)
		goto err_request_regions;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_set_dma_mask;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		pr_err("ERR_MDMA:kzalloc failed probe\n");
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pci_dev_get(pdev);

	base_addr = pci_resource_start(pdev, 0);
	bar_size = pci_resource_len(pdev, 0);
	device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
	if (!device->dma_base) {
		pr_err("ERR_MDMA:ioremap failed\n");
		err = -ENOMEM;
		goto err_ioremap;
	}
	pci_set_drvdata(pdev, device);
	pci_set_master(pdev);
	device->max_chan = info->max_chan;
	device->chan_base = info->ch_base;
	device->block_size = info->block_size;
	device->pimr_mask = info->pimr_mask;

	err = mid_setup_dma(pdev);
	if (err)
		goto err_dma;

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_dma:
	iounmap(device->dma_base);
err_ioremap:
	pci_dev_put(pdev);
	kfree(device);
err_kzalloc:
err_set_dma_mask:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_request_regions:
err_enable_device:
	pr_err("ERR_MDMA:Probe failed %d\n", err);
	return err;
}

/**
 * intel_mid_dma_remove -	PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call middma_shutdown to complete controller and chan cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
	struct middma_device *device = pci_get_drvdata(pdev);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	middma_shutdown(pdev);
	pci_dev_put(pdev);
	kfree(device);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1330
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001331/* Power Management */
1332/*
1333* dma_suspend - PCI suspend function
1334*
1335* @pci: PCI device structure
1336* @state: PM message
1337*
1338* This function is called by OS when a power event occurs
1339*/
1340int dma_suspend(struct pci_dev *pci, pm_message_t state)
1341{
1342 int i;
1343 struct middma_device *device = pci_get_drvdata(pci);
1344 pr_debug("MDMA: dma_suspend called\n");
1345
1346 for (i = 0; i < device->max_chan; i++) {
1347 if (device->ch[i].in_use)
1348 return -EAGAIN;
1349 }
Vinod Koul4598fc22011-10-10 12:33:59 +05301350 dmac1_mask_periphral_intr(device);
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001351 device->state = SUSPENDED;
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001352 pci_save_state(pci);
1353 pci_disable_device(pci);
1354 pci_set_power_state(pci, PCI_D3hot);
1355 return 0;
1356}
1357
1358/**
1359* dma_resume - PCI resume function
1360*
1361* @pci: PCI device structure
1362*
1363* This function is called by OS when a power event occurs
1364*/
1365int dma_resume(struct pci_dev *pci)
1366{
1367 int ret;
1368 struct middma_device *device = pci_get_drvdata(pci);
1369
1370 pr_debug("MDMA: dma_resume called\n");
1371 pci_set_power_state(pci, PCI_D0);
1372 pci_restore_state(pci);
1373 ret = pci_enable_device(pci);
1374 if (ret) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001375 pr_err("MDMA: device can't be enabled for %x\n", pci->device);
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001376 return ret;
1377 }
1378 device->state = RUNNING;
1379 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001380 return 0;
1381}
1382
1383static int dma_runtime_suspend(struct device *dev)
1384{
1385 struct pci_dev *pci_dev = to_pci_dev(dev);
Kristen Carlson Accardie2142df2011-03-31 11:02:43 -07001386 struct middma_device *device = pci_get_drvdata(pci_dev);
1387
1388 device->state = SUSPENDED;
1389 return 0;
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001390}
1391
1392static int dma_runtime_resume(struct device *dev)
1393{
1394 struct pci_dev *pci_dev = to_pci_dev(dev);
Kristen Carlson Accardie2142df2011-03-31 11:02:43 -07001395 struct middma_device *device = pci_get_drvdata(pci_dev);
1396
1397 device->state = RUNNING;
1398 iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1399 return 0;
Koul, Vinod53a61ba2010-10-04 10:42:40 +00001400}
1401
1402static int dma_runtime_idle(struct device *dev)
1403{
1404 struct pci_dev *pdev = to_pci_dev(dev);
1405 struct middma_device *device = pci_get_drvdata(pdev);
1406 int i;
1407
1408 for (i = 0; i < device->max_chan; i++) {
1409 if (device->ch[i].in_use)
1410 return -EAGAIN;
1411 }
1412
1413 return pm_schedule_suspend(dev, 0);
1414}
1415
/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),	INFO(2, 6, 4095, 0x200020)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),	INFO(2, 0, 2047, 0)},
	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),	INFO(4, 0, 4095, 0x400040)},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
};

static struct pci_driver intel_mid_dma_pci_driver = {
	.name		=	"Intel MID DMA",
	.id_table	=	intel_mid_dma_ids,
	.probe		=	intel_mid_dma_probe,
	.remove		=	__devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
	.suspend = dma_suspend,
	.resume = dma_resume,
	.driver = {
		.pm = &intel_mid_dma_pm,
	},
#endif
};

static int __init intel_mid_dma_init(void)
{
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
			INTEL_MID_DMA_DRIVER_VERSION);
	return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
	pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);