/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

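/*
 * CTL0/CTL3 layout (see pdc_set_dir()/pdc_set_mode() below): each
 * channel owns a 4-bit field in which bits 0-1 select the transfer
 * mode and bit 2 selects the transfer direction.
 */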
#define DMA_CTL0_DISABLE 0x0
#define DMA_CTL0_SG 0x1
#define DMA_CTL0_ONESHOT 0x2
#define DMA_CTL0_MODE_MASK_BITS 0x3
#define DMA_CTL0_DIR_SHIFT_BITS 2
#define DMA_CTL0_BITS_PER_CH 4

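/*
 * CTL2: per the defines below, the low 8 bits are per-channel
 * interrupt enables, and the bits from DMA_CTL2_START_SHIFT_BITS
 * upwards act as channel start bits.
 */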
#define DMA_CTL2_START_SHIFT_BITS 8
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE 0x0
#define DMA_STATUS_DESC_READ 0x1
#define DMA_STATUS_WAIT 0x2
#define DMA_STATUS_ACCESS 0x3
#define DMA_STATUS_BITS_PER_CH 2
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))

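/*
 * The "size" word of a hardware descriptor carries the transfer count
 * in its low bits and the access width in bits 12-13, which is why the
 * maximum count below differs per width.
 */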
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
#define DMA_DESC_END_WITHOUT_IRQ 0x0
#define DMA_DESC_END_WITH_IRQ 0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3

#define MAX_CHAN_NR 12 /* the ML7213 DMA4 function in the ID table has 12 channels */

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

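/*
 * One hardware descriptor as the DMA engine reads it from memory.
 * The same layout mirrors the per-channel DEV_ADDR/MEM_ADDR/SIZE/NEXT
 * register block (see the PDC_* offsets below).
 */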
struct pch_dma_desc_regs {
	u32 dev_addr;
	u32 mem_addr;
	u32 size;
	u32 next;
};

struct pch_dma_regs {
	u32 dma_ctl0;
	u32 dma_ctl1;
	u32 dma_ctl2;
	u32 dma_ctl3;
	u32 dma_sts0;
	u32 dma_sts1;
	u32 dma_sts2;
	u32 reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head desc_node;
	struct list_head tx_list;
};

struct pch_dma_chan {
	struct dma_chan chan;
	void __iomem *membase;
	enum dma_data_direction dir;
	struct tasklet_struct tasklet;
	unsigned long err_status;

	spinlock_t lock;

	dma_cookie_t completed_cookie;
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int descs_allocated;
};

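/* Per-channel descriptor register offsets, relative to pd_chan->membase. */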
#define PDC_DEV_ADDR 0x00
#define PDC_MEM_ADDR 0x04
#define PDC_SIZE 0x08
#define PDC_NEXT 0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device dma;
	void __iomem *membase;
	struct pci_pool *pool;
	struct pch_dma_regs regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

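/* Enable or disable the interrupt of one channel via its bit in CTL2. */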
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << chan->chan_id;
	else
		val &= ~(0x1 << chan->chan_id);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

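/*
 * Program the transfer direction of a channel.  Channels 0-7 are
 * configured in CTL0; channels 8-11 use the same per-channel bit
 * layout in CTL3.
 */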
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */

		val = dma_readl(pd, CTL3);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

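/* Set the mode of a channel: disabled, scatter-gather, or one-shot. */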
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */

		val = dma_readl(pd, CTL3);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * ch));
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

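/*
 * Read the 2-bit status field of a channel from STS0.  Note that STS0
 * only carries channels 0-7; going by the register map above, channels
 * 8-11 would report through dma_sts2, which this version does not read.
 */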
static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	return pdc_get_status(pd_chan) == DMA_STATUS_IDLE;
}

static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		/* Single descriptor: program the channel registers directly. */
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		/* Chain: point NEXT at the first descriptor and let the
		 * engine walk the scatter-gather list from memory. */
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

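/*
 * Retire one descriptor chain: record its cookie as completed, return
 * its descriptors to the free list and run the completion callback,
 * if any.
 */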
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	/* Record the cookie as completed before invoking the callback
	 * so that pd_tx_status() reports this transfer as done. */
	pd_chan->completed_cookie = txd->cookie;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

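/*
 * Handle a channel error: drop the failed descriptor, restart the
 * channel with whatever is still pending, then retire the bad
 * descriptor so its callback still runs.
 */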
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

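/* Hand out the next cookie for this channel, wrapping back to 1 on overflow. */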
static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return cookie;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

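/*
 * Take the first ACKed descriptor off the free list, or fall back to
 * allocating a fresh one from the PCI pool.
 */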
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

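/*
 * dmaengine callback: pre-allocate the initial descriptors for a
 * channel and enable its interrupt.
 */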
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_bh(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_bh(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_bh(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

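/*
 * dmaengine callback: build a chain of hardware descriptors for a
 * slave scatter-gather transfer.  The peripheral FIFO address is taken
 * from the pch_dma_slave data hung off chan->private.
 */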
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

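/*
 * dmaengine control hook.  Only DMA_TERMINATE_ALL is supported:
 * disable the channel and retire everything that was active or queued.
 */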
static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_bh(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

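/*
 * Shared interrupt handler: read STS0 once, latch any error bit in
 * err_status and schedule the tasklet of every channel that raised an
 * interrupt.
 */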
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	int i;
	int ret = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (sts0 & DMA_STATUS_IRQ(i)) {
			if (sts0 & DMA_STATUS_ERR(i))
				set_bit(0, &pd_chan->err_status);

			tasklet_schedule(&pd_chan->tasklet);
			ret = IRQ_HANDLED;
		}
	}

	/* clear interrupt bits in status register */
	dma_writel(pd, STS0, sts0);

	return ret;
}

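/* Power management: save and restore the controller state around suspend. */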
#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

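/*
 * Probe: map BAR 1 (the DMA controller registers), set up per-channel
 * state and register the controller with the dmaengine core.
 */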
static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(struct pch_dma) +
		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;
	pd->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;
		pd_chan->chan.chan_id = i;

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");