/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "idma64.h"

/* Platform driver name */
#define DRV_NAME		"idma64"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2

/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

/* ---------------------------------------------------------------------- */

static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

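	/*
	 * Poll until the controller reports itself disabled; give up after
	 * 100 iterations so a stuck device cannot hang the caller.
	 */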
	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}

static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Make sure the controller is turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}

static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];
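
	/*
	 * The channel runs in linked-list mode (LLP_S_EN | LLP_D_EN): the
	 * per-block addresses and control words come from the LLP chain, so
	 * SAR/DAR written here are placeholders and CTL_HI is preset to the
	 * maximum block size.
	 */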
	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}

static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}

/* ---------------------------------------------------------------------- */

static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------- */

static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}

static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}

static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

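	/*
	 * The device side uses the width given in dma_slave_config (__ffs()
	 * turns the byte count into the register encoding). The memory side
	 * uses the widest power-of-2 access that both the buffer address and
	 * the length allow, capped at 4 bytes by OR-ing in 4.
	 */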
	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = __ffs(sar | hw->len | 4);
		dst_width = __ffs(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __ffs(config->src_addr_width);
		dst_width = __ffs(dar | hw->len | 4);
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
}

static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	unsigned int i = desc->ndesc;
	struct idma64_hw_desc *hw = &desc->hw[i - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;

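	/*
	 * Build the chain in reverse so that each LLI can point at the
	 * successor that was just filled; llp == 0 terminates the list.
	 */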
	/* Fill the hardware descriptors and link them into a list */
	do {
		hw = &desc->hw[--i];
		idma64_hw_desc_fill(hw, config, desc->direction, llp);
		llp = hw->llp;
		desc->length += hw->len;
	} while (i);

	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;
}

static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for the hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
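			/* Only i LLIs were allocated; free just those */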
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}

static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

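	/*
	 * The LLP register points at the next LLI to be fetched. Subtract
	 * the blocks completed before the match, step back to the block
	 * currently in flight, and deduct what CTL_HI.BLOCK_TS reports as
	 * already transferred.
	 */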
	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}

static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}

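/*
 * Encode a burst size in items as its log2 for the CTL MSIZE fields
 * (e.g. 8 -> 3); an unset maxburst stays zero.
 */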
static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}

static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(config->direction))
		return -EINVAL;

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}

static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
	unsigned short count = 100;
	u32 cfglo;

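	/*
	 * Drain the FIFO when terminating a transfer, keep its contents when
	 * merely pausing, then suspend the channel and poll (for at most
	 * ~100 us) until the FIFO reports empty.
	 */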
	cfglo = channel_readl(idma64c, CFG_LO);
	if (drain)
		cfglo |= IDMA64C_CFGL_CH_DRAIN;
	else
		cfglo &= ~IDMA64C_CFGL_CH_DRAIN;

	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}

static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}

static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c, false);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c, true);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}

static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}

static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}

/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->dev;

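	/*
	 * A single LLI cannot cover more than the BLOCK_TS field can count,
	 * so advertise that as the maximum segment size to DMA clients.
	 */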
	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}

static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly call devm_free_irq() to avoid the side effects with
	 * the scheduled tasklets.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}

/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}

static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}

#ifdef CONFIG_PM_SLEEP

static int idma64_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};

static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &idma64_dev_pm_ops,
	},
};

module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" DRV_NAME);