/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at runtime by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * once the kernel has booted.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
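
/*
 * Example (illustrative, not compiled): the default above can be changed
 * from user space once the device has been enumerated, e.g.
 *
 *   echo 5000 > /sys/bus/platform/devices/QCOM8061:00/power/autosuspend_delay_ms
 *
 * The ":00" instance suffix is hypothetical; it depends on how ACPI
 * enumerated the device.
 */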

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
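
/*
 * Example (illustrative): assuming this driver is built as the "hdma"
 * module (the exact name comes from the build configuration), the
 * descriptor count could be raised at load time with
 *
 *   modprobe hdma nr_desc_prm=32
 *
 * or with hdma.nr_desc_prm=32 on the kernel command line when built in.
 */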

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;

		desc = &mdesc->desc;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		last_cookie = desc->cookie;
		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		if (llstat == DMA_COMPLETE)
			dmaengine_desc_callback_invoke(&cb, NULL);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
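
/*
 * Illustrative client sequence (not part of this driver): a dmaengine
 * consumer would typically exercise the memcpy path through the generic
 * API, roughly:
 *
 *   chan = dma_request_channel(mask, NULL, NULL);
 *   txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *   cookie = dmaengine_submit(txd);
 *   dma_async_issue_pending(chan);
 *
 * which ends up in hidma_prep_dma_memcpy(), hidma_tx_submit() and
 * hidma_issue_pending() above. The variable names are placeholders; the
 * calls are the generic dmaengine API, not something defined in this file.
 */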

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
				    int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return -ENOMEM;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return device_create_file(dev->ddev.dev, attrs);
}
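
/*
 * Example (illustrative): once hidma_probe() below creates the "chid"
 * attribute, the channel index can be read from user space, e.g.
 *
 *   cat /sys/bus/platform/devices/QCOM8061:00/chid
 *
 * The ":00" instance suffix is hypothetical and depends on how the
 * device was enumerated.
 */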

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
			      "qcom-hidma", dmadev->lldev);
	if (rc)
		goto uninit;

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	platform_set_drvdata(pdev, dmadev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	tasklet_kill(&dmadev->task);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{},
};
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");