/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at runtime through
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
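/*
 * Each of the HIDMA_MSI_INTS MSI vectors maps to one interrupt cause bit
 * in the low level driver; see hidma_chirq_handler_msi().
 */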
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

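/*
 * The MSI handlers are registered with a pointer to the lldev field
 * itself (see hidma_request_msi()), so map that double pointer back to
 * the enclosing hidma_dev.
 */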
static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

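/*
 * A firmware-supplied "desc-count" property takes precedence over this
 * module parameter; 0 here means fall back to HIDMA_NR_DEFAULT_DESC.
 */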
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

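/*
 * Completed descriptors are spliced onto a private list under the
 * channel lock and their callbacks run without it, so a callback may
 * safely queue new work on the same channel.
 */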
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

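	/*
	 * Drop the PM reference taken in hidma_tx_submit() for this
	 * descriptor now that it has completed.
	 */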
	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/*
	 * pm_runtime_get() is asynchronous; if the device cannot be
	 * resumed yet, defer the start to hidma_issue_task(), which
	 * resumes it synchronously. The PM reference is released in
	 * the hidma_callback function.
	 */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

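/*
 * Cookies occupy a circular sequence space: the interval
 * (last_success, last_used] holds the cookies still in flight and
 * everything outside it has completed. For example, last_success = 5
 * and last_used = 8 leave cookies 6..8 pending; after a wraparound,
 * last_success = 100 and last_used = 3 leave 101..MAX and 1..3 pending.
 */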
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

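	/* Convert the vector number back into a single interrupt cause bit. */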
	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

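/*
 * The attribute and its name string are devm-allocated, so only the
 * sysfs file itself needs explicit removal.
 */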
static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
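/*
 * Only MSI vector 0 programs the address/data registers; the hardware is
 * assumed to derive the remaining HIDMA_MSI_INTS - 1 vectors from that
 * base message.
 */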
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free the allocated MSI interrupts */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free the MSI interrupts allocated above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}

	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

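/* Only the hidma-1.1 variant (ACPI: QCOM8062) supports MSI. */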
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

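	/* The TRCA block reports this channel's index at offset 0x28. */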
	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");