/*
 * Copyright (c) 2014-2015,2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Qualcomm Technologies, Inc. DMA API for BAM (Bus Access Manager).
 * This DMA driver uses the sps-BAM API to access the HW; it is effectively a
 * DMA-engine wrapper around the sps-BAM API.
 *
 * Client channel configuration example:
 * struct dma_slave_config config = {
 *	.direction = DMA_MEM_TO_DEV,
 * };
 *
 * chan = dma_request_slave_channel(client_dev, "rx");
 * dmaengine_slave_config(chan, &config);
 */
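
/*
 * Illustrative transfer flow (a minimal sketch using generic dmaengine calls;
 * buffer mapping, cookie/error checks and the client-side names "sgl",
 * "sg_len" and "client_done" are placeholders, not part of this driver).
 * The flags argument is forwarded to sps_transfer_one() on the last buffer,
 * so an SPS flag such as SPS_IOVEC_FLAG_EOT is assumed here:
 *
 * desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				  SPS_IOVEC_FLAG_EOT);
 * desc->callback = client_done;
 * cookie = dmaengine_submit(desc);
 * dma_async_issue_pending(chan);
 */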

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/msm-sps.h>
#include "dmaengine.h"

#define QBAM_OF_SLAVE_N_ARGS	(4)
#define QBAM_OF_MANAGE_LOCAL	"qcom,managed-locally"
#define QBAM_OF_SUM_THRESHOLD	"qcom,summing-threshold"
#define QBAM_MAX_DESCRIPTORS	(0x100)
#define QBAM_MAX_CHANNELS	(32)

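/*
 * Illustrative devicetree controller node (a sketch only: the label, unit
 * address, register range, interrupt and threshold values below are
 * placeholders, not taken from a real board file). The properties match what
 * qbam_probe() reads, and #dma-cells = <4> matches QBAM_OF_SLAVE_N_ARGS:
 *
 * sps_dma: dma@f9984000 {
 *	compatible = "qcom,sps-dma";
 *	reg = <0xf9984000 0x19000>;
 *	interrupts = <0 239 0>;
 *	qcom,summing-threshold = <0x10>;
 *	qcom,managed-locally;
 *	#dma-cells = <4>;
 * };
 */
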
/*
 * qbam_async_tx_descriptor - dma descriptor plus a list of xfer_bufs
 *
 * @sgl scatterlist of transfer buffers
 * @sg_len size of that list
 * @flags dma xfer flags
 */
struct qbam_async_tx_descriptor {
	struct dma_async_tx_descriptor	dma_desc;
	struct scatterlist		*sgl;
	unsigned int			sg_len;
	unsigned long			flags;
};

#define DMA_TO_QBAM_ASYNC_DESC(dma_async_desc) \
	container_of(dma_async_desc, struct qbam_async_tx_descriptor, dma_desc)

struct qbam_channel;
/*
 * qbam_device - top level device of current driver
 * @handle bam sps handle.
 * @regs bam register space virtual base address.
 * @mem_resource bam register space resource.
 * @deregister_required if the bam was registered by this driver, it needs to
 *    be deregistered by this driver.
 * @manage is the bam managed locally or remotely.
 * @summing_threshold event threshold.
 * @irq bam interrupt line.
 * @channels has the same channels as qbam_dev->dma_dev.channels but
 *    supports fast access by pipe index.
 */
struct qbam_device {
	struct dma_device	dma_dev;
	void __iomem		*regs;
	struct resource		*mem_resource;
	ulong			handle;
	bool			deregister_required;
	u32			summing_threshold;
	u32			manage;
	int			irq;
	struct qbam_channel	*channels[QBAM_MAX_CHANNELS];
};

/* qbam_pipe: aggregate of bam pipe related entries of qbam_channel */
struct qbam_pipe {
	u32			index;
	struct sps_pipe		*handle;
	struct sps_connect	cfg;
	u32			num_descriptors;
	u32			sps_connect_flags;
	u32			sps_register_event_flags;
};

/*
 * qbam_channel - dma channel plus bam pipe info and current pending transfers
 *
 * @direction is it a producer or consumer (MEM => DEV or DEV => MEM)
 * @pending_desc next set of transfers to process
 * @error last error that took place on the current pending_desc
 */
struct qbam_channel {
	struct qbam_pipe		bam_pipe;

	struct dma_chan			chan;
	enum dma_transfer_direction	direction;
	struct qbam_async_tx_descriptor	pending_desc;

	struct qbam_device		*qbam_dev;
	struct mutex			lock;
	int				error;
};
#define DMA_TO_QBAM_CHAN(dma_chan) \
	container_of(dma_chan, struct qbam_channel, chan)
#define qbam_err(qbam_dev, fmt ...) dev_err(qbam_dev->dma_dev.dev, fmt)

/* qbam_disconnect_chan - disconnect a channel */
static int qbam_disconnect_chan(struct qbam_channel *qbam_chan)
{
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
	struct sps_pipe *pipe_handle = qbam_chan->bam_pipe.handle;
	struct sps_connect pipe_config_no_irq = {.options = SPS_O_POLL};
	int ret;

	/*
	 * SW workaround:
	 * When disconnecting a BAM pipe a spurious interrupt sometimes
	 * appears. To avoid that, we change the pipe setting from interrupt
	 * (default) to polling (SPS_O_POLL) before disconnecting the pipe.
	 */
	ret = sps_set_config(pipe_handle, &pipe_config_no_irq);
	if (ret)
		qbam_err(qbam_dev,
			"error:%d sps_set_config(pipe:%d) before disconnect\n",
			ret, qbam_chan->bam_pipe.index);

	ret = sps_disconnect(pipe_handle);
	if (ret)
		qbam_err(qbam_dev, "error:%d sps_disconnect(pipe:%d)\n",
			ret, qbam_chan->bam_pipe.index);

	return ret;
}

/* qbam_free_chan - disconnect channel and free its resources */
static void qbam_free_chan(struct dma_chan *chan)
{
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;

	mutex_lock(&qbam_chan->lock);
	if (qbam_disconnect_chan(qbam_chan))
		qbam_err(qbam_dev,
			"error free_chan() failed to disconnect(pipe:%d)\n",
			qbam_chan->bam_pipe.index);
	qbam_chan->pending_desc.sgl = NULL;
	qbam_chan->pending_desc.sg_len = 0;
	mutex_unlock(&qbam_chan->lock);
}

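/*
 * qbam_dma_xlate - map a devicetree dma-spec to a channel
 *
 * The four cells of the dma-spec (see QBAM_OF_SLAVE_N_ARGS) are interpreted
 * below as: pipe index, number of descriptors, sps_connect flags and
 * sps_register_event flags. An illustrative client entry (label and values
 * are placeholders only, not from a real board file) might look like:
 *
 *	dmas = <&sps_dma 6 32 0x0 0x0>, <&sps_dma 7 32 0x0 0x0>;
 *	dma-names = "tx", "rx";
 */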
static struct dma_chan *qbam_dma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *of)
{
	struct qbam_device *qbam_dev = of->of_dma_data;
	struct qbam_channel *qbam_chan;
	u32 channel_index;
	u32 num_descriptors;

	if (dma_spec->args_count != QBAM_OF_SLAVE_N_ARGS) {
		qbam_err(qbam_dev,
			"invalid number of dma arguments, expect:%d got:%d\n",
			QBAM_OF_SLAVE_N_ARGS, dma_spec->args_count);
		return NULL;
	}

	channel_index = dma_spec->args[0];

	if (channel_index >= QBAM_MAX_CHANNELS) {
		qbam_err(qbam_dev,
			"error: channel_index:%d out of bounds\n",
			channel_index);
		return NULL;
	}
	qbam_chan = qbam_dev->channels[channel_index];
	/* return qbam_chan if it exists, or create one */
	if (qbam_chan) {
		qbam_chan->chan.client_count = 1;
		return &qbam_chan->chan;
	}

	num_descriptors = dma_spec->args[1];
	if (!num_descriptors || (num_descriptors > QBAM_MAX_DESCRIPTORS)) {
		qbam_err(qbam_dev,
			"invalid number of descriptors, range[1..%d] got:%d\n",
			QBAM_MAX_DESCRIPTORS, num_descriptors);
		return NULL;
	}

	/* allocate a channel */
	qbam_chan = kzalloc(sizeof(*qbam_chan), GFP_KERNEL);
	if (!qbam_chan)
		return NULL;

	/* allocate BAM resources for that channel */
	qbam_chan->bam_pipe.handle = sps_alloc_endpoint();
	if (!qbam_chan->bam_pipe.handle) {
		qbam_err(qbam_dev, "error: sps_alloc_endpoint() returned NULL\n");
		kfree(qbam_chan);
		return NULL;
	}

	/* init dma_chan */
	qbam_chan->chan.device = &qbam_dev->dma_dev;
	dma_cookie_init(&qbam_chan->chan);
	qbam_chan->chan.client_count = 1;
	/* init qbam_chan */
	qbam_chan->bam_pipe.index = channel_index;
	qbam_chan->bam_pipe.num_descriptors = num_descriptors;
	qbam_chan->bam_pipe.sps_connect_flags = dma_spec->args[2];
	qbam_chan->bam_pipe.sps_register_event_flags = dma_spec->args[3];
	qbam_chan->qbam_dev = qbam_dev;
	mutex_init(&qbam_chan->lock);

	/* add to dma_device list of channels */
	list_add(&qbam_chan->chan.device_node, &qbam_dev->dma_dev.channels);
	qbam_dev->channels[channel_index] = qbam_chan;

	return &qbam_chan->chan;
}

static enum dma_status qbam_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
	enum dma_status ret;

	mutex_lock(&qbam_chan->lock);

	if (qbam_chan->error) {
		mutex_unlock(&qbam_chan->lock);
		return DMA_ERROR;
	}

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_IN_PROGRESS) {
		struct scatterlist *sg;
		int i;
		u32 transfer_size = 0;

		for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i)
			transfer_size += sg_dma_len(sg);

		dma_set_residue(state, transfer_size);
	}
	mutex_unlock(&qbam_chan->lock);

	return ret;
}

/*
 * qbam_init_bam_handle - find or create bam handle.
 *
 * A BAM device needs to be registered for each BLSP once and only once. If it
 * was already registered, we find the handle of the registered bam and return
 * it; otherwise we register it here.
 * The module which registered the BAM is responsible for deregistering it.
 */
static int qbam_init_bam_handle(struct qbam_device *qbam_dev)
{
	int ret = 0;
	struct sps_bam_props bam_props = {0};

	/*
	 * Check if BAM is already registered with SPS on the current
	 * BLSP. If it isn't, then go ahead and register it.
	 */
	ret = sps_phy2h(qbam_dev->mem_resource->start, &qbam_dev->handle);
	if (qbam_dev->handle)
		return 0;

	qbam_dev->regs = devm_ioremap_resource(qbam_dev->dma_dev.dev,
						qbam_dev->mem_resource);
	if (IS_ERR(qbam_dev->regs)) {
		qbam_err(qbam_dev, "error:%ld ioremap(phy:0x%lx len:0x%lx)\n",
			PTR_ERR(qbam_dev->regs),
			(ulong) qbam_dev->mem_resource->start,
			(ulong) resource_size(qbam_dev->mem_resource));
		return PTR_ERR(qbam_dev->regs);
	}

	bam_props.phys_addr = qbam_dev->mem_resource->start;
	bam_props.virt_addr = qbam_dev->regs;
	bam_props.summing_threshold = qbam_dev->summing_threshold;
	bam_props.manage = qbam_dev->manage;
	bam_props.irq = qbam_dev->irq;

	ret = sps_register_bam_device(&bam_props, &qbam_dev->handle);
	if (ret)
		qbam_err(qbam_dev, "error:%d sps_register_bam_device\n"
			"(phy:0x%lx virt:0x%lx irq:%d)\n",
			ret, (ulong) bam_props.phys_addr,
			(ulong) bam_props.virt_addr, qbam_dev->irq);
	else
		qbam_dev->deregister_required = true;

	return ret;
}


static int qbam_alloc_chan(struct dma_chan *chan)
{
	return 0;
}

static void qbam_eot_callback(struct sps_event_notify *notify)
{
	struct qbam_async_tx_descriptor *qbam_desc = notify->data.transfer.user;
	struct dma_async_tx_descriptor *dma_desc = &qbam_desc->dma_desc;
	dma_async_tx_callback callback = dma_desc->callback;
	void *param = dma_desc->callback_param;

	if (callback)
		callback(param);
}

static void qbam_error_callback(struct sps_event_notify *notify)
{
	struct qbam_channel *qbam_chan = notify->user;

	qbam_err(qbam_chan->qbam_dev, "error: qbam_error_callback(pipe:%d)\n",
		qbam_chan->bam_pipe.index);
}

static int qbam_connect_chan(struct qbam_channel *qbam_chan)
{
	int ret = 0;
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
	struct sps_register_event bam_eot_event = {
		.mode = SPS_TRIGGER_CALLBACK,
		.options = qbam_chan->bam_pipe.sps_register_event_flags,
		.callback = qbam_eot_callback,
	};
	struct sps_register_event bam_error_event = {
		.mode = SPS_TRIGGER_CALLBACK,
		.options = SPS_O_ERROR,
		.callback = qbam_error_callback,
		.user = qbam_chan,
	};

	ret = sps_connect(qbam_chan->bam_pipe.handle, &qbam_chan->bam_pipe.cfg);
	if (ret) {
		qbam_err(qbam_dev, "error:%d sps_connect(pipe:%d)\n", ret,
			qbam_chan->bam_pipe.index);
		return ret;
	}

	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_eot_event);
	if (ret) {
		qbam_err(qbam_dev, "error:%d sps_register_event(eot@pipe:%d)\n",
			ret, qbam_chan->bam_pipe.index);
		goto need_disconnect;
	}

	ret = sps_register_event(qbam_chan->bam_pipe.handle, &bam_error_event);
	if (ret) {
		qbam_err(qbam_dev, "error:%d sps_register_event(err@pipe:%d)\n",
			ret, qbam_chan->bam_pipe.index);
		goto need_disconnect;
	}

	return 0;

need_disconnect:
	/* preserve the registration error; report the disconnect failure */
	if (sps_disconnect(qbam_chan->bam_pipe.handle))
		qbam_err(qbam_dev, "error sps_disconnect(pipe:%d)\n",
			qbam_chan->bam_pipe.index);
	return ret;
}

/*
 * qbam_slave_cfg - configure and connect a BAM pipe
 *
 * @cfg only cfg->direction is used
 */
static int qbam_slave_cfg(struct dma_chan *chan,
		struct dma_slave_config *cfg)
{
	int ret = 0;
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
	struct sps_connect *pipe_cfg = &qbam_chan->bam_pipe.cfg;

	if (!qbam_dev->handle) {
		ret = qbam_init_bam_handle(qbam_dev);
		if (ret)
			return ret;
	}

	if (qbam_chan->bam_pipe.cfg.desc.base)
		goto cfg_done;

	ret = sps_get_config(qbam_chan->bam_pipe.handle,
				&qbam_chan->bam_pipe.cfg);
	if (ret) {
		qbam_err(qbam_dev, "error:%d sps_get_config(0x%p)\n",
			ret, qbam_chan->bam_pipe.handle);
		return ret;
	}

	qbam_chan->direction = cfg->direction;
	if (cfg->direction == DMA_MEM_TO_DEV) {
		pipe_cfg->source = SPS_DEV_HANDLE_MEM;
		pipe_cfg->destination = qbam_dev->handle;
		pipe_cfg->mode = SPS_MODE_DEST;
		pipe_cfg->src_pipe_index = 0;
		pipe_cfg->dest_pipe_index = qbam_chan->bam_pipe.index;
	} else {
		pipe_cfg->source = qbam_dev->handle;
		pipe_cfg->destination = SPS_DEV_HANDLE_MEM;
		pipe_cfg->mode = SPS_MODE_SRC;
		pipe_cfg->src_pipe_index = qbam_chan->bam_pipe.index;
		pipe_cfg->dest_pipe_index = 0;
	}
	pipe_cfg->options = qbam_chan->bam_pipe.sps_connect_flags;
	pipe_cfg->desc.size = (qbam_chan->bam_pipe.num_descriptors + 1) *
				sizeof(struct sps_iovec);
	/* managed dma_alloc_coherent() */
	pipe_cfg->desc.base = dmam_alloc_coherent(qbam_dev->dma_dev.dev,
						pipe_cfg->desc.size,
						&pipe_cfg->desc.phys_base,
						GFP_KERNEL);
	if (!pipe_cfg->desc.base) {
		qbam_err(qbam_dev,
			"error dma_alloc_coherent(desc-sz:%llu * n-descs:%d)\n",
			(u64) sizeof(struct sps_iovec),
			qbam_chan->bam_pipe.num_descriptors);
		return -ENOMEM;
	}
cfg_done:
	ret = qbam_connect_chan(qbam_chan);
	if (ret)
		dmam_free_coherent(qbam_dev->dma_dev.dev, pipe_cfg->desc.size,
			pipe_cfg->desc.base, pipe_cfg->desc.phys_base);

	return ret;
}

static int qbam_flush_chan(struct dma_chan *chan)
{
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	int ret = qbam_disconnect_chan(qbam_chan);

	if (ret) {
		qbam_err(qbam_chan->qbam_dev,
			"error: disconnect flush(pipe:%d)\n",
			qbam_chan->bam_pipe.index);
		return ret;
	}
	ret = qbam_connect_chan(qbam_chan);
	if (ret)
		qbam_err(qbam_chan->qbam_dev,
			"error: reconnect flush(pipe:%d)\n",
			qbam_chan->bam_pipe.index);
	return ret;
}

/* qbam_tx_submit - sets the descriptor as the next one to be executed */
static dma_cookie_t qbam_tx_submit(struct dma_async_tx_descriptor *dma_desc)
{
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(dma_desc->chan);
	dma_cookie_t ret;

	mutex_lock(&qbam_chan->lock);

	ret = dma_cookie_assign(dma_desc);

	mutex_unlock(&qbam_chan->lock);

	return ret;
}

/*
 * qbam_prep_slave_sg - prepare the channel's pending descriptor from an sg list
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 * @return the newly created descriptor or negative ERR_PTR() on error
 */
static struct dma_async_tx_descriptor *qbam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;

	if (qbam_chan->direction != direction) {
		qbam_err(qbam_dev,
			"invalid dma transfer direction expected:%d given:%d\n",
			qbam_chan->direction, direction);
		return ERR_PTR(-EINVAL);
	}

	qbam_desc->dma_desc.chan = &qbam_chan->chan;
	qbam_desc->dma_desc.tx_submit = qbam_tx_submit;
	qbam_desc->sgl = sgl;
	qbam_desc->sg_len = sg_len;
	qbam_desc->flags = flags;
	return &qbam_desc->dma_desc;
}

/*
 * qbam_issue_pending - queue pending descriptor to BAM
 *
 * Iterate over the transfers of the pending descriptor and push them to bam
 */
static void qbam_issue_pending(struct dma_chan *chan)
{
	int i;
	int ret = 0;
	struct qbam_channel *qbam_chan = DMA_TO_QBAM_CHAN(chan);
	struct qbam_device *qbam_dev = qbam_chan->qbam_dev;
	struct qbam_async_tx_descriptor *qbam_desc = &qbam_chan->pending_desc;
	struct scatterlist *sg;

	mutex_lock(&qbam_chan->lock);
	if (!qbam_chan->pending_desc.sgl) {
		qbam_err(qbam_dev,
			"error qbam_issue_pending() no pending descriptor pipe:%d\n",
			qbam_chan->bam_pipe.index);
		mutex_unlock(&qbam_chan->lock);
		return;
	}

	for_each_sg(qbam_desc->sgl, sg, qbam_desc->sg_len, i) {

		/* Add BAM flags only on the last buffer */
		bool is_last_buf = (i == (qbam_desc->sg_len - 1));

		ret = sps_transfer_one(qbam_chan->bam_pipe.handle,
					sg_dma_address(sg), sg_dma_len(sg),
					qbam_desc,
					(is_last_buf ? qbam_desc->flags : 0));
		if (ret < 0) {
			qbam_chan->error = ret;

			qbam_err(qbam_dev, "error:%d sps_transfer_one\n"
				"(addr:0x%lx len:%d flags:0x%lx pipe:%d)\n",
				ret, (ulong) sg_dma_address(sg), sg_dma_len(sg),
				qbam_desc->flags, qbam_chan->bam_pipe.index);
			break;
		}
	}

	dma_cookie_complete(&qbam_desc->dma_desc);
	qbam_chan->error = 0;
	qbam_desc->sgl = NULL;
	qbam_desc->sg_len = 0;
	mutex_unlock(&qbam_chan->lock);
}

static int qbam_deregister_bam_dev(struct qbam_device *qbam_dev)
{
	int ret;

	if (!qbam_dev->handle)
		return 0;

	ret = sps_deregister_bam_device(qbam_dev->handle);
	if (ret)
		qbam_err(qbam_dev,
			"error:%d sps_deregister_bam_device(hndl:0x%lx) failed\n",
			ret, qbam_dev->handle);
	return ret;
}

static void qbam_pipes_free(struct qbam_device *qbam_dev)
{
	struct qbam_channel *qbam_chan_cur, *qbam_chan_next;

	list_for_each_entry_safe(qbam_chan_cur, qbam_chan_next,
				&qbam_dev->dma_dev.channels, chan.device_node) {
		mutex_lock(&qbam_chan_cur->lock);
		qbam_free_chan(&qbam_chan_cur->chan);
		sps_free_endpoint(qbam_chan_cur->bam_pipe.handle);
		list_del(&qbam_chan_cur->chan.device_node);
		mutex_unlock(&qbam_chan_cur->lock);
		kfree(qbam_chan_cur);
	}
}

static int qbam_probe(struct platform_device *pdev)
{
	struct qbam_device *qbam_dev;
	int ret;
	bool managed_locally;
	struct device_node *of_node = pdev->dev.of_node;

	qbam_dev = devm_kzalloc(&pdev->dev, sizeof(*qbam_dev), GFP_KERNEL);
	if (!qbam_dev)
		return -ENOMEM;

	qbam_dev->dma_dev.dev = &pdev->dev;
	platform_set_drvdata(pdev, qbam_dev);

	qbam_dev->mem_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!qbam_dev->mem_resource) {
		qbam_err(qbam_dev, "missing 'reg' DT entry\n");
		return -ENODEV;
	}

	qbam_dev->irq = platform_get_irq(pdev, 0);
	if (qbam_dev->irq < 0) {
		qbam_err(qbam_dev, "missing DT IRQ resource entry\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(of_node, QBAM_OF_SUM_THRESHOLD,
				&qbam_dev->summing_threshold);
	if (ret) {
		qbam_err(qbam_dev, "missing '%s' DT entry\n",
			QBAM_OF_SUM_THRESHOLD);
		return ret;
	}

	/* read from DT and set sps_bam_props.manage */
	managed_locally = of_property_read_bool(of_node, QBAM_OF_MANAGE_LOCAL);
	qbam_dev->manage = managed_locally ? SPS_BAM_MGR_LOCAL :
						SPS_BAM_MGR_DEVICE_REMOTE;

	/* Init channels */
	INIT_LIST_HEAD(&qbam_dev->dma_dev.channels);

	/* Set capabilities */
	dma_cap_zero(qbam_dev->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, qbam_dev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, qbam_dev->dma_dev.cap_mask);

	/* Initialize dmaengine callback apis */
	qbam_dev->dma_dev.device_alloc_chan_resources = qbam_alloc_chan;
	qbam_dev->dma_dev.device_free_chan_resources = qbam_free_chan;
	qbam_dev->dma_dev.device_prep_slave_sg = qbam_prep_slave_sg;
	qbam_dev->dma_dev.device_terminate_all = qbam_flush_chan;
	qbam_dev->dma_dev.device_config = qbam_slave_cfg;
	qbam_dev->dma_dev.device_issue_pending = qbam_issue_pending;
	qbam_dev->dma_dev.device_tx_status = qbam_tx_status;

	/* Register with the DMA framework */
	dma_async_device_register(&qbam_dev->dma_dev);

	/*
	 * Do not return error in order to not break the existing
	 * way of requesting channels.
	 */
	ret = of_dma_controller_register(of_node, qbam_dma_xlate, qbam_dev);
	if (ret) {
		qbam_err(qbam_dev, "error:%d of_dma_controller_register()\n",
			ret);
		goto err_unregister_dma;
	}
	return 0;

err_unregister_dma:
	dma_async_device_unregister(&qbam_dev->dma_dev);
	if (qbam_dev->deregister_required)
		return qbam_deregister_bam_dev(qbam_dev);

	return ret;
}

static int qbam_remove(struct platform_device *pdev)
{
	struct qbam_device *qbam_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&qbam_dev->dma_dev);

	/* free BAM pipes resources */
	qbam_pipes_free(qbam_dev);

	if (qbam_dev->deregister_required)
		return qbam_deregister_bam_dev(qbam_dev);

	return 0;
}

static const struct of_device_id qbam_of_match[] = {
	{ .compatible = "qcom,sps-dma" },
	{}
};
MODULE_DEVICE_TABLE(of, qbam_of_match);

static struct platform_driver qbam_driver = {
	.probe = qbam_probe,
	.remove = qbam_remove,
	.driver = {
		.name = "qcom-sps-dma",
		.owner = THIS_MODULE,
		.of_match_table = qbam_of_match,
	},
};

module_platform_driver(qbam_driver);

MODULE_DESCRIPTION("DMA-API driver for qcom BAM");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-sps-dma");