/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_2STAGE_GRP1	0x28
#define SPRD_DMA_GLB_2STAGE_GRP2	0x2c
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
#define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
#define SPRD_DMA_GLB_FRAG_DONE_TRG	BIT(16)
#define SPRD_DMA_GLB_TRG_OFFSET		16
#define SPRD_DMA_GLB_DEST_CHN_MASK	GENMASK(13, 8)
#define SPRD_DMA_GLB_DEST_CHN_OFFSET	8
#define SPRD_DMA_GLB_SRC_CHN_MASK	GENMASK(5, 0)

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_LINKLIST_EN		BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS \
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END		BIT(19)
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc vd;
	struct sprd_dma_chn_hw chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan vc;
	void __iomem *chn_base;
	struct sprd_dma_linklist linklist;
	struct dma_slave_config slave_cfg;
	u32 chn_num;
	u32 dev_id;
	enum sprd_dma_chn_mode chn_mode;
	enum sprd_dma_trg_mode trg_mode;
	struct sprd_dma_desc *cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device dma_dev;
	void __iomem *glb_base;
	struct clk *clk;
	struct clk *ashb_clk;
	int irq;
	u32 total_chns;
	struct sprd_dma_chn channels[0];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so we only enable it when it was actually found.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/* Only disable the optional ashb_clk for AGCP DMA if it was found. */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 val, chn = schan->chn_num + 1;

	switch (schan->chn_mode) {
	case SPRD_DMA_SRC_CHN0:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_SRC_CHN1:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	case SPRD_DMA_DST_CHN0:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
		      SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_DST_CHN1:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
		      SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
		return -EINVAL;
	}

	return 0;
}

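/*
 * A sketch of how a client drives the 2-stage configuration above: the
 * channel mode and trigger mode are decoded from the prep flags in
 * sprd_dma_prep_slave_sg() below, so chaining two channels into one 2-stage
 * transfer roughly means (using the shift macros and channel mode enums
 * from include/linux/dma/sprd-dma.h; trigger value 1 selects the
 * fragment-done trigger per the BIT(trg_mode - 1) encoding above):
 *
 *	unsigned long src_flags = (SPRD_DMA_SRC_CHN0 << SPRD_DMA_CHN_MODE_SHIFT) |
 *				  (1UL << SPRD_DMA_TRG_MODE_SHIFT);
 *	unsigned long dst_flags = SPRD_DMA_DST_CHN0 << SPRD_DMA_CHN_MODE_SHIFT;
 *
 * passed as the flags argument when preparing the source and destination
 * channel descriptors, respectively.
 */
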
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set the 2-stage configuration if the channel starts a 2-stage
	 * transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

	return int_type >= req_mode + 1;
}

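/*
 * Note on sprd_dma_check_trans_done() above: it assumes the enum layout in
 * include/linux/dma/sprd-dma.h keeps SPRD_DMA_NO_INT at 0 and gives each
 * request mode's completion interrupt a larger value than the request mode
 * itself, so "int_type >= req_mode + 1" means the raised interrupt is at
 * least as coarse-grained as the requested transfer unit (fragment, block,
 * transaction or link-list).
 */
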
static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false, cyclic = false;
	u32 i;

	while (irq_status) {
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);
		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		sdesc = schan->cur_desc;

		/* cyclic mode schedule callback */
		cyclic = schan->linklist.phy_addr ? true : false;
		if (cyclic) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(sdesc, int_type,
							       req_type);
			if (trans_done) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	int ret;

	ret = pm_runtime_get_sync(chan->device->dev);
	if (ret < 0)
		return ret;

	schan->dev_id = SPRD_DMA_SOFTWARE_UID;
	return 0;
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;

	default:
		return -EINVAL;
	}
}

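/*
 * The two helpers above translate the same dma_slave_buswidth value into
 * the two encodings the hardware expects: sprd_dma_get_datawidth() returns
 * log2 of the bus width (0..3) for the FRG_LEN datawidth fields, while
 * sprd_dma_get_step() returns the width in bytes (1/2/4/8) for the address
 * step fields of SPRD_DMA_CHN_TRSF_STEP.
 */
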
static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	if (slave_cfg->slave_id)
		schan->dev_id = slave_cfg->slave_id;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to save the high 4 bits of the source and
	 * destination addresses, respectively.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * The fix mode can only be enabled when exactly one of the source and
	 * destination steps is 0: the side with a zero step then keeps a
	 * fixed address. If both steps are 0 or both are non-zero, the fix
	 * mode must stay disabled.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/* link-list index */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
		/*
		 * Set the link-list pointer to the next link-list
		 * configuration's physical address.
		 */
		hw->llist_ptr = schan->linklist.phy_addr + temp;
	} else {
		hw->llist_ptr = 0;
	}

	hw->frg_step = 0;
	hw->src_blk_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
				       unsigned int sglen, int sg_index,
				       dma_addr_t src, dma_addr_t dst, u32 len,
				       enum dma_transfer_direction dir,
				       unsigned long flags,
				       struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw;

	if (!schan->linklist.virt_addr)
		return -EINVAL;

	hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
					sg_index * sizeof(*hw));

	return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
				  dir, flags, slave_cfg);
}

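/*
 * A hedged sketch of the client side of the link-list mode: the context
 * pointer passed to the device_prep_slave_sg() callback is a struct
 * sprd_dma_linklist whose backing memory must stay DMA-accessible, since
 * hw->llist_ptr above makes the controller fetch each following
 * sprd_dma_chn_hw configuration from it, e.g. (client "dev" and "chan" are
 * hypothetical):
 *
 *	dma_addr_t pa;
 *	void *va = dma_alloc_coherent(dev, sglen * sizeof(struct sprd_dma_chn_hw),
 *				      &pa, GFP_KERNEL);
 *	struct sprd_dma_linklist ll_cfg = {
 *		.virt_addr = (unsigned long)va,
 *		.phy_addr = pa,
 *	};
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sglen, dir,
 *						  flags, &ll_cfg);
 */
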
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		       SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		      SPRD_DMA_HIGH_ADDR_MASK;

	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

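/*
 * sprd_dma_prep_dma_memcpy() above picks the widest data width and address
 * step purely from the alignment of the length (the caller presumably keeps
 * src/dest at least as aligned), and enables the transaction-done
 * interrupt, so a plain dmaengine_prep_dma_memcpy() plus
 * dma_async_issue_pending() sequence completes with one interrupt per copy.
 */
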
static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	if (!is_slave_direction(dir))
		return NULL;

	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
	}

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		/*
		 * The link-list mode needs at least 2 link-list
		 * configurations, so a single sg entry does not need
		 * a link-list configuration to be filled in.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	/* Set channel mode and trigger mode for 2-stage transfer */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;

	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
				 dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

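/*
 * The "flags" argument decoded above and in sprd_dma_fill_desc() is not the
 * generic DMA_PREP_* bitmask: it packs the Spreadtrum-specific interrupt
 * type, request mode, channel mode and trigger mode via the
 * SPRD_DMA_*_SHIFT macros from include/linux/dma/sprd-dma.h. A minimal
 * sketch for a fragment-granularity transfer raising a fragment-done
 * interrupt (assuming the SPRD_DMA_FRAG_REQ enum value from that header):
 *
 *	unsigned long flags = (SPRD_DMA_FRAG_REQ << SPRD_DMA_REQ_SHIFT) |
 *			      SPRD_DMA_FRAG_INT;
 */
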
static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 req = *(u32 *)param;

	if (req < sdev->total_chns)
		return req == schan->chn_num + 1;
	else
		return false;
}

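/*
 * Channel binding sketch: of_dma_simple_xlate() hands the first DT dma cell
 * to sprd_dma_filter_fn() above, which binds cell value N to channel N - 1;
 * the hardware request UID is programmed separately from the client's
 * dma_slave_config.slave_id (see sprd_dma_fill_desc()). A hypothetical
 * client node:
 *
 *	dmas = <&ap_dma 11>;	// selects DMA channel 10
 *	dma-names = "rx";
 */
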
static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may run without requesting its interrupt, which
	 * saves system power by not waking the system up for DMA interrupts.
	 * Thus the DMA interrupts property is optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* get each channel's registers base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");