/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan) (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY 100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */

enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since 9540 and 8540 have the same HW revision,
 * use v4a for 9540 or earlier,
 * use v4b for 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
	{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
	{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
	{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * the event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @done: Completed jobs
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_parms: DMA parameters for the device.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32 *reg_val_backup_chan;
	u16 gcc_pwr_off_mask;
	bool initialized;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...) \
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...) \
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

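		/*
		 * Over-allocate by one alignment unit so the src/dst LLI
		 * arrays can be aligned manually with PTR_ALIGN() below.
		 */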
		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time; therefore the loop
	 * starts at 1, since 0 can't be used as it is the end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controllers during a peripheral to memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
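		/*
		 * Each physical channel owns a 1024 byte slice of LCLA; an
		 * entry is a src/dst pair of 8 byte logical LLIs, hence the
		 * 16 byte stride per link.
		 */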
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

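	/* The remaining psize values encode bursts of 2^(psize + 1) elements */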
	return 2 << psize;
}

/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

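	/* Even-numbered channels are controlled via ACTIVE, odd ones via ACTIVO */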
	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d) "
				 "status %x\n", d40c->phy_chan->num,
				 d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src
		 * and dst event lines are active on the same logical channel.
		 * Retry to ensure it does. Usually only one retry is
		 * sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

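	/* Non-zero SSLNK/SDLNK means event lines are still active on the channel */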
	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
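	/* chan_is_logical() is 0 or 1, so this writes 1 (physical) or 2 (logical) */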
Rabin Vincent724a8572011-01-25 11:18:08 +01001421 var = ((u32)(chan_is_logical(d40c)) + 1) <<
Linus Walleij8d318a52010-03-30 15:33:42 +02001422 D40_CHAN_POS(d40c->phy_chan->num);
1423 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
1424
1425 /* Setup operational mode option register */
Rabin Vincent20a5b6d2010-10-12 13:00:52 +00001426 var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);
Linus Walleij8d318a52010-03-30 15:33:42 +02001427
1428 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
1429
Rabin Vincent724a8572011-01-25 11:18:08 +01001430 if (chan_is_logical(d40c)) {
Rabin Vincent8ca84682011-01-25 11:18:07 +01001431 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
1432 & D40_SREG_ELEM_LOG_LIDX_MASK;
1433 void __iomem *chanbase = chan_base(d40c);
1434
Linus Walleij8d318a52010-03-30 15:33:42 +02001435 /* Set default config for CFG reg */
Rabin Vincent8ca84682011-01-25 11:18:07 +01001436 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
1437 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
Linus Walleij8d318a52010-03-30 15:33:42 +02001438
Jonas Aabergb55912c2010-08-09 12:08:02 +00001439 /* Set LIDX for lcla */
Rabin Vincent8ca84682011-01-25 11:18:07 +01001440 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
1441 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
Rabin Vincente9f3a492011-12-28 11:27:40 +05301442
1443 /* Clear LNK which will be used by d40_chan_has_events() */
1444 writel(0, chanbase + D40_CHAN_REG_SSLNK);
1445 writel(0, chanbase + D40_CHAN_REG_SDLNK);
Linus Walleij8d318a52010-03-30 15:33:42 +02001446 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001447}
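/*
 * Worked example for d40_config_write() (values illustrative): for
 * physical channel 5, addr_base = (5 % 2) * 4 = 4, so the mode is
 * written to the "odd" PRMSE/PRMOE register, and D40_CHAN_POS(5) =
 * 2 * (5 / 2) = 4, so the channel's two mode bits occupy bits 5:4
 * (1 = physical, 2 = logical).
 */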
1448
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001449static u32 d40_residue(struct d40_chan *d40c)
1450{
1451 u32 num_elt;
1452
Rabin Vincent724a8572011-01-25 11:18:08 +01001453 if (chan_is_logical(d40c))
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001454 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1455 >> D40_MEM_LCSP2_ECNT_POS;
Rabin Vincent8ca84682011-01-25 11:18:07 +01001456 else {
1457 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
1458 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
1459 >> D40_SREG_ELEM_PHY_ECNT_POS;
1460 }
1461
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001462 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1463}
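/*
 * d40_residue() returns bytes: the hardware reports remaining elements
 * (ECNT), and data_width is the log2 of the element size. E.g. 24
 * remaining elements at data_width 2 (4-byte elements) gives
 * 24 * (1 << 2) = 96 bytes left.
 */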
1464
1465static bool d40_tx_is_linked(struct d40_chan *d40c)
1466{
1467 bool is_link;
1468
Rabin Vincent724a8572011-01-25 11:18:08 +01001469 if (chan_is_logical(d40c))
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001470 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1471 else
Rabin Vincent8ca84682011-01-25 11:18:07 +01001472 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
1473 & D40_SREG_LNK_PHYS_LNK_MASK;
1474
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001475 return is_link;
1476}
1477
Rabin Vincent86eb5fb2011-01-25 11:18:34 +01001478static int d40_pause(struct d40_chan *d40c)
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001479{
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001480 int res = 0;
1481 unsigned long flags;
1482
Jonas Aaberg3ac012a2010-08-09 12:09:12 +00001483 if (!d40c->busy)
1484 return 0;
1485
Narayanan G7fb3e752011-11-17 17:26:41 +05301486 pm_runtime_get_sync(d40c->base->dev);
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001487 spin_lock_irqsave(&d40c->lock, flags);
1488
1489 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
Narayanan G1bdae6f2012-02-09 12:41:37 +05301490
Narayanan G7fb3e752011-11-17 17:26:41 +05301491 pm_runtime_mark_last_busy(d40c->base->dev);
1492 pm_runtime_put_autosuspend(d40c->base->dev);
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001493 spin_unlock_irqrestore(&d40c->lock, flags);
1494 return res;
1495}
1496
Rabin Vincent86eb5fb2011-01-25 11:18:34 +01001497static int d40_resume(struct d40_chan *d40c)
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001498{
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001499 int res = 0;
1500 unsigned long flags;
1501
Jonas Aaberg3ac012a2010-08-09 12:09:12 +00001502 if (!d40c->busy)
1503 return 0;
1504
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001505 spin_lock_irqsave(&d40c->lock, flags);
Narayanan G7fb3e752011-11-17 17:26:41 +05301506 pm_runtime_get_sync(d40c->base->dev);
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001507
1508 /* If bytes left to transfer or linked tx resume job */
Narayanan G1bdae6f2012-02-09 12:41:37 +05301509 if (d40_residue(d40c) || d40_tx_is_linked(d40c))
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001510 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001511
Narayanan G7fb3e752011-11-17 17:26:41 +05301512 pm_runtime_mark_last_busy(d40c->base->dev);
1513 pm_runtime_put_autosuspend(d40c->base->dev);
Jonas Aabergaa182ae2010-08-09 12:08:26 +00001514 spin_unlock_irqrestore(&d40c->lock, flags);
1515 return res;
1516}
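/*
 * The pm_runtime_get_sync()/pm_runtime_put_autosuspend() pairs in
 * d40_pause() and d40_resume() keep the DMAC powered while its
 * registers are touched and let it autosuspend again once the
 * autosuspend delay has expired after the last access.
 */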
1517
Linus Walleij8d318a52010-03-30 15:33:42 +02001518static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1519{
1520 struct d40_chan *d40c = container_of(tx->chan,
1521 struct d40_chan,
1522 chan);
1523 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1524 unsigned long flags;
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00001525 dma_cookie_t cookie;
Linus Walleij8d318a52010-03-30 15:33:42 +02001526
1527 spin_lock_irqsave(&d40c->lock, flags);
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00001528 cookie = dma_cookie_assign(tx);
Linus Walleij8d318a52010-03-30 15:33:42 +02001529 d40_desc_queue(d40c, d40d);
Linus Walleij8d318a52010-03-30 15:33:42 +02001530 spin_unlock_irqrestore(&d40c->lock, flags);
1531
Russell King - ARM Linux884485e2012-03-06 22:34:46 +00001532 return cookie;
Linus Walleij8d318a52010-03-30 15:33:42 +02001533}
1534
1535static int d40_start(struct d40_chan *d40c)
1536{
Jonas Aaberg0c322692010-06-20 21:25:46 +00001537 return d40_channel_execute_command(d40c, D40_DMA_RUN);
Linus Walleij8d318a52010-03-30 15:33:42 +02001538}
1539
1540static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1541{
1542 struct d40_desc *d40d;
1543 int err;
1544
1545 /* Start queued jobs, if any */
1546 d40d = d40_first_queued(d40c);
1547
1548 if (d40d != NULL) {
Narayanan G1bdae6f2012-02-09 12:41:37 +05301549 if (!d40c->busy) {
Narayanan G7fb3e752011-11-17 17:26:41 +05301550 d40c->busy = true;
Narayanan G1bdae6f2012-02-09 12:41:37 +05301551 pm_runtime_get_sync(d40c->base->dev);
1552 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001553
1554 /* Remove from queue */
1555 d40_desc_remove(d40d);
1556
1557 /* Add to active queue */
1558 d40_desc_submit(d40c, d40d);
1559
Rabin Vincent7d83a852011-01-25 11:18:06 +01001560 /* Initiate DMA job */
1561 d40_desc_load(d40c, d40d);
Jonas Aaberg698e4732010-08-09 12:08:56 +00001562
Rabin Vincent7d83a852011-01-25 11:18:06 +01001563 /* Start dma job */
1564 err = d40_start(d40c);
Linus Walleij8d318a52010-03-30 15:33:42 +02001565
Rabin Vincent7d83a852011-01-25 11:18:06 +01001566 if (err)
1567 return NULL;
Linus Walleij8d318a52010-03-30 15:33:42 +02001568 }
1569
1570 return d40d;
1571}
1572
1573/* called from interrupt context */
1574static void dma_tc_handle(struct d40_chan *d40c)
1575{
1576 struct d40_desc *d40d;
1577
Linus Walleij8d318a52010-03-30 15:33:42 +02001578 /* Get first active entry from list */
1579 d40d = d40_first_active_get(d40c);
1580
1581 if (d40d == NULL)
1582 return;
1583
Rabin Vincent0c842b52011-01-25 11:18:35 +01001584 if (d40d->cyclic) {
1585 /*
1586	 * If this was a partially loaded list, we need to reload
1587	 * it, but only once the whole list has completed. We need
1588	 * to check for completion because the interrupt will hit
1589	 * for every link, and not just the last one.
1590 */
1591 if (d40d->lli_current < d40d->lli_len
1592 && !d40_tx_is_linked(d40c)
1593 && !d40_residue(d40c)) {
1594 d40_lcla_free_all(d40c, d40d);
1595 d40_desc_load(d40c, d40d);
1596 (void) d40_start(d40c);
Linus Walleij8d318a52010-03-30 15:33:42 +02001597
Rabin Vincent0c842b52011-01-25 11:18:35 +01001598 if (d40d->lli_current == d40d->lli_len)
1599 d40d->lli_current = 0;
1600 }
1601 } else {
1602 d40_lcla_free_all(d40c, d40d);
1603
1604 if (d40d->lli_current < d40d->lli_len) {
1605 d40_desc_load(d40c, d40d);
1606 /* Start dma job */
1607 (void) d40_start(d40c);
1608 return;
1609 }
1610
1611 if (d40_queue_start(d40c) == NULL)
1612 d40c->busy = false;
Narayanan G7fb3e752011-11-17 17:26:41 +05301613 pm_runtime_mark_last_busy(d40c->base->dev);
1614 pm_runtime_put_autosuspend(d40c->base->dev);
Linus Walleij8d318a52010-03-30 15:33:42 +02001615
Fabio Baltieri7dd14522013-02-14 10:03:10 +01001616 d40_desc_remove(d40d);
1617 d40_desc_done(d40c, d40d);
1618 }
Fabio Baltieri4226dd82012-12-13 13:46:16 +01001619
Linus Walleij8d318a52010-03-30 15:33:42 +02001620 d40c->pending_tx++;
1621 tasklet_schedule(&d40c->tasklet);
1622
1623}
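/*
 * For cyclic jobs, dma_tc_handle() above only reloads the descriptor
 * when all of the following hold: not every link has been loaded yet
 * (lli_current < lli_len), the hardware has no next link queued, and
 * the residue is zero -- i.e. the partially loaded list has actually
 * run out, rather than merely raised one of its per-link interrupts.
 */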
1624
1625static void dma_tasklet(unsigned long data)
1626{
1627 struct d40_chan *d40c = (struct d40_chan *) data;
Jonas Aaberg767a9672010-08-09 12:08:34 +00001628 struct d40_desc *d40d;
Linus Walleij8d318a52010-03-30 15:33:42 +02001629 unsigned long flags;
1630 dma_async_tx_callback callback;
1631 void *callback_param;
1632
1633 spin_lock_irqsave(&d40c->lock, flags);
1634
Fabio Baltieri4226dd82012-12-13 13:46:16 +01001635 /* Get first entry from the done list */
1636 d40d = d40_first_done(d40c);
1637 if (d40d == NULL) {
1638		/* Check if we got here because of a cyclic job */
1639 d40d = d40_first_active_get(d40c);
1640 if (d40d == NULL || !d40d->cyclic)
1641 goto err;
1642 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001643
Rabin Vincent0c842b52011-01-25 11:18:35 +01001644 if (!d40d->cyclic)
Russell King - ARM Linuxf7fbce02012-03-06 22:35:07 +00001645 dma_cookie_complete(&d40d->txd);
Linus Walleij8d318a52010-03-30 15:33:42 +02001646
1647 /*
1648	 * When terminating a channel, pending_tx is set to zero. This
1649	 * prevents any finished active jobs from being returned to the client.
1650 */
1651 if (d40c->pending_tx == 0) {
1652 spin_unlock_irqrestore(&d40c->lock, flags);
1653 return;
1654 }
1655
1656 /* Callback to client */
Jonas Aaberg767a9672010-08-09 12:08:34 +00001657 callback = d40d->txd.callback;
1658 callback_param = d40d->txd.callback_param;
Linus Walleij8d318a52010-03-30 15:33:42 +02001659
Rabin Vincent0c842b52011-01-25 11:18:35 +01001660 if (!d40d->cyclic) {
1661 if (async_tx_test_ack(&d40d->txd)) {
Jonas Aaberg767a9672010-08-09 12:08:34 +00001662 d40_desc_remove(d40d);
Rabin Vincent0c842b52011-01-25 11:18:35 +01001663 d40_desc_free(d40c, d40d);
Fabio Baltierif26e03a2012-12-13 17:12:37 +01001664 } else if (!d40d->is_in_client_list) {
1665 d40_desc_remove(d40d);
1666 d40_lcla_free_all(d40c, d40d);
1667 list_add_tail(&d40d->node, &d40c->client);
1668 d40d->is_in_client_list = true;
Linus Walleij8d318a52010-03-30 15:33:42 +02001669 }
1670 }
1671
1672 d40c->pending_tx--;
1673
1674 if (d40c->pending_tx)
1675 tasklet_schedule(&d40c->tasklet);
1676
1677 spin_unlock_irqrestore(&d40c->lock, flags);
1678
Jonas Aaberg767a9672010-08-09 12:08:34 +00001679 if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
Linus Walleij8d318a52010-03-30 15:33:42 +02001680 callback(callback_param);
1681
1682 return;
1683
Narayanan G1bdae6f2012-02-09 12:41:37 +05301684err:
1685	/* Rescue maneuver if receiving double interrupts */
Linus Walleij8d318a52010-03-30 15:33:42 +02001686 if (d40c->pending_tx > 0)
1687 d40c->pending_tx--;
1688 spin_unlock_irqrestore(&d40c->lock, flags);
1689}
1690
1691static irqreturn_t d40_handle_interrupt(int irq, void *data)
1692{
Linus Walleij8d318a52010-03-30 15:33:42 +02001693 int i;
Linus Walleij8d318a52010-03-30 15:33:42 +02001694 u32 idx;
1695 u32 row;
1696 long chan = -1;
1697 struct d40_chan *d40c;
1698 unsigned long flags;
1699 struct d40_base *base = data;
Tong Liu3cb645d2012-09-26 10:07:30 +00001700 u32 regs[base->gen_dmac.il_size];
1701 struct d40_interrupt_lookup *il = base->gen_dmac.il;
1702 u32 il_size = base->gen_dmac.il_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02001703
1704 spin_lock_irqsave(&base->interrupt_lock, flags);
1705
1706 /* Read interrupt status of both logical and physical channels */
Tong Liu3cb645d2012-09-26 10:07:30 +00001707 for (i = 0; i < il_size; i++)
Linus Walleij8d318a52010-03-30 15:33:42 +02001708 regs[i] = readl(base->virtbase + il[i].src);
1709
1710 for (;;) {
1711
1712 chan = find_next_bit((unsigned long *)regs,
Tong Liu3cb645d2012-09-26 10:07:30 +00001713 BITS_PER_LONG * il_size, chan + 1);
Linus Walleij8d318a52010-03-30 15:33:42 +02001714
1715 /* No more set bits found? */
Tong Liu3cb645d2012-09-26 10:07:30 +00001716 if (chan == BITS_PER_LONG * il_size)
Linus Walleij8d318a52010-03-30 15:33:42 +02001717 break;
1718
1719 row = chan / BITS_PER_LONG;
1720 idx = chan & (BITS_PER_LONG - 1);
1721
Linus Walleij8d318a52010-03-30 15:33:42 +02001722 if (il[row].offset == D40_PHY_CHAN)
1723 d40c = base->lookup_phy_chans[idx];
1724 else
1725 d40c = base->lookup_log_chans[il[row].offset + idx];
Fabio Baltieri53d6d682012-12-19 14:41:56 +01001726
1727 if (!d40c) {
1728 /*
1729 * No error because this can happen if something else
1730 * in the system is using the channel.
1731 */
1732 continue;
1733 }
1734
1735 /* ACK interrupt */
1736 writel(1 << idx, base->virtbase + il[row].clr);
1737
Linus Walleij8d318a52010-03-30 15:33:42 +02001738 spin_lock(&d40c->lock);
1739
1740 if (!il[row].is_error)
1741 dma_tc_handle(d40c);
1742 else
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01001743 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1744 chan, il[row].offset, idx);
Linus Walleij8d318a52010-03-30 15:33:42 +02001745
1746 spin_unlock(&d40c->lock);
1747 }
1748
1749 spin_unlock_irqrestore(&base->interrupt_lock, flags);
1750
1751 return IRQ_HANDLED;
1752}
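/*
 * Bit decoding in d40_handle_interrupt(): the status registers are
 * scanned as one contiguous bitmap, so a set bit at position chan
 * splits into row = chan / BITS_PER_LONG (which lookup entry) and
 * idx = chan % BITS_PER_LONG (which channel within it). On a 32-bit
 * build, for example, bit 37 decodes to row 1, idx 5.
 */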
1753
Linus Walleij8d318a52010-03-30 15:33:42 +02001754static int d40_validate_conf(struct d40_chan *d40c,
1755 struct stedma40_chan_cfg *conf)
1756{
1757 int res = 0;
Rabin Vincent38bdbf02010-10-12 13:00:51 +00001758 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
Linus Walleij8d318a52010-03-30 15:33:42 +02001759
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001760 if (!conf->dir) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01001761 chan_err(d40c, "Invalid direction.\n");
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001762 res = -EINVAL;
1763 }
1764
Lee Jones26955c07d2013-05-03 15:31:56 +01001765 if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
1766 (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
1767 (conf->dev_type < 0)) {
1768 chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001769 res = -EINVAL;
1770 }
1771
1772 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
Lee Jones26955c07d2013-05-03 15:31:56 +01001773 d40c->base->plat_data->dev_tx[conf->dev_type] == 0 &&
1774 d40c->runtime_addr == 0) {
1775 chan_err(d40c, "Invalid TX channel address (%d)\n",
1776 conf->dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001777 res = -EINVAL;
1778 }
1779
Linus Walleij0747c7ba2010-08-09 12:07:36 +00001780 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
Lee Jones26955c07d2013-05-03 15:31:56 +01001781 d40c->base->plat_data->dev_rx[conf->dev_type] == 0 &&
1782 d40c->runtime_addr == 0) {
1783 chan_err(d40c, "Invalid RX channel address (%d)\n",
1784 conf->dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02001785 res = -EINVAL;
1786 }
1787
1788 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
1789 /*
1790	 * The DMAC HW supports it; support will be added to this
1791	 * driver if any DMA client requires it.
1792 */
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01001793 chan_err(d40c, "periph to periph not supported\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02001794 res = -EINVAL;
1795 }
1796
Per Forlind49278e2010-12-20 18:31:38 +01001797 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
1798 (1 << conf->src_info.data_width) !=
1799 d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
1800 (1 << conf->dst_info.data_width)) {
1801 /*
1802 * The DMAC hardware only supports
1803 * src (burst x width) == dst (burst x width)
1804 */
1805
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01001806 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
Per Forlind49278e2010-12-20 18:31:38 +01001807 res = -EINVAL;
1808 }
1809
Linus Walleij8d318a52010-03-30 15:33:42 +02001810 return res;
1811}
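/*
 * Example of the (burst x width) rule enforced above: a source burst
 * of 8 two-byte elements moves 16 bytes and therefore matches a
 * destination burst of 4 four-byte elements, while a destination
 * burst of 8 four-byte elements (32 bytes) would be rejected.
 */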
1812
Narayanan G5cd326f2011-11-30 19:20:42 +05301813static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1814 bool is_src, int log_event_line, bool is_log,
1815 bool *first_user)
Linus Walleij8d318a52010-03-30 15:33:42 +02001816{
1817 unsigned long flags;
1818 spin_lock_irqsave(&phy->lock, flags);
Narayanan G5cd326f2011-11-30 19:20:42 +05301819
1820 *first_user = ((phy->allocated_src | phy->allocated_dst)
1821 == D40_ALLOC_FREE);
1822
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001823 if (!is_log) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001824 /* Physical interrupts are masked per physical full channel */
1825 if (phy->allocated_src == D40_ALLOC_FREE &&
1826 phy->allocated_dst == D40_ALLOC_FREE) {
1827 phy->allocated_dst = D40_ALLOC_PHY;
1828 phy->allocated_src = D40_ALLOC_PHY;
1829 goto found;
1830 } else
1831 goto not_found;
1832 }
1833
1834 /* Logical channel */
1835 if (is_src) {
1836 if (phy->allocated_src == D40_ALLOC_PHY)
1837 goto not_found;
1838
1839 if (phy->allocated_src == D40_ALLOC_FREE)
1840 phy->allocated_src = D40_ALLOC_LOG_FREE;
1841
1842 if (!(phy->allocated_src & (1 << log_event_line))) {
1843 phy->allocated_src |= 1 << log_event_line;
1844 goto found;
1845 } else
1846 goto not_found;
1847 } else {
1848 if (phy->allocated_dst == D40_ALLOC_PHY)
1849 goto not_found;
1850
1851 if (phy->allocated_dst == D40_ALLOC_FREE)
1852 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1853
1854 if (!(phy->allocated_dst & (1 << log_event_line))) {
1855 phy->allocated_dst |= 1 << log_event_line;
1856 goto found;
1857 } else
1858 goto not_found;
1859 }
1860
1861not_found:
1862 spin_unlock_irqrestore(&phy->lock, flags);
1863 return false;
1864found:
1865 spin_unlock_irqrestore(&phy->lock, flags);
1866 return true;
1867}
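/*
 * Encoding used by d40_alloc_mask_set()/d40_alloc_mask_free():
 * allocated_src/allocated_dst hold D40_ALLOC_FREE when unused,
 * D40_ALLOC_PHY when the whole physical channel is claimed, or a
 * bitmask of event lines (on top of D40_ALLOC_LOG_FREE) when logical
 * channels share it. E.g. logical channels on event lines 3 and 7 of
 * the same direction are recorded as (1 << 3) | (1 << 7).
 */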
1868
1869static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1870 int log_event_line)
1871{
1872 unsigned long flags;
1873 bool is_free = false;
1874
1875 spin_lock_irqsave(&phy->lock, flags);
1876 if (!log_event_line) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001877 phy->allocated_dst = D40_ALLOC_FREE;
1878 phy->allocated_src = D40_ALLOC_FREE;
1879 is_free = true;
1880 goto out;
1881 }
1882
1883 /* Logical channel */
1884 if (is_src) {
1885 phy->allocated_src &= ~(1 << log_event_line);
1886 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1887 phy->allocated_src = D40_ALLOC_FREE;
1888 } else {
1889 phy->allocated_dst &= ~(1 << log_event_line);
1890 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1891 phy->allocated_dst = D40_ALLOC_FREE;
1892 }
1893
1894 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1895 D40_ALLOC_FREE);
1896
1897out:
1898 spin_unlock_irqrestore(&phy->lock, flags);
1899
1900 return is_free;
1901}
1902
Narayanan G5cd326f2011-11-30 19:20:42 +05301903static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
Linus Walleij8d318a52010-03-30 15:33:42 +02001904{
Lee Jones26955c07d2013-05-03 15:31:56 +01001905 int dev_type = d40c->dma_cfg.dev_type;
Linus Walleij8d318a52010-03-30 15:33:42 +02001906 int event_group;
1907 int event_line;
1908 struct d40_phy_res *phys;
1909 int i;
1910 int j;
1911 int log_num;
Gerald Baezaf000df82012-11-08 14:39:07 +01001912 int num_phy_chans;
Linus Walleij8d318a52010-03-30 15:33:42 +02001913 bool is_src;
Rabin Vincent38bdbf02010-10-12 13:00:51 +00001914 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
Linus Walleij8d318a52010-03-30 15:33:42 +02001915
1916 phys = d40c->base->phy_res;
Gerald Baezaf000df82012-11-08 14:39:07 +01001917 num_phy_chans = d40c->base->num_phy_chans;
Linus Walleij8d318a52010-03-30 15:33:42 +02001918
1919 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
Linus Walleij8d318a52010-03-30 15:33:42 +02001920 log_num = 2 * dev_type;
1921 is_src = true;
1922 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1923 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1924 /* dst event lines are used for logical memcpy */
Linus Walleij8d318a52010-03-30 15:33:42 +02001925 log_num = 2 * dev_type + 1;
1926 is_src = false;
1927 } else
1928 return -EINVAL;
1929
1930 event_group = D40_TYPE_TO_GROUP(dev_type);
1931 event_line = D40_TYPE_TO_EVENT(dev_type);
1932
1933 if (!is_log) {
1934 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1935 /* Find physical half channel */
Gerald Baezaf000df82012-11-08 14:39:07 +01001936 if (d40c->dma_cfg.use_fixed_channel) {
1937 i = d40c->dma_cfg.phy_channel;
Marcin Mielczarczyk4aed79b2010-05-18 00:41:21 +02001938 if (d40_alloc_mask_set(&phys[i], is_src,
Narayanan G5cd326f2011-11-30 19:20:42 +05301939 0, is_log,
1940 first_phy_user))
Linus Walleij8d318a52010-03-30 15:33:42 +02001941 goto found_phy;
Gerald Baezaf000df82012-11-08 14:39:07 +01001942 } else {
1943 for (i = 0; i < num_phy_chans; i++) {
1944 if (d40_alloc_mask_set(&phys[i], is_src,
1945 0, is_log,
1946 first_phy_user))
1947 goto found_phy;
1948 }
Linus Walleij8d318a52010-03-30 15:33:42 +02001949 }
1950 } else
1951 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1952 int phy_num = j + event_group * 2;
1953 for (i = phy_num; i < phy_num + 2; i++) {
Linus Walleij508849a2010-06-20 21:26:07 +00001954 if (d40_alloc_mask_set(&phys[i],
1955 is_src,
1956 0,
Narayanan G5cd326f2011-11-30 19:20:42 +05301957 is_log,
1958 first_phy_user))
Linus Walleij8d318a52010-03-30 15:33:42 +02001959 goto found_phy;
1960 }
1961 }
1962 return -EINVAL;
1963found_phy:
1964 d40c->phy_chan = &phys[i];
1965 d40c->log_num = D40_PHY_CHAN;
1966 goto out;
1967 }
1968 if (dev_type == -1)
1969 return -EINVAL;
1970
1971 /* Find logical channel */
1972 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1973 int phy_num = j + event_group * 2;
Narayanan G5cd326f2011-11-30 19:20:42 +05301974
1975 if (d40c->dma_cfg.use_fixed_channel) {
1976 i = d40c->dma_cfg.phy_channel;
1977
1978 if ((i != phy_num) && (i != phy_num + 1)) {
1979 dev_err(chan2dev(d40c),
1980 "invalid fixed phy channel %d\n", i);
1981 return -EINVAL;
1982 }
1983
1984 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1985 is_log, first_phy_user))
1986 goto found_log;
1987
1988 dev_err(chan2dev(d40c),
1989 "could not allocate fixed phy channel %d\n", i);
1990 return -EINVAL;
1991 }
1992
Linus Walleij8d318a52010-03-30 15:33:42 +02001993 /*
1994	 * Spread logical channels across all available physical
1995	 * channels rather than packing every logical channel onto
1996	 * the first available physical channel.
1997 */
1998 if (is_src) {
1999 for (i = phy_num; i < phy_num + 2; i++) {
2000 if (d40_alloc_mask_set(&phys[i], is_src,
Narayanan G5cd326f2011-11-30 19:20:42 +05302001 event_line, is_log,
2002 first_phy_user))
Linus Walleij8d318a52010-03-30 15:33:42 +02002003 goto found_log;
2004 }
2005 } else {
2006 for (i = phy_num + 1; i >= phy_num; i--) {
2007 if (d40_alloc_mask_set(&phys[i], is_src,
Narayanan G5cd326f2011-11-30 19:20:42 +05302008 event_line, is_log,
2009 first_phy_user))
Linus Walleij8d318a52010-03-30 15:33:42 +02002010 goto found_log;
2011 }
2012 }
2013 }
2014 return -EINVAL;
2015
2016found_log:
2017 d40c->phy_chan = &phys[i];
2018 d40c->log_num = log_num;
2019out:
2020
2021 if (is_log)
2022 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
2023 else
2024 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
2025
2026 return 0;
2027
2028}
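/*
 * Placement in d40_allocate_channel(): a logical channel of event
 * group N may only live on the physical channel pair starting at
 * N * 2 within each block of eight, hence phy_num = j + event_group * 2
 * in the loops above. Event group 1, for instance, maps to physical
 * channels 2/3, 10/11, 18/19, and so on.
 */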
2029
Linus Walleij8d318a52010-03-30 15:33:42 +02002030static int d40_config_memcpy(struct d40_chan *d40c)
2031{
2032 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2033
2034 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
Lee Jones29027a12013-05-03 15:31:54 +01002035 d40c->dma_cfg = dma40_memcpy_conf_log;
Lee Jones26955c07d2013-05-03 15:31:56 +01002036 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
Linus Walleij8d318a52010-03-30 15:33:42 +02002037
2038 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
2039 dma_has_cap(DMA_SLAVE, cap)) {
Lee Jones29027a12013-05-03 15:31:54 +01002040 d40c->dma_cfg = dma40_memcpy_conf_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02002041 } else {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002042 chan_err(d40c, "No memcpy\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002043 return -EINVAL;
2044 }
2045
2046 return 0;
2047}
2048
Linus Walleij8d318a52010-03-30 15:33:42 +02002049static int d40_free_dma(struct d40_chan *d40c)
2050{
2051
2052 int res = 0;
Lee Jones26955c07d2013-05-03 15:31:56 +01002053 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
Linus Walleij8d318a52010-03-30 15:33:42 +02002054 struct d40_phy_res *phy = d40c->phy_chan;
2055 bool is_src;
2056
2057 /* Terminate all queued and active transfers */
2058 d40_term_all(d40c);
2059
2060 if (phy == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002061 chan_err(d40c, "phy == null\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002062 return -EINVAL;
2063 }
2064
2065 if (phy->allocated_src == D40_ALLOC_FREE &&
2066 phy->allocated_dst == D40_ALLOC_FREE) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002067 chan_err(d40c, "channel already free\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002068 return -EINVAL;
2069 }
2070
Linus Walleij8d318a52010-03-30 15:33:42 +02002071 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
Lee Jones26955c07d2013-05-03 15:31:56 +01002072 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
Linus Walleij8d318a52010-03-30 15:33:42 +02002073 is_src = false;
Lee Jones26955c07d2013-05-03 15:31:56 +01002074 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
Linus Walleij8d318a52010-03-30 15:33:42 +02002075 is_src = true;
Lee Jones26955c07d2013-05-03 15:31:56 +01002076 else {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002077 chan_err(d40c, "Unknown direction\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002078 return -EINVAL;
2079 }
2080
Narayanan G7fb3e752011-11-17 17:26:41 +05302081 pm_runtime_get_sync(d40c->base->dev);
Linus Walleij8d318a52010-03-30 15:33:42 +02002082 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
2083 if (res) {
Narayanan G1bdae6f2012-02-09 12:41:37 +05302084 chan_err(d40c, "stop failed\n");
Narayanan G7fb3e752011-11-17 17:26:41 +05302085 goto out;
Linus Walleij8d318a52010-03-30 15:33:42 +02002086 }
Narayanan G7fb3e752011-11-17 17:26:41 +05302087
Narayanan G1bdae6f2012-02-09 12:41:37 +05302088 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
2089
2090 if (chan_is_logical(d40c))
2091 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
2092 else
2093 d40c->base->lookup_phy_chans[phy->num] = NULL;
2094
Narayanan G7fb3e752011-11-17 17:26:41 +05302095 if (d40c->busy) {
2096 pm_runtime_mark_last_busy(d40c->base->dev);
2097 pm_runtime_put_autosuspend(d40c->base->dev);
2098 }
2099
2100 d40c->busy = false;
Linus Walleij8d318a52010-03-30 15:33:42 +02002101 d40c->phy_chan = NULL;
Rabin Vincentce2ca122010-10-12 13:00:49 +00002102 d40c->configured = false;
Narayanan G7fb3e752011-11-17 17:26:41 +05302103out:
Linus Walleij8d318a52010-03-30 15:33:42 +02002104
Narayanan G7fb3e752011-11-17 17:26:41 +05302105 pm_runtime_mark_last_busy(d40c->base->dev);
2106 pm_runtime_put_autosuspend(d40c->base->dev);
2107 return res;
Linus Walleij8d318a52010-03-30 15:33:42 +02002108}
2109
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002110static bool d40_is_paused(struct d40_chan *d40c)
2111{
Rabin Vincent8ca84682011-01-25 11:18:07 +01002112 void __iomem *chanbase = chan_base(d40c);
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002113 bool is_paused = false;
2114 unsigned long flags;
2115 void __iomem *active_reg;
2116 u32 status;
Lee Jones26955c07d2013-05-03 15:31:56 +01002117 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002118
2119 spin_lock_irqsave(&d40c->lock, flags);
2120
Rabin Vincent724a8572011-01-25 11:18:08 +01002121 if (chan_is_physical(d40c)) {
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002122 if (d40c->phy_chan->num % 2 == 0)
2123 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2124 else
2125 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2126
2127 status = (readl(active_reg) &
2128 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2129 D40_CHAN_POS(d40c->phy_chan->num);
2130 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2131 is_paused = true;
2132
2133 goto _exit;
2134 }
2135
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002136 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00002137 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
Rabin Vincent8ca84682011-01-25 11:18:07 +01002138 status = readl(chanbase + D40_CHAN_REG_SDLNK);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00002139 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
Rabin Vincent8ca84682011-01-25 11:18:07 +01002140 status = readl(chanbase + D40_CHAN_REG_SSLNK);
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00002141 } else {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002142 chan_err(d40c, "Unknown direction\n");
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002143 goto _exit;
2144 }
Jonas Aaberg9dbfbd35c2010-08-09 12:08:41 +00002145
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002146 status = (status & D40_EVENTLINE_MASK(event)) >>
2147 D40_EVENTLINE_POS(event);
2148
2149 if (status != D40_DMA_RUN)
2150 is_paused = true;
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002151_exit:
2152 spin_unlock_irqrestore(&d40c->lock, flags);
2153 return is_paused;
2154
2155}
2156
Linus Walleij8d318a52010-03-30 15:33:42 +02002157static u32 stedma40_residue(struct dma_chan *chan)
2158{
2159 struct d40_chan *d40c =
2160 container_of(chan, struct d40_chan, chan);
2161 u32 bytes_left;
2162 unsigned long flags;
2163
2164 spin_lock_irqsave(&d40c->lock, flags);
2165 bytes_left = d40_residue(d40c);
2166 spin_unlock_irqrestore(&d40c->lock, flags);
2167
2168 return bytes_left;
2169}
2170
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002171static int
2172d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2173 struct scatterlist *sg_src, struct scatterlist *sg_dst,
Rabin Vincent822c5672011-01-25 11:18:28 +01002174 unsigned int sg_len, dma_addr_t src_dev_addr,
2175 dma_addr_t dst_dev_addr)
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002176{
2177 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2178 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2179 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
Rabin Vincent5ed04b82011-01-25 11:18:26 +01002180 int ret;
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002181
Rabin Vincent5ed04b82011-01-25 11:18:26 +01002182 ret = d40_log_sg_to_lli(sg_src, sg_len,
2183 src_dev_addr,
2184 desc->lli_log.src,
2185 chan->log_def.lcsp1,
2186 src_info->data_width,
2187 dst_info->data_width);
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002188
Rabin Vincent5ed04b82011-01-25 11:18:26 +01002189 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2190 dst_dev_addr,
2191 desc->lli_log.dst,
2192 chan->log_def.lcsp3,
2193 dst_info->data_width,
2194 src_info->data_width);
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002195
Rabin Vincent5ed04b82011-01-25 11:18:26 +01002196 return ret < 0 ? ret : 0;
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002197}
2198
2199static int
2200d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2201 struct scatterlist *sg_src, struct scatterlist *sg_dst,
Rabin Vincent822c5672011-01-25 11:18:28 +01002202 unsigned int sg_len, dma_addr_t src_dev_addr,
2203 dma_addr_t dst_dev_addr)
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002204{
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002205 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2206 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2207 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
Rabin Vincent0c842b52011-01-25 11:18:35 +01002208 unsigned long flags = 0;
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002209 int ret;
2210
Rabin Vincent0c842b52011-01-25 11:18:35 +01002211 if (desc->cyclic)
2212 flags |= LLI_CYCLIC | LLI_TERM_INT;
2213
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002214 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2215 desc->lli_phy.src,
2216 virt_to_phys(desc->lli_phy.src),
2217 chan->src_def_cfg,
Rabin Vincent0c842b52011-01-25 11:18:35 +01002218 src_info, dst_info, flags);
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002219
2220 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2221 desc->lli_phy.dst,
2222 virt_to_phys(desc->lli_phy.dst),
2223 chan->dst_def_cfg,
Rabin Vincent0c842b52011-01-25 11:18:35 +01002224 dst_info, src_info, flags);
Rabin Vincent3e3a0762011-01-25 11:18:21 +01002225
2226 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2227 desc->lli_pool.size, DMA_TO_DEVICE);
2228
2229 return ret < 0 ? ret : 0;
2230}
2231
Rabin Vincent5f811582011-01-25 11:18:18 +01002232static struct d40_desc *
2233d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2234 unsigned int sg_len, unsigned long dma_flags)
2235{
2236 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2237 struct d40_desc *desc;
Rabin Vincentdbd88782011-01-25 11:18:19 +01002238 int ret;
Rabin Vincent5f811582011-01-25 11:18:18 +01002239
2240 desc = d40_desc_get(chan);
2241 if (!desc)
2242 return NULL;
2243
2244 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2245 cfg->dst_info.data_width);
2246 if (desc->lli_len < 0) {
2247 chan_err(chan, "Unaligned size\n");
Rabin Vincentdbd88782011-01-25 11:18:19 +01002248 goto err;
Rabin Vincent5f811582011-01-25 11:18:18 +01002249 }
2250
Rabin Vincentdbd88782011-01-25 11:18:19 +01002251 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2252 if (ret < 0) {
2253 chan_err(chan, "Could not allocate lli\n");
2254 goto err;
2255 }
2256
Rabin Vincent5f811582011-01-25 11:18:18 +01002257 desc->lli_current = 0;
2258 desc->txd.flags = dma_flags;
2259 desc->txd.tx_submit = d40_tx_submit;
2260
2261 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2262
2263 return desc;
Rabin Vincentdbd88782011-01-25 11:18:19 +01002264
2265err:
2266 d40_desc_free(chan, desc);
2267 return NULL;
Rabin Vincent5f811582011-01-25 11:18:18 +01002268}
2269
Rabin Vincentcade1d32011-01-25 11:18:23 +01002270static dma_addr_t
Vinod Kouldb8196d2011-10-13 22:34:23 +05302271d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
Linus Walleij8d318a52010-03-30 15:33:42 +02002272{
Rabin Vincentcade1d32011-01-25 11:18:23 +01002273 struct stedma40_platform_data *plat = chan->base->plat_data;
2274 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
Philippe Langlais711b9ce2011-05-07 17:09:43 +02002275 dma_addr_t addr = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02002276
Rabin Vincentcade1d32011-01-25 11:18:23 +01002277 if (chan->runtime_addr)
2278 return chan->runtime_addr;
2279
Vinod Kouldb8196d2011-10-13 22:34:23 +05302280 if (direction == DMA_DEV_TO_MEM)
Lee Jones26955c07d2013-05-03 15:31:56 +01002281 addr = plat->dev_rx[cfg->dev_type];
Vinod Kouldb8196d2011-10-13 22:34:23 +05302282 else if (direction == DMA_MEM_TO_DEV)
Lee Jones26955c07d2013-05-03 15:31:56 +01002283 addr = plat->dev_tx[cfg->dev_type];
Rabin Vincentcade1d32011-01-25 11:18:23 +01002284
2285 return addr;
2286}
2287
2288static struct dma_async_tx_descriptor *
2289d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2290 struct scatterlist *sg_dst, unsigned int sg_len,
Vinod Kouldb8196d2011-10-13 22:34:23 +05302291 enum dma_transfer_direction direction, unsigned long dma_flags)
Rabin Vincentcade1d32011-01-25 11:18:23 +01002292{
2293 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
Rabin Vincent822c5672011-01-25 11:18:28 +01002294 dma_addr_t src_dev_addr = 0;
2295 dma_addr_t dst_dev_addr = 0;
Rabin Vincentcade1d32011-01-25 11:18:23 +01002296 struct d40_desc *desc;
2297 unsigned long flags;
2298 int ret;
2299
2300 if (!chan->phy_chan) {
2301 chan_err(chan, "Cannot prepare unallocated channel\n");
2302 return NULL;
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002303 }
2304
Rabin Vincentcade1d32011-01-25 11:18:23 +01002305 spin_lock_irqsave(&chan->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002306
Rabin Vincentcade1d32011-01-25 11:18:23 +01002307 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2308 if (desc == NULL)
Linus Walleij8d318a52010-03-30 15:33:42 +02002309 goto err;
2310
Rabin Vincent0c842b52011-01-25 11:18:35 +01002311 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2312 desc->cyclic = true;
2313
Linus Walleij7e426da2012-04-12 18:12:52 +02002314 if (direction != DMA_TRANS_NONE) {
Rabin Vincent822c5672011-01-25 11:18:28 +01002315 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
2316
Vinod Kouldb8196d2011-10-13 22:34:23 +05302317 if (direction == DMA_DEV_TO_MEM)
Rabin Vincent822c5672011-01-25 11:18:28 +01002318 src_dev_addr = dev_addr;
Vinod Kouldb8196d2011-10-13 22:34:23 +05302319 else if (direction == DMA_MEM_TO_DEV)
Rabin Vincent822c5672011-01-25 11:18:28 +01002320 dst_dev_addr = dev_addr;
2321 }
Rabin Vincentcade1d32011-01-25 11:18:23 +01002322
2323 if (chan_is_logical(chan))
2324 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
Rabin Vincent822c5672011-01-25 11:18:28 +01002325 sg_len, src_dev_addr, dst_dev_addr);
Rabin Vincentcade1d32011-01-25 11:18:23 +01002326 else
2327 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
Rabin Vincent822c5672011-01-25 11:18:28 +01002328 sg_len, src_dev_addr, dst_dev_addr);
Rabin Vincentcade1d32011-01-25 11:18:23 +01002329
2330 if (ret) {
2331 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2332 chan_is_logical(chan) ? "log" : "phy", ret);
2333 goto err;
Linus Walleij8d318a52010-03-30 15:33:42 +02002334 }
2335
Per Forlin82babbb362011-08-29 13:33:35 +02002336 /*
2337	 * Add the descriptor to the prepare queue so that it can be
2338	 * freed later in terminate_all.
2339 */
2340 list_add_tail(&desc->node, &chan->prepare_queue);
2341
Rabin Vincentcade1d32011-01-25 11:18:23 +01002342 spin_unlock_irqrestore(&chan->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002343
Rabin Vincentcade1d32011-01-25 11:18:23 +01002344 return &desc->txd;
2345
Linus Walleij8d318a52010-03-30 15:33:42 +02002346err:
Rabin Vincentcade1d32011-01-25 11:18:23 +01002347 if (desc)
2348 d40_desc_free(chan, desc);
2349 spin_unlock_irqrestore(&chan->lock, flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002350 return NULL;
2351}
Linus Walleij8d318a52010-03-30 15:33:42 +02002352
2353bool stedma40_filter(struct dma_chan *chan, void *data)
2354{
2355 struct stedma40_chan_cfg *info = data;
2356 struct d40_chan *d40c =
2357 container_of(chan, struct d40_chan, chan);
2358 int err;
2359
2360 if (data) {
2361 err = d40_validate_conf(d40c, info);
2362 if (!err)
2363 d40c->dma_cfg = *info;
2364 } else
2365 err = d40_config_memcpy(d40c);
2366
Rabin Vincentce2ca122010-10-12 13:00:49 +00002367 if (!err)
2368 d40c->configured = true;
2369
Linus Walleij8d318a52010-03-30 15:33:42 +02002370 return err == 0;
2371}
2372EXPORT_SYMBOL(stedma40_filter);
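/*
 * Illustrative client usage of stedma40_filter() (a sketch, not part
 * of this driver; the config values are made up):
 *
 *	struct stedma40_chan_cfg cfg = {
 *		.dir = STEDMA40_PERIPH_TO_MEM,
 *		.dev_type = <client event line>,
 *	};
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */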
2373
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002374static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2375{
2376 bool realtime = d40c->dma_cfg.realtime;
2377 bool highprio = d40c->dma_cfg.high_priority;
Tong Liu3cb645d2012-09-26 10:07:30 +00002378 u32 rtreg;
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002379 u32 event = D40_TYPE_TO_EVENT(dev_type);
2380 u32 group = D40_TYPE_TO_GROUP(dev_type);
2381 u32 bit = 1 << event;
Rabin Vincentccc3d692012-05-17 13:47:38 +05302382 u32 prioreg;
Tong Liu3cb645d2012-09-26 10:07:30 +00002383 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
Rabin Vincentccc3d692012-05-17 13:47:38 +05302384
Tong Liu3cb645d2012-09-26 10:07:30 +00002385 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
Rabin Vincentccc3d692012-05-17 13:47:38 +05302386 /*
2387 * Due to a hardware bug, in some cases a logical channel triggered by
2388 * a high priority destination event line can generate extra packet
2389 * transactions.
2390 *
2391 * The workaround is to not set the high priority level for the
2392 * destination event lines that trigger logical channels.
2393 */
2394 if (!src && chan_is_logical(d40c))
2395 highprio = false;
2396
Tong Liu3cb645d2012-09-26 10:07:30 +00002397 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002398
2399 /* Destination event lines are stored in the upper halfword */
2400 if (!src)
2401 bit <<= 16;
2402
2403 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2404 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2405}
2406
2407static void d40_set_prio_realtime(struct d40_chan *d40c)
2408{
2409 if (d40c->base->rev < 3)
2410 return;
2411
2412 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
2413 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
Lee Jones26955c07d2013-05-03 15:31:56 +01002414 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002415
2416 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
2417 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
Lee Jones26955c07d2013-05-03 15:31:56 +01002418 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002419}
2420
Linus Walleij8d318a52010-03-30 15:33:42 +02002421/* DMA ENGINE functions */
2422static int d40_alloc_chan_resources(struct dma_chan *chan)
2423{
2424 int err;
2425 unsigned long flags;
2426 struct d40_chan *d40c =
2427 container_of(chan, struct d40_chan, chan);
Linus Walleijef1872e2010-06-20 21:24:52 +00002428 bool is_free_phy;
Linus Walleij8d318a52010-03-30 15:33:42 +02002429 spin_lock_irqsave(&d40c->lock, flags);
2430
Russell King - ARM Linuxd3ee98cdc2012-03-06 22:35:47 +00002431 dma_cookie_init(chan);
Linus Walleij8d318a52010-03-30 15:33:42 +02002432
Rabin Vincentce2ca122010-10-12 13:00:49 +00002433	/* If no dma configuration is set, use the default (memcpy) configuration */
2434 if (!d40c->configured) {
Linus Walleij8d318a52010-03-30 15:33:42 +02002435 err = d40_config_memcpy(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00002436 if (err) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002437 chan_err(d40c, "Failed to configure memcpy channel\n");
Jonas Aabergff0b12b2010-06-20 21:25:15 +00002438 goto fail;
2439 }
Linus Walleij8d318a52010-03-30 15:33:42 +02002440 }
2441
Narayanan G5cd326f2011-11-30 19:20:42 +05302442 err = d40_allocate_channel(d40c, &is_free_phy);
Linus Walleij8d318a52010-03-30 15:33:42 +02002443 if (err) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002444 chan_err(d40c, "Failed to allocate channel\n");
Narayanan G7fb3e752011-11-17 17:26:41 +05302445 d40c->configured = false;
Jonas Aabergff0b12b2010-06-20 21:25:15 +00002446 goto fail;
Linus Walleij8d318a52010-03-30 15:33:42 +02002447 }
2448
Narayanan G7fb3e752011-11-17 17:26:41 +05302449 pm_runtime_get_sync(d40c->base->dev);
Linus Walleijef1872e2010-06-20 21:24:52 +00002450 /* Fill in basic CFG register values */
2451 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
Rabin Vincent724a8572011-01-25 11:18:08 +01002452 &d40c->dst_def_cfg, chan_is_logical(d40c));
Linus Walleijef1872e2010-06-20 21:24:52 +00002453
Rabin Vincentac2c0a32011-01-25 11:18:11 +01002454 d40_set_prio_realtime(d40c);
2455
Rabin Vincent724a8572011-01-25 11:18:08 +01002456 if (chan_is_logical(d40c)) {
Linus Walleijef1872e2010-06-20 21:24:52 +00002457 d40_log_cfg(&d40c->dma_cfg,
2458 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2459
2460 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2461 d40c->lcpa = d40c->base->lcpa_base +
Lee Jones26955c07d2013-05-03 15:31:56 +01002462 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
Linus Walleijef1872e2010-06-20 21:24:52 +00002463 else
2464 d40c->lcpa = d40c->base->lcpa_base +
Lee Jones26955c07d2013-05-03 15:31:56 +01002465 d40c->dma_cfg.dev_type *
Fabio Baltierif26e03a2012-12-13 17:12:37 +01002466 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
Linus Walleijef1872e2010-06-20 21:24:52 +00002467 }
2468
Narayanan G5cd326f2011-11-30 19:20:42 +05302469 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2470 chan_is_logical(d40c) ? "logical" : "physical",
2471 d40c->phy_chan->num,
2472 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2473
2474
Linus Walleijef1872e2010-06-20 21:24:52 +00002475 /*
2476 * Only write channel configuration to the DMA if the physical
2477 * resource is free. In case of multiple logical channels
2478 * on the same physical resource, only the first write is necessary.
2479 */
Jonas Aabergb55912c2010-08-09 12:08:02 +00002480 if (is_free_phy)
2481 d40_config_write(d40c);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00002482fail:
Narayanan G7fb3e752011-11-17 17:26:41 +05302483 pm_runtime_mark_last_busy(d40c->base->dev);
2484 pm_runtime_put_autosuspend(d40c->base->dev);
Linus Walleij8d318a52010-03-30 15:33:42 +02002485 spin_unlock_irqrestore(&d40c->lock, flags);
Jonas Aabergff0b12b2010-06-20 21:25:15 +00002486 return err;
Linus Walleij8d318a52010-03-30 15:33:42 +02002487}
2488
2489static void d40_free_chan_resources(struct dma_chan *chan)
2490{
2491 struct d40_chan *d40c =
2492 container_of(chan, struct d40_chan, chan);
2493 int err;
2494 unsigned long flags;
2495
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002496 if (d40c->phy_chan == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002497 chan_err(d40c, "Cannot free unallocated channel\n");
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002498 return;
2499 }
2500
Linus Walleij8d318a52010-03-30 15:33:42 +02002501 spin_lock_irqsave(&d40c->lock, flags);
2502
2503 err = d40_free_dma(d40c);
2504
2505 if (err)
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002506 chan_err(d40c, "Failed to free channel\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002507 spin_unlock_irqrestore(&d40c->lock, flags);
2508}
2509
2510static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2511 dma_addr_t dst,
2512 dma_addr_t src,
2513 size_t size,
Jonas Aaberg2a614342010-06-20 21:25:24 +00002514 unsigned long dma_flags)
Linus Walleij8d318a52010-03-30 15:33:42 +02002515{
Rabin Vincent95944c62011-01-25 11:18:17 +01002516 struct scatterlist dst_sg;
2517 struct scatterlist src_sg;
Linus Walleij8d318a52010-03-30 15:33:42 +02002518
Rabin Vincent95944c62011-01-25 11:18:17 +01002519 sg_init_table(&dst_sg, 1);
2520 sg_init_table(&src_sg, 1);
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002521
Rabin Vincent95944c62011-01-25 11:18:17 +01002522 sg_dma_address(&dst_sg) = dst;
2523 sg_dma_address(&src_sg) = src;
Linus Walleij8d318a52010-03-30 15:33:42 +02002524
Rabin Vincent95944c62011-01-25 11:18:17 +01002525 sg_dma_len(&dst_sg) = size;
2526 sg_dma_len(&src_sg) = size;
Linus Walleij8d318a52010-03-30 15:33:42 +02002527
Rabin Vincentcade1d32011-01-25 11:18:23 +01002528 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002529}
2530
Ira Snyder0d688662010-09-30 11:46:47 +00002531static struct dma_async_tx_descriptor *
Rabin Vincentcade1d32011-01-25 11:18:23 +01002532d40_prep_memcpy_sg(struct dma_chan *chan,
2533 struct scatterlist *dst_sg, unsigned int dst_nents,
2534 struct scatterlist *src_sg, unsigned int src_nents,
2535 unsigned long dma_flags)
Ira Snyder0d688662010-09-30 11:46:47 +00002536{
2537 if (dst_nents != src_nents)
2538 return NULL;
2539
Rabin Vincentcade1d32011-01-25 11:18:23 +01002540 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
Rabin Vincent00ac0342011-01-25 11:18:20 +01002541}
2542
Fabio Baltierif26e03a2012-12-13 17:12:37 +01002543static struct dma_async_tx_descriptor *
2544d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2545 unsigned int sg_len, enum dma_transfer_direction direction,
2546 unsigned long dma_flags, void *context)
Linus Walleij8d318a52010-03-30 15:33:42 +02002547{
Andy Shevchenkoa725dcc2013-01-10 10:53:01 +02002548 if (!is_slave_direction(direction))
Rabin Vincent00ac0342011-01-25 11:18:20 +01002549 return NULL;
2550
Rabin Vincentcade1d32011-01-25 11:18:23 +01002551 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
Linus Walleij8d318a52010-03-30 15:33:42 +02002552}
2553
Rabin Vincent0c842b52011-01-25 11:18:35 +01002554static struct dma_async_tx_descriptor *
2555dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2556 size_t buf_len, size_t period_len,
Peter Ujfalusiec8b5e42012-09-14 15:05:47 +03002557 enum dma_transfer_direction direction, unsigned long flags,
2558 void *context)
Rabin Vincent0c842b52011-01-25 11:18:35 +01002559{
2560 unsigned int periods = buf_len / period_len;
2561 struct dma_async_tx_descriptor *txd;
2562 struct scatterlist *sg;
2563 int i;
2564
Robert Marklund79ca7ec2011-06-27 11:33:24 +02002565 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
Rabin Vincent0c842b52011-01-25 11:18:35 +01002566 for (i = 0; i < periods; i++) {
2567 sg_dma_address(&sg[i]) = dma_addr;
2568 sg_dma_len(&sg[i]) = period_len;
2569 dma_addr += period_len;
2570 }
2571
2572 sg[periods].offset = 0;
Lars-Peter Clausenfdaf9c42012-04-25 20:50:52 +02002573 sg_dma_len(&sg[periods]) = 0;
Rabin Vincent0c842b52011-01-25 11:18:35 +01002574 sg[periods].page_link =
2575 ((unsigned long)sg | 0x01) & ~0x02;
2576
2577 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2578 DMA_PREP_INTERRUPT);
2579
2580 kfree(sg);
2581
2582 return txd;
2583}
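/*
 * The extra scatterlist entry built above turns the list into a ring:
 * setting bit 0 of page_link marks the entry as a chain pointer to the
 * first entry and clearing bit 1 removes the end-of-list mark, so
 * sg_next() on the last real period wraps around to the start. This is
 * exactly the condition d40_prep_sg() uses to flag the descriptor as
 * cyclic.
 */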
2584
Linus Walleij8d318a52010-03-30 15:33:42 +02002585static enum dma_status d40_tx_status(struct dma_chan *chan,
2586 dma_cookie_t cookie,
2587 struct dma_tx_state *txstate)
2588{
2589 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00002590 enum dma_status ret;
Linus Walleij8d318a52010-03-30 15:33:42 +02002591
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002592 if (d40c->phy_chan == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002593 chan_err(d40c, "Cannot read status of unallocated channel\n");
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002594 return -EINVAL;
2595 }
2596
Russell King - ARM Linux96a2af42012-03-06 22:35:27 +00002597 ret = dma_cookie_status(chan, cookie, txstate);
2598 if (ret != DMA_SUCCESS)
2599 dma_set_residue(txstate, stedma40_residue(chan));
Linus Walleij8d318a52010-03-30 15:33:42 +02002600
Jonas Aaberga5ebca42010-05-18 00:41:09 +02002601 if (d40_is_paused(d40c))
2602 ret = DMA_PAUSED;
Linus Walleij8d318a52010-03-30 15:33:42 +02002603
2604 return ret;
2605}
2606
2607static void d40_issue_pending(struct dma_chan *chan)
2608{
2609 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2610 unsigned long flags;
2611
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002612 if (d40c->phy_chan == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002613 chan_err(d40c, "Channel is not allocated!\n");
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002614 return;
2615 }
2616
Linus Walleij8d318a52010-03-30 15:33:42 +02002617 spin_lock_irqsave(&d40c->lock, flags);
2618
Per Forlina8f30672011-06-26 23:29:52 +02002619 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2620
2621 /* Busy means that queued jobs are already being processed */
Linus Walleij8d318a52010-03-30 15:33:42 +02002622 if (!d40c->busy)
2623 (void) d40_queue_start(d40c);
2624
2625 spin_unlock_irqrestore(&d40c->lock, flags);
2626}
2627
Narayanan G1bdae6f2012-02-09 12:41:37 +05302628static void d40_terminate_all(struct dma_chan *chan)
2629{
2630 unsigned long flags;
2631 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2632 int ret;
2633
2634 spin_lock_irqsave(&d40c->lock, flags);
2635
2636 pm_runtime_get_sync(d40c->base->dev);
2637 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2638 if (ret)
2639 chan_err(d40c, "Failed to stop channel\n");
2640
2641 d40_term_all(d40c);
2642 pm_runtime_mark_last_busy(d40c->base->dev);
2643 pm_runtime_put_autosuspend(d40c->base->dev);
2644 if (d40c->busy) {
2645 pm_runtime_mark_last_busy(d40c->base->dev);
2646 pm_runtime_put_autosuspend(d40c->base->dev);
2647 }
2648 d40c->busy = false;
2649
2650 spin_unlock_irqrestore(&d40c->lock, flags);
2651}
2652
Rabin Vincent98ca5282011-06-27 11:33:38 +02002653static int
2654dma40_config_to_halfchannel(struct d40_chan *d40c,
2655 struct stedma40_half_channel_info *info,
2656 enum dma_slave_buswidth width,
2657 u32 maxburst)
2658{
2659 enum stedma40_periph_data_width addr_width;
2660 int psize;
2661
2662 switch (width) {
2663 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2664 addr_width = STEDMA40_BYTE_WIDTH;
2665 break;
2666 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2667 addr_width = STEDMA40_HALFWORD_WIDTH;
2668 break;
2669 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2670 addr_width = STEDMA40_WORD_WIDTH;
2671 break;
2672 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2673 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2674 break;
2675 default:
2676 dev_err(d40c->base->dev,
2677 "illegal peripheral address width "
2678 "requested (%d)\n",
2679 width);
2680 return -EINVAL;
2681 }
2682
2683 if (chan_is_logical(d40c)) {
2684 if (maxburst >= 16)
2685 psize = STEDMA40_PSIZE_LOG_16;
2686 else if (maxburst >= 8)
2687 psize = STEDMA40_PSIZE_LOG_8;
2688 else if (maxburst >= 4)
2689 psize = STEDMA40_PSIZE_LOG_4;
2690 else
2691 psize = STEDMA40_PSIZE_LOG_1;
2692 } else {
2693 if (maxburst >= 16)
2694 psize = STEDMA40_PSIZE_PHY_16;
2695 else if (maxburst >= 8)
2696 psize = STEDMA40_PSIZE_PHY_8;
2697 else if (maxburst >= 4)
2698 psize = STEDMA40_PSIZE_PHY_4;
2699 else
2700 psize = STEDMA40_PSIZE_PHY_1;
2701 }
2702
2703 info->data_width = addr_width;
2704 info->psize = psize;
2705 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2706
2707 return 0;
2708}
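/*
 * Example for dma40_config_to_halfchannel(): maxburst = 10 selects the
 * 8-element PSIZE (STEDMA40_PSIZE_LOG_8 or STEDMA40_PSIZE_PHY_8), as
 * the burst is rounded down to the nearest supported size (16/8/4/1).
 */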
2709
Linus Walleij95e14002010-08-04 13:37:45 +02002710/* Runtime reconfiguration extension */
Rabin Vincent98ca5282011-06-27 11:33:38 +02002711static int d40_set_runtime_config(struct dma_chan *chan,
2712 struct dma_slave_config *config)
Linus Walleij95e14002010-08-04 13:37:45 +02002713{
2714 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2715 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
Rabin Vincent98ca5282011-06-27 11:33:38 +02002716 enum dma_slave_buswidth src_addr_width, dst_addr_width;
Linus Walleij95e14002010-08-04 13:37:45 +02002717 dma_addr_t config_addr;
Rabin Vincent98ca5282011-06-27 11:33:38 +02002718 u32 src_maxburst, dst_maxburst;
2719 int ret;
2720
2721 src_addr_width = config->src_addr_width;
2722 src_maxburst = config->src_maxburst;
2723 dst_addr_width = config->dst_addr_width;
2724 dst_maxburst = config->dst_maxburst;
Linus Walleij95e14002010-08-04 13:37:45 +02002725
Vinod Kouldb8196d2011-10-13 22:34:23 +05302726 if (config->direction == DMA_DEV_TO_MEM) {
Linus Walleij95e14002010-08-04 13:37:45 +02002727 dma_addr_t dev_addr_rx =
Lee Jones26955c07d2013-05-03 15:31:56 +01002728 d40c->base->plat_data->dev_rx[cfg->dev_type];
Linus Walleij95e14002010-08-04 13:37:45 +02002729
2730 config_addr = config->src_addr;
2731 if (dev_addr_rx)
2732 dev_dbg(d40c->base->dev,
2733 "channel has a pre-wired RX address %08x "
2734 "overriding with %08x\n",
2735 dev_addr_rx, config_addr);
2736 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2737 dev_dbg(d40c->base->dev,
2738 "channel was not configured for peripheral "
2739 "to memory transfer (%d) overriding\n",
2740 cfg->dir);
2741 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2742
Rabin Vincent98ca5282011-06-27 11:33:38 +02002743 /* Configure the memory side */
2744 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2745 dst_addr_width = src_addr_width;
2746 if (dst_maxburst == 0)
2747 dst_maxburst = src_maxburst;
Linus Walleij95e14002010-08-04 13:37:45 +02002748
Vinod Kouldb8196d2011-10-13 22:34:23 +05302749 } else if (config->direction == DMA_MEM_TO_DEV) {
Linus Walleij95e14002010-08-04 13:37:45 +02002750 dma_addr_t dev_addr_tx =
Lee Jones26955c07d2013-05-03 15:31:56 +01002751 d40c->base->plat_data->dev_tx[cfg->dev_type];
Linus Walleij95e14002010-08-04 13:37:45 +02002752
2753 config_addr = config->dst_addr;
2754 if (dev_addr_tx)
2755 dev_dbg(d40c->base->dev,
2756 "channel has a pre-wired TX address %08x "
2757 "overriding with %08x\n",
2758 dev_addr_tx, config_addr);
2759 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2760 dev_dbg(d40c->base->dev,
2761 "channel was not configured for memory "
2762 "to peripheral transfer (%d) overriding\n",
2763 cfg->dir);
2764 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2765
Rabin Vincent98ca5282011-06-27 11:33:38 +02002766 /* Configure the memory side */
2767 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2768 src_addr_width = dst_addr_width;
2769 if (src_maxburst == 0)
2770 src_maxburst = dst_maxburst;
Linus Walleij95e14002010-08-04 13:37:45 +02002771 } else {
2772 dev_err(d40c->base->dev,
2773 "unrecognized channel direction %d\n",
2774 config->direction);
Rabin Vincent98ca5282011-06-27 11:33:38 +02002775 return -EINVAL;
Linus Walleij95e14002010-08-04 13:37:45 +02002776 }
2777
Rabin Vincent98ca5282011-06-27 11:33:38 +02002778 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
Linus Walleij95e14002010-08-04 13:37:45 +02002779 dev_err(d40c->base->dev,
Rabin Vincent98ca5282011-06-27 11:33:38 +02002780 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2781 src_maxburst,
2782 src_addr_width,
2783 dst_maxburst,
2784 dst_addr_width);
2785 return -EINVAL;
Linus Walleij95e14002010-08-04 13:37:45 +02002786 }
2787
Per Forlin92bb6cd2011-10-13 12:11:36 +02002788 if (src_maxburst > 16) {
2789 src_maxburst = 16;
2790 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2791 } else if (dst_maxburst > 16) {
2792 dst_maxburst = 16;
2793 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2794 }
2795
Rabin Vincent98ca5282011-06-27 11:33:38 +02002796 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2797 src_addr_width,
2798 src_maxburst);
2799 if (ret)
2800 return ret;
Linus Walleij95e14002010-08-04 13:37:45 +02002801
Rabin Vincent98ca5282011-06-27 11:33:38 +02002802 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2803 dst_addr_width,
2804 dst_maxburst);
2805 if (ret)
2806 return ret;
Linus Walleij95e14002010-08-04 13:37:45 +02002807
Per Forlina59670a2010-10-06 09:05:27 +00002808 /* Fill in register values */
Rabin Vincent724a8572011-01-25 11:18:08 +01002809 if (chan_is_logical(d40c))
Per Forlina59670a2010-10-06 09:05:27 +00002810 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2811 else
2812 d40_phy_cfg(cfg, &d40c->src_def_cfg,
2813 &d40c->dst_def_cfg, false);
2814
Linus Walleij95e14002010-08-04 13:37:45 +02002815 /* These settings will take precedence later */
2816 d40c->runtime_addr = config_addr;
2817 d40c->runtime_direction = config->direction;
2818 dev_dbg(d40c->base->dev,
Rabin Vincent98ca5282011-06-27 11:33:38 +02002819 "configured channel %s for %s, data width %d/%d, "
2820 "maxburst %d/%d elements, LE, no flow control\n",
Linus Walleij95e14002010-08-04 13:37:45 +02002821 dma_chan_name(chan),
Vinod Kouldb8196d2011-10-13 22:34:23 +05302822 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
Rabin Vincent98ca5282011-06-27 11:33:38 +02002823 src_addr_width, dst_addr_width,
2824 src_maxburst, dst_maxburst);
2825
2826 return 0;
Linus Walleij95e14002010-08-04 13:37:45 +02002827}
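/*
 * Sketch of a typical caller (the device-side names are made up for
 * illustration): clients reach this function through the generic
 * dmaengine API, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * which ends up here via d40_control(chan, DMA_SLAVE_CONFIG, &cfg).
 */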
2828
Linus Walleij05827632010-05-17 16:30:42 -07002829static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2830 unsigned long arg)
Linus Walleij8d318a52010-03-30 15:33:42 +02002831{
Linus Walleij8d318a52010-03-30 15:33:42 +02002832 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2833
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002834 if (d40c->phy_chan == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002835 chan_err(d40c, "Channel is not allocated!\n");
Jonas Aaberg0d0f6b82010-06-20 21:25:31 +00002836 return -EINVAL;
2837 }
2838
Linus Walleij8d318a52010-03-30 15:33:42 +02002839 switch (cmd) {
2840 case DMA_TERMINATE_ALL:
Narayanan G1bdae6f2012-02-09 12:41:37 +05302841 d40_terminate_all(chan);
2842 return 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02002843 case DMA_PAUSE:
Rabin Vincent86eb5fb2011-01-25 11:18:34 +01002844 return d40_pause(d40c);
Linus Walleij8d318a52010-03-30 15:33:42 +02002845 case DMA_RESUME:
Rabin Vincent86eb5fb2011-01-25 11:18:34 +01002846 return d40_resume(d40c);
Linus Walleij95e14002010-08-04 13:37:45 +02002847 case DMA_SLAVE_CONFIG:
Rabin Vincent98ca5282011-06-27 11:33:38 +02002848 return d40_set_runtime_config(chan,
Linus Walleij95e14002010-08-04 13:37:45 +02002849 (struct dma_slave_config *) arg);
Linus Walleij95e14002010-08-04 13:37:45 +02002850 default:
2851 break;
Linus Walleij8d318a52010-03-30 15:33:42 +02002852 }
2853
2854 /* Other commands are unimplemented */
2855 return -ENXIO;
2856}
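/*
 * For reference, the generic dmaengine helpers that funnel into
 * d40_control() in this kernel era (a sketch, not part of this file):
 *
 *	dmaengine_terminate_all(chan)	  -> DMA_TERMINATE_ALL
 *	dmaengine_pause(chan)		  -> DMA_PAUSE
 *	dmaengine_resume(chan)		  -> DMA_RESUME
 *	dmaengine_slave_config(chan, cfg) -> DMA_SLAVE_CONFIG
 */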
2857
2858/* Initialization functions */
2859
2860static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2861 struct d40_chan *chans, int offset,
2862 int num_chans)
2863{
2864 int i = 0;
2865 struct d40_chan *d40c;
2866
2867 INIT_LIST_HEAD(&dma->channels);
2868
2869 for (i = offset; i < offset + num_chans; i++) {
2870 d40c = &chans[i];
2871 d40c->base = base;
2872 d40c->chan.device = dma;
2873
Linus Walleij8d318a52010-03-30 15:33:42 +02002874 spin_lock_init(&d40c->lock);
2875
2876 d40c->log_num = D40_PHY_CHAN;
2877
Fabio Baltieri4226dd82012-12-13 13:46:16 +01002878 INIT_LIST_HEAD(&d40c->done);
Linus Walleij8d318a52010-03-30 15:33:42 +02002879 INIT_LIST_HEAD(&d40c->active);
2880 INIT_LIST_HEAD(&d40c->queue);
Per Forlina8f30672011-06-26 23:29:52 +02002881 INIT_LIST_HEAD(&d40c->pending_queue);
Linus Walleij8d318a52010-03-30 15:33:42 +02002882 INIT_LIST_HEAD(&d40c->client);
Per Forlin82babbb362011-08-29 13:33:35 +02002883 INIT_LIST_HEAD(&d40c->prepare_queue);
Linus Walleij8d318a52010-03-30 15:33:42 +02002884
Linus Walleij8d318a52010-03-30 15:33:42 +02002885 tasklet_init(&d40c->tasklet, dma_tasklet,
2886 (unsigned long) d40c);
2887
2888 list_add_tail(&d40c->chan.device_node,
2889 &dma->channels);
2890 }
2891}
2892
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002893static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2894{
2895 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2896 dev->device_prep_slave_sg = d40_prep_slave_sg;
2897
2898 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2899 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2900
2901 /*
2902 * This controller can only access addresses on even 32-bit
2903 * boundaries, i.e. aligned to 2^2 = 4 bytes.
2904 */
2905 dev->copy_align = 2;
2906 }
2907
2908 if (dma_has_cap(DMA_SG, dev->cap_mask))
2909 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2910
Rabin Vincent0c842b52011-01-25 11:18:35 +01002911 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2912 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2913
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002914 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2915 dev->device_free_chan_resources = d40_free_chan_resources;
2916 dev->device_issue_pending = d40_issue_pending;
2917 dev->device_tx_status = d40_tx_status;
2918 dev->device_control = d40_control;
2919 dev->dev = base->dev;
2920}
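/*
 * Example (sketch): a pure memcpy user would pick one of these
 * channels up through the standard allocator, e.g.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 * and only ever sees the ops installed for that capability above.
 */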
2921
Linus Walleij8d318a52010-03-30 15:33:42 +02002922static int __init d40_dmaengine_init(struct d40_base *base,
2923 int num_reserved_chans)
2924{
2925 int err;
2926
2927 d40_chan_init(base, &base->dma_slave, base->log_chans,
2928 0, base->num_log_chans);
2929
2930 dma_cap_zero(base->dma_slave.cap_mask);
2931 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
Rabin Vincent0c842b52011-01-25 11:18:35 +01002932 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
Linus Walleij8d318a52010-03-30 15:33:42 +02002933
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002934 d40_ops_init(base, &base->dma_slave);
Linus Walleij8d318a52010-03-30 15:33:42 +02002935
2936 err = dma_async_device_register(&base->dma_slave);
2937
2938 if (err) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002939 d40_err(base->dev, "Failed to register slave channels\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002940 goto failure1;
2941 }
2942
2943 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
Lee Jones664a57e2013-05-03 15:31:53 +01002944 base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));
Linus Walleij8d318a52010-03-30 15:33:42 +02002945
2946 dma_cap_zero(base->dma_memcpy.cap_mask);
2947 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002948 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
Linus Walleij8d318a52010-03-30 15:33:42 +02002949
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002950 d40_ops_init(base, &base->dma_memcpy);
Linus Walleij8d318a52010-03-30 15:33:42 +02002951
2952 err = dma_async_device_register(&base->dma_memcpy);
2953
2954 if (err) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002955 d40_err(base->dev,
2956 "Failed to regsiter memcpy only channels\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002957 goto failure2;
2958 }
2959
2960 d40_chan_init(base, &base->dma_both, base->phy_chans,
2961 0, num_reserved_chans);
2962
2963 dma_cap_zero(base->dma_both.cap_mask);
2964 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2965 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002966 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
Rabin Vincent0c842b52011-01-25 11:18:35 +01002967 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
Linus Walleij8d318a52010-03-30 15:33:42 +02002968
Rabin Vincent7ad74a72011-01-25 11:18:33 +01002969 d40_ops_init(base, &base->dma_both);
Linus Walleij8d318a52010-03-30 15:33:42 +02002970 err = dma_async_device_register(&base->dma_both);
2971
2972 if (err) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01002973 d40_err(base->dev,
2974 "Failed to register logical and physical capable channels\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02002975 goto failure3;
2976 }
2977 return 0;
2978failure3:
2979 dma_async_device_unregister(&base->dma_memcpy);
2980failure2:
2981 dma_async_device_unregister(&base->dma_slave);
2982failure1:
2983 return err;
2984}
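/*
 * Summary of the split above: dma_slave exposes the logical channels
 * for device transfers, dma_memcpy exposes the logical channels
 * reserved for memcpy (dma40_memcpy_channels), and dma_both exposes
 * the physical channels, which can serve either role.
 */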
2985
Narayanan G7fb3e752011-11-17 17:26:41 +05302986/* Suspend resume functionality */
2987#ifdef CONFIG_PM
2988static int dma40_pm_suspend(struct device *dev)
2989{
Narayanan G28c7a192011-11-22 13:56:55 +05302990 struct platform_device *pdev = to_platform_device(dev);
2991 struct d40_base *base = platform_get_drvdata(pdev);
2992 int ret = 0;
Narayanan G7fb3e752011-11-17 17:26:41 +05302993
Narayanan G28c7a192011-11-22 13:56:55 +05302994 if (base->lcpa_regulator)
2995 ret = regulator_disable(base->lcpa_regulator);
2996 return ret;
Narayanan G7fb3e752011-11-17 17:26:41 +05302997}
2998
2999static int dma40_runtime_suspend(struct device *dev)
3000{
3001 struct platform_device *pdev = to_platform_device(dev);
3002 struct d40_base *base = platform_get_drvdata(pdev);
3003
3004 d40_save_restore_registers(base, true);
3005
3006 /* Don't disable/enable clocks for v1 due to HW bugs */
3007 if (base->rev != 1)
3008 writel_relaxed(base->gcc_pwr_off_mask,
3009 base->virtbase + D40_DREG_GCC);
3010
3011 return 0;
3012}
3013
3014static int dma40_runtime_resume(struct device *dev)
3015{
3016 struct platform_device *pdev = to_platform_device(dev);
3017 struct d40_base *base = platform_get_drvdata(pdev);
3018
3019 if (base->initialized)
3020 d40_save_restore_registers(base, false);
3021
3022 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3023 base->virtbase + D40_DREG_GCC);
3024 return 0;
3025}
3026
Narayanan G28c7a192011-11-22 13:56:55 +05303027static int dma40_resume(struct device *dev)
3028{
3029 struct platform_device *pdev = to_platform_device(dev);
3030 struct d40_base *base = platform_get_drvdata(pdev);
3031 int ret = 0;
3032
3033 if (base->lcpa_regulator)
3034 ret = regulator_enable(base->lcpa_regulator);
3035
3036 return ret;
3037}
Narayanan G7fb3e752011-11-17 17:26:41 +05303038
3039static const struct dev_pm_ops dma40_pm_ops = {
3040 .suspend = dma40_pm_suspend,
3041 .runtime_suspend = dma40_runtime_suspend,
3042 .runtime_resume = dma40_runtime_resume,
Narayanan G28c7a192011-11-22 13:56:55 +05303043 .resume = dma40_resume,
Narayanan G7fb3e752011-11-17 17:26:41 +05303044};
3045#define DMA40_PM_OPS (&dma40_pm_ops)
3046#else
3047#define DMA40_PM_OPS NULL
3048#endif
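/*
 * Note on the PM wiring above: runtime suspend/resume is driven by the
 * autosuspend timer armed in d40_probe() (DMA40_AUTOSUSPEND_DELAY ms
 * after last use), while full system suspend/resume additionally drops
 * and re-enables the LCPA ESRAM regulator when one is in use.
 */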
3049
Linus Walleij8d318a52010-03-30 15:33:42 +02003050/* Initialization functions. */
3051
3052static int __init d40_phy_res_init(struct d40_base *base)
3053{
3054 int i;
3055 int num_phy_chans_avail = 0;
3056 u32 val[2];
3057 int odd_even_bit = -2;
Narayanan G7fb3e752011-11-17 17:26:41 +05303058 int gcc = D40_DREG_GCC_ENA;
Linus Walleij8d318a52010-03-30 15:33:42 +02003059
3060 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3061 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3062
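	/*
	 * PRSME/PRSMO hold two bits of security mode per physical
	 * channel: even-numbered channels in PRSME, odd-numbered ones in
	 * PRSMO. A field value of 1 denotes a secure-world channel,
	 * which the loop below treats as permanently allocated.
	 */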
3063 for (i = 0; i < base->num_phy_chans; i++) {
3064 base->phy_res[i].num = i;
3065 odd_even_bit += 2 * ((i % 2) == 0);
3066 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3067 /* Mark security only channels as occupied */
3068 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3069 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
Narayanan G7fb3e752011-11-17 17:26:41 +05303070 base->phy_res[i].reserved = true;
3071 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3072 D40_DREG_GCC_SRC);
3073 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3074 D40_DREG_GCC_DST);
Linus Walleij8d318a52010-03-30 15:33:42 +02003077 } else {
3078 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3079 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
Narayanan G7fb3e752011-11-17 17:26:41 +05303080 base->phy_res[i].reserved = false;
Linus Walleij8d318a52010-03-30 15:33:42 +02003081 num_phy_chans_avail++;
3082 }
3083 spin_lock_init(&base->phy_res[i].lock);
3084 }
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00003085
3086 /* Mark disabled channels as occupied */
3087 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
Rabin Vincentf57b4072010-10-06 08:20:35 +00003088 int chan = base->plat_data->disabled_channels[i];
3089
3090 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3091 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
Narayanan G7fb3e752011-11-17 17:26:41 +05303092 base->phy_res[chan].reserved = true;
3093 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3094 D40_DREG_GCC_SRC);
3095 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3096 D40_DREG_GCC_DST);
Rabin Vincentf57b4072010-10-06 08:20:35 +00003097 num_phy_chans_avail--;
Jonas Aaberg6b7acd82010-06-20 21:26:59 +00003098 }
3099
Fabio Baltieri74070482012-12-18 12:25:14 +01003100 /* Mark soft_lli channels */
3101 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3102 int chan = base->plat_data->soft_lli_chans[i];
3103
3104 base->phy_res[chan].use_soft_lli = true;
3105 }
3106
Linus Walleij8d318a52010-03-30 15:33:42 +02003107 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3108 num_phy_chans_avail, base->num_phy_chans);
3109
3110 /* Verify settings extended vs standard */
3111 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3112
3113 for (i = 0; i < base->num_phy_chans; i++) {
3114
3115 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3116 (val[0] & 0x3) != 1)
3117 dev_info(base->dev,
3118 "[%s] INFO: channel %d is misconfigured (%d)\n",
3119 __func__, i, val[0] & 0x3);
3120
3121 val[0] = val[0] >> 2;
3122 }
3123
Narayanan G7fb3e752011-11-17 17:26:41 +05303124 /*
3125 * To keep things simple, enable all clocks initially.
3126 * The clocks will get managed later, after channel allocation.
3127 * The clocks for the event lines on which reserved channels exist
3128 * are not managed here.
3129 */
3130 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3131 base->gcc_pwr_off_mask = gcc;
3132
Linus Walleij8d318a52010-03-30 15:33:42 +02003133 return num_phy_chans_avail;
3134}
3135
3136static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3137{
Linus Walleij8d318a52010-03-30 15:33:42 +02003138 struct stedma40_platform_data *plat_data;
3139 struct clk *clk = NULL;
3140 void __iomem *virtbase = NULL;
3141 struct resource *res = NULL;
3142 struct d40_base *base = NULL;
3143 int num_log_chans = 0;
3144 int num_phy_chans;
Ulf Hanssonb707c6582012-08-23 13:41:58 +02003145 int clk_ret = -EINVAL;
Linus Walleij8d318a52010-03-30 15:33:42 +02003146 int i;
Linus Walleijf4b89762011-06-27 11:33:46 +02003147 u32 pid;
3148 u32 cid;
3149 u8 rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02003150
3151 clk = clk_get(&pdev->dev, NULL);
Linus Walleij8d318a52010-03-30 15:33:42 +02003152 if (IS_ERR(clk)) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003153 d40_err(&pdev->dev, "No matching clock found\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003154 goto failure;
3155 }
3156
Ulf Hanssonb707c6582012-08-23 13:41:58 +02003157 clk_ret = clk_prepare_enable(clk);
3158 if (clk_ret) {
3159 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3160 goto failure;
3161 }
Linus Walleij8d318a52010-03-30 15:33:42 +02003162
3163 /* Get IO for DMAC base address */
3164 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3165 if (!res)
3166 goto failure;
3167
3168 if (request_mem_region(res->start, resource_size(res),
3169 D40_NAME " I/O base") == NULL)
3170 goto failure;
3171
3172 virtbase = ioremap(res->start, resource_size(res));
3173 if (!virtbase)
3174 goto failure;
3175
Linus Walleijf4b89762011-06-27 11:33:46 +02003176 /* This is just a regular AMBA PrimeCell ID actually */
3177 for (pid = 0, i = 0; i < 4; i++)
3178 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3179 & 255) << (i * 8);
3180 for (cid = 0, i = 0; i < 4; i++)
3181 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3182 & 255) << (i * 8);
Linus Walleij8d318a52010-03-30 15:33:42 +02003183
Linus Walleijf4b89762011-06-27 11:33:46 +02003184 if (cid != AMBA_CID) {
3185 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003186 goto failure;
3187 }
Linus Walleijf4b89762011-06-27 11:33:46 +02003188 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
3189 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
3190 AMBA_MANF_BITS(pid),
3191 AMBA_VENDOR_ST);
3192 goto failure;
3193 }
3194 /*
3195 * HW revision:
3196 * DB8500ed has revision 0
3197 * ? has revision 1
3198 * DB8500v1 has revision 2
3199 * DB8500v2 has revision 3
Gerald Baeza47db92f2012-09-21 21:21:37 +02003200 * AP9540v1 has revision 4
3201 * DB8540v1 has revision 4
Linus Walleijf4b89762011-06-27 11:33:46 +02003202 */
3203 rev = AMBA_REV_BITS(pid);
Jonas Aaberg3ae02672010-08-09 12:08:18 +00003204
Gerald Baeza47db92f2012-09-21 21:21:37 +02003205 plat_data = pdev->dev.platform_data;
Linus Walleij8d318a52010-03-30 15:33:42 +02003206
Gerald Baeza47db92f2012-09-21 21:21:37 +02003207 /* The number of physical channels on this HW */
3208 if (plat_data->num_of_phy_chans)
3209 num_phy_chans = plat_data->num_of_phy_chans;
3210 else
3211 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
3212
3213 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
3214 rev, res->start, num_phy_chans);
Linus Walleij8d318a52010-03-30 15:33:42 +02003215
Narayanan G1bdae6f2012-02-09 12:41:37 +05303216 if (rev < 2) {
3217 d40_err(&pdev->dev, "hardware revision: %d is not supported\n",
3218 rev);
3219 goto failure;
3220 }
3221
Linus Walleij8d318a52010-03-30 15:33:42 +02003222 /* Count the number of logical channels in use */
3223 for (i = 0; i < plat_data->dev_len; i++)
3224 if (plat_data->dev_rx[i] != 0)
3225 num_log_chans++;
3226
3227 for (i = 0; i < plat_data->dev_len; i++)
3228 if (plat_data->dev_tx[i] != 0)
3229 num_log_chans++;
3230
3231 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
Lee Jones664a57e2013-05-03 15:31:53 +01003232 (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
Linus Walleij8d318a52010-03-30 15:33:42 +02003233 sizeof(struct d40_chan), GFP_KERNEL);
3234
3235 if (base == NULL) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003236 d40_err(&pdev->dev, "Out of memory\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003237 goto failure;
3238 }
3239
Jonas Aaberg3ae02672010-08-09 12:08:18 +00003240 base->rev = rev;
Linus Walleij8d318a52010-03-30 15:33:42 +02003241 base->clk = clk;
3242 base->num_phy_chans = num_phy_chans;
3243 base->num_log_chans = num_log_chans;
3244 base->phy_start = res->start;
3245 base->phy_size = resource_size(res);
3246 base->virtbase = virtbase;
3247 base->plat_data = plat_data;
3248 base->dev = &pdev->dev;
3249 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3250 base->log_chans = &base->phy_chans[num_phy_chans];
3251
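	/*
	 * Select the register layout variant: a 14-channel configuration
	 * (apparently the DB8540 flavour) uses the v4b register map and
	 * backup set, everything else gets the v4a one.
	 */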
Tong Liu3cb645d2012-09-26 10:07:30 +00003252 if (base->plat_data->num_of_phy_chans == 14) {
3253 base->gen_dmac.backup = d40_backup_regs_v4b;
3254 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3255 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3256 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3257 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3258 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3259 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3260 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3261 base->gen_dmac.il = il_v4b;
3262 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3263 base->gen_dmac.init_reg = dma_init_reg_v4b;
3264 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3265 } else {
3266 if (base->rev >= 3) {
3267 base->gen_dmac.backup = d40_backup_regs_v4a;
3268 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3269 }
3270 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3271 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3272 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3273 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3274 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3275 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3276 base->gen_dmac.il = il_v4a;
3277 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3278 base->gen_dmac.init_reg = dma_init_reg_v4a;
3279 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3280 }
3281
Linus Walleij8d318a52010-03-30 15:33:42 +02003282 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
3283 GFP_KERNEL);
3284 if (!base->phy_res)
3285 goto failure;
3286
3287 base->lookup_phy_chans = kzalloc(num_phy_chans *
3288 sizeof(struct d40_chan *),
3289 GFP_KERNEL);
3290 if (!base->lookup_phy_chans)
3291 goto failure;
3292
Lee Jones664a57e2013-05-03 15:31:53 +01003293 if (num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) {
Linus Walleij8d318a52010-03-30 15:33:42 +02003294 /*
3295 * The max number of logical channels is the number of event
3296 * lines for all src and dst devices combined.
3297 */
3298 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
3299 sizeof(struct d40_chan *),
3300 GFP_KERNEL);
3301 if (!base->lookup_log_chans)
3302 goto failure;
3303 }
Jonas Aaberg698e4732010-08-09 12:08:56 +00003304
Narayanan G7fb3e752011-11-17 17:26:41 +05303305 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3306 sizeof(d40_backup_regs_chan),
Linus Walleij8d318a52010-03-30 15:33:42 +02003307 GFP_KERNEL);
Narayanan G7fb3e752011-11-17 17:26:41 +05303308 if (!base->reg_val_backup_chan)
3309 goto failure;
3310
3311 base->lcla_pool.alloc_map =
3312 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3313 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
Linus Walleij8d318a52010-03-30 15:33:42 +02003314 if (!base->lcla_pool.alloc_map)
3315 goto failure;
3316
Jonas Aabergc675b1b2010-06-20 21:25:08 +00003317 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3318 0, SLAB_HWCACHE_ALIGN,
3319 NULL);
3320 if (base->desc_slab == NULL)
3321 goto failure;
3322
Linus Walleij8d318a52010-03-30 15:33:42 +02003323 return base;
3324
3325failure:
Ulf Hanssonb707c6582012-08-23 13:41:58 +02003326 if (!clk_ret)
3327 clk_disable_unprepare(clk);
3328 if (!IS_ERR(clk))
Linus Walleij8d318a52010-03-30 15:33:42 +02003329 clk_put(clk);
Linus Walleij8d318a52010-03-30 15:33:42 +02003330 if (virtbase)
3331 iounmap(virtbase);
3332 if (res)
3333 release_mem_region(res->start,
3334 resource_size(res));
3337
3338 if (base) {
3339 kfree(base->lcla_pool.alloc_map);
Narayanan G1bdae6f2012-02-09 12:41:37 +05303340 kfree(base->reg_val_backup_chan);
Linus Walleij8d318a52010-03-30 15:33:42 +02003341 kfree(base->lookup_log_chans);
3342 kfree(base->lookup_phy_chans);
3343 kfree(base->phy_res);
3344 kfree(base);
3345 }
3346
3347 return NULL;
3348}
3349
3350static void __init d40_hw_init(struct d40_base *base)
3351{
Linus Walleij8d318a52010-03-30 15:33:42 +02003353 int i;
3354 u32 prmseo[2] = {0, 0};
3355 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3356 u32 pcmis = 0;
3357 u32 pcicr = 0;
Tong Liu3cb645d2012-09-26 10:07:30 +00003358 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3359 u32 reg_size = base->gen_dmac.init_reg_size;
Linus Walleij8d318a52010-03-30 15:33:42 +02003360
Tong Liu3cb645d2012-09-26 10:07:30 +00003361 for (i = 0; i < reg_size; i++)
Linus Walleij8d318a52010-03-30 15:33:42 +02003362 writel(dma_init_reg[i].val,
3363 base->virtbase + dma_init_reg[i].reg);
3364
3365 /* Configure all our dma channels to default settings */
3366 for (i = 0; i < base->num_phy_chans; i++) {
3367
3368 activeo[i % 2] = activeo[i % 2] << 2;
3369
3370 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3371 == D40_ALLOC_PHY) {
3372 activeo[i % 2] |= 3;
3373 continue;
3374 }
3375
3376 /* Enable interrupt # */
3377 pcmis = (pcmis << 1) | 1;
3378
3379 /* Clear interrupt # */
3380 pcicr = (pcicr << 1) | 1;
3381
3382 /* Set channel to physical mode */
3383 prmseo[i % 2] = prmseo[i % 2] << 2;
3384 prmseo[i % 2] |= 1;
3385
3386 }
3387
3388 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3389 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3390 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3391 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3392
3393 /* Write which interrupt to enable */
Tong Liu3cb645d2012-09-26 10:07:30 +00003394 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
Linus Walleij8d318a52010-03-30 15:33:42 +02003395
3396 /* Write which interrupt to clear */
Tong Liu3cb645d2012-09-26 10:07:30 +00003397 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
Linus Walleij8d318a52010-03-30 15:33:42 +02003398
Tong Liu3cb645d2012-09-26 10:07:30 +00003399 /* These are __initdata and cannot be accessed after init */
3400 base->gen_dmac.init_reg = NULL;
3401 base->gen_dmac.init_reg_size = 0;
Linus Walleij8d318a52010-03-30 15:33:42 +02003402}
3403
Linus Walleij508849a2010-06-20 21:26:07 +00003404static int __init d40_lcla_allocate(struct d40_base *base)
3405{
Rabin Vincent026cbc42011-01-25 11:18:14 +01003406 struct d40_lcla_pool *pool = &base->lcla_pool;
Linus Walleij508849a2010-06-20 21:26:07 +00003407 unsigned long *page_list;
3408 int i, j;
3409 int ret = 0;
3410
3411 /*
3412 * This is somewhat ugly. We need 8192 bytes that are 18-bit
3413 * aligned; to fulfil this hardware requirement without wasting
3414 * 256 KiB we allocate pages until we get a properly aligned one.
3415 */
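	/*
	 * Concretely: LCLA_ALIGNMENT is 0x40000, so the pool's physical
	 * address must have its low 18 bits clear. The loop below keeps
	 * allocating page blocks, tests virt_to_phys() against that
	 * mask, and frees all the losing attempts afterwards.
	 */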
3416 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3417 GFP_KERNEL);
3418
3419 if (!page_list) {
3420 ret = -ENOMEM;
3421 goto failure;
3422 }
3423
3424 /* Calculate how many pages are required */
3425 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3426
3427 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3428 page_list[i] = __get_free_pages(GFP_KERNEL,
3429 base->lcla_pool.pages);
3430 if (!page_list[i]) {
3431
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003432 d40_err(base->dev, "Failed to allocate %d pages.\n",
3433 base->lcla_pool.pages);
Linus Walleij508849a2010-06-20 21:26:07 +00003434
3435 for (j = 0; j < i; j++)
3436 free_pages(page_list[j], base->lcla_pool.pages);
3437 goto failure;
3438 }
3439
3440 if ((virt_to_phys((void *)page_list[i]) &
3441 (LCLA_ALIGNMENT - 1)) == 0)
3442 break;
3443 }
3444
3445 for (j = 0; j < i; j++)
3446 free_pages(page_list[j], base->lcla_pool.pages);
3447
3448 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3449 base->lcla_pool.base = (void *)page_list[i];
3450 } else {
Jonas Aaberg767a9672010-08-09 12:08:34 +00003451 /*
3452 * After many attempts with no success in finding the correct
3453 * alignment, fall back to allocating one big buffer.
3454 */
Linus Walleij508849a2010-06-20 21:26:07 +00003455 dev_warn(base->dev,
3456 "[%s] Failed to get %d pages @ 18 bit align.\n",
3457 __func__, base->lcla_pool.pages);
3458 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3459 base->num_phy_chans +
3460 LCLA_ALIGNMENT,
3461 GFP_KERNEL);
3462 if (!base->lcla_pool.base_unaligned) {
3463 ret = -ENOMEM;
3464 goto failure;
3465 }
3466
3467 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3468 LCLA_ALIGNMENT);
3469 }
3470
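	/*
	 * The controller fetches link lists straight out of LCLA, so the
	 * pool is streaming-mapped once with DMA_TO_DEVICE; the CPU only
	 * writes LLIs into it and is expected to sync the relevant range
	 * before a transfer is started.
	 */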
Rabin Vincent026cbc42011-01-25 11:18:14 +01003471 pool->dma_addr = dma_map_single(base->dev, pool->base,
3472 SZ_1K * base->num_phy_chans,
3473 DMA_TO_DEVICE);
3474 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3475 pool->dma_addr = 0;
3476 ret = -ENOMEM;
3477 goto failure;
3478 }
3479
Linus Walleij508849a2010-06-20 21:26:07 +00003480 writel(virt_to_phys(base->lcla_pool.base),
3481 base->virtbase + D40_DREG_LCLA);
3482failure:
3483 kfree(page_list);
3484 return ret;
3485}
3486
Linus Walleij8d318a52010-03-30 15:33:42 +02003487static int __init d40_probe(struct platform_device *pdev)
3488{
3489 int err;
3490 int ret = -ENOENT;
3491 struct d40_base *base;
3492 struct resource *res = NULL;
3493 int num_reserved_chans;
3494 u32 val;
3495
3496 base = d40_hw_detect_init(pdev);
3497
3498 if (!base)
3499 goto failure;
3500
3501 num_reserved_chans = d40_phy_res_init(base);
3502
3503 platform_set_drvdata(pdev, base);
3504
3505 spin_lock_init(&base->interrupt_lock);
3506 spin_lock_init(&base->execmd_lock);
3507
3508 /* Get IO for logical channel parameter address */
3509 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3510 if (!res) {
3511 ret = -ENOENT;
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003512 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003513 goto failure;
3514 }
3515 base->lcpa_size = resource_size(res);
3516 base->phy_lcpa = res->start;
3517
3518 if (request_mem_region(res->start, resource_size(res),
3519 D40_NAME " I/O lcpa") == NULL) {
3520 ret = -EBUSY;
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003521 d40_err(&pdev->dev,
3522 "Failed to request LCPA region 0x%x-0x%x\n",
3523 res->start, res->end);
Linus Walleij8d318a52010-03-30 15:33:42 +02003524 goto failure;
3525 }
3526
3527 /* We make use of ESRAM memory for this. */
3528 val = readl(base->virtbase + D40_DREG_LCPA);
3529 if (res->start != val && val != 0) {
3530 dev_warn(&pdev->dev,
3531 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
3532 __func__, val, res->start);
3533 } else
3534 writel(res->start, base->virtbase + D40_DREG_LCPA);
3535
3536 base->lcpa_base = ioremap(res->start, resource_size(res));
3537 if (!base->lcpa_base) {
3538 ret = -ENOMEM;
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003539 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003540 goto failure;
3541 }
Narayanan G28c7a192011-11-22 13:56:55 +05303542 /* If lcla has to be located in ESRAM we don't need to allocate */
3543 if (base->plat_data->use_esram_lcla) {
3544 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3545 "lcla_esram");
3546 if (!res) {
3547 ret = -ENOENT;
3548 d40_err(&pdev->dev,
3549 "No \"lcla_esram\" memory resource\n");
3550 goto failure;
3551 }
3552 base->lcla_pool.base = ioremap(res->start,
3553 resource_size(res));
3554 if (!base->lcla_pool.base) {
3555 ret = -ENOMEM;
3556 d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
3557 goto failure;
3558 }
3559 writel(res->start, base->virtbase + D40_DREG_LCLA);
Linus Walleij508849a2010-06-20 21:26:07 +00003560
Narayanan G28c7a192011-11-22 13:56:55 +05303561 } else {
3562 ret = d40_lcla_allocate(base);
3563 if (ret) {
3564 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
3565 goto failure;
3566 }
Linus Walleij8d318a52010-03-30 15:33:42 +02003567 }
3568
Linus Walleij8d318a52010-03-30 15:33:42 +02003569 spin_lock_init(&base->lcla_pool.lock);
3570
Linus Walleij8d318a52010-03-30 15:33:42 +02003571 base->irq = platform_get_irq(pdev, 0);
3572
3573 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
Linus Walleij8d318a52010-03-30 15:33:42 +02003574 if (ret) {
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003575 d40_err(&pdev->dev, "Failed to request IRQ\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003576 goto failure;
3577 }
3578
Narayanan G7fb3e752011-11-17 17:26:41 +05303579 pm_runtime_irq_safe(base->dev);
3580 pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
3581 pm_runtime_use_autosuspend(base->dev);
3582 pm_runtime_enable(base->dev);
3583 pm_runtime_resume(base->dev);
Narayanan G28c7a192011-11-22 13:56:55 +05303584
3585 if (base->plat_data->use_esram_lcla) {
3586
3587 base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
3588 if (IS_ERR(base->lcpa_regulator)) {
3589 d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
3590 base->lcpa_regulator = NULL;
3591 goto failure;
3592 }
3593
3594 ret = regulator_enable(base->lcpa_regulator);
3595 if (ret) {
3596 d40_err(&pdev->dev,
3597 "Failed to enable lcpa_regulator\n");
3598 regulator_put(base->lcpa_regulator);
3599 base->lcpa_regulator = NULL;
3600 goto failure;
3601 }
3602 }
3603
Narayanan G7fb3e752011-11-17 17:26:41 +05303604 base->initialized = true;
Linus Walleij8d318a52010-03-30 15:33:42 +02003605 err = d40_dmaengine_init(base, num_reserved_chans);
3606 if (err)
3607 goto failure;
3608
Per Forlinb96710e2011-10-18 18:39:47 +02003609 base->dev->dma_parms = &base->dma_parms;
3610 err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
3611 if (err) {
3612 d40_err(&pdev->dev, "Failed to set dma max seg size\n");
3613 goto failure;
3614 }
3615
Linus Walleij8d318a52010-03-30 15:33:42 +02003616 d40_hw_init(base);
3617
3618 dev_info(base->dev, "initialized\n");
3619 return 0;
3620
3621failure:
3622 if (base) {
Jonas Aabergc675b1b2010-06-20 21:25:08 +00003623 if (base->desc_slab)
3624 kmem_cache_destroy(base->desc_slab);
Linus Walleij8d318a52010-03-30 15:33:42 +02003625 if (base->virtbase)
3626 iounmap(base->virtbase);
Rabin Vincent026cbc42011-01-25 11:18:14 +01003627
Narayanan G28c7a192011-11-22 13:56:55 +05303628 if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
3629 iounmap(base->lcla_pool.base);
3630 base->lcla_pool.base = NULL;
3631 }
3632
Rabin Vincent026cbc42011-01-25 11:18:14 +01003633 if (base->lcla_pool.dma_addr)
3634 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
3635 SZ_1K * base->num_phy_chans,
3636 DMA_TO_DEVICE);
3637
Linus Walleij508849a2010-06-20 21:26:07 +00003638 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3639 free_pages((unsigned long)base->lcla_pool.base,
3640 base->lcla_pool.pages);
Jonas Aaberg767a9672010-08-09 12:08:34 +00003641
3642 kfree(base->lcla_pool.base_unaligned);
3643
Linus Walleij8d318a52010-03-30 15:33:42 +02003644 if (base->phy_lcpa)
3645 release_mem_region(base->phy_lcpa,
3646 base->lcpa_size);
3647 if (base->phy_start)
3648 release_mem_region(base->phy_start,
3649 base->phy_size);
3650 if (base->clk) {
Fabio Baltierida2ac562013-01-07 10:58:35 +01003651 clk_disable_unprepare(base->clk);
Linus Walleij8d318a52010-03-30 15:33:42 +02003652 clk_put(base->clk);
3653 }
3654
Narayanan G28c7a192011-11-22 13:56:55 +05303655 if (base->lcpa_regulator) {
3656 regulator_disable(base->lcpa_regulator);
3657 regulator_put(base->lcpa_regulator);
3658 }
3659
Linus Walleij8d318a52010-03-30 15:33:42 +02003660 kfree(base->lcla_pool.alloc_map);
3661 kfree(base->lookup_log_chans);
3662 kfree(base->lookup_phy_chans);
3663 kfree(base->phy_res);
3664 kfree(base);
3665 }
3666
Rabin Vincent6db5a8b2011-01-25 11:18:09 +01003667 d40_err(&pdev->dev, "probe failed\n");
Linus Walleij8d318a52010-03-30 15:33:42 +02003668 return ret;
3669}
3670
3671static struct platform_driver d40_driver = {
3672 .driver = {
3673 .owner = THIS_MODULE,
3674 .name = D40_NAME,
Narayanan G7fb3e752011-11-17 17:26:41 +05303675 .pm = DMA40_PM_OPS,
Linus Walleij8d318a52010-03-30 15:33:42 +02003676 },
3677};
3678
Rabin Vincentcb9ab2d2011-01-25 11:18:04 +01003679static int __init stedma40_init(void)
Linus Walleij8d318a52010-03-30 15:33:42 +02003680{
3681 return platform_driver_probe(&d40_driver, d40_probe);
3682}
Linus Walleija0eb2212011-05-18 14:18:57 +02003683subsys_initcall(stedma40_init);