/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

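/*
 * A "directed" value of 1 or 2 stamps a tx descriptor with an explicit
 * egress port, so dual-port hardware (e.g. the cpsw switch) sends the
 * packet out of that slave port instead of making its own forwarding
 * decision.  A value of 0, or any rx channel, leaves the mode word alone.
 */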
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev = dev;
	pool->mem_size = size;
	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc = size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

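/*
 * The hardware addresses descriptors relative to pool->hw_addr, while the
 * cpu reaches the same memory through pool->iomap; these helpers convert
 * between the two views.  A dma address of zero denotes "no descriptor",
 * i.e. the end of a chain.
 */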
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	struct cpdma_desc __iomem *desc = NULL;

	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
							   pool->desc_size);
	if (desc)
		pool->used_desc++;

	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
	pool->used_desc--;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
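
/*
 * A rough bring-up sequence for this API, sketched for illustration only
 * (error handling elided; the parameter values are the caller's, and the
 * tx_chan_num()/rx_chan_num() macros come from davinci_cpdma.h):
 *
 *	struct cpdma_params params = {
 *		.dev		= dev,
 *		.dmaregs	= dma_regs,
 *		.desc_mem_size	= desc_mem_size,
 *		...
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *	struct cpdma_chan *txch, *rxch;
 *
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), tx_handler);
 *	rxch = cpdma_chan_create(dma, rx_chan_num(0), rx_handler);
 *	cpdma_ctlr_start(dma);
 *	...
 *	cpdma_ctlr_stop(dma);
 *	cpdma_ctlr_destroy(dma);
 */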

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

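	/* soft-reset the dma engine and poll up to 10 ms for completion */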
	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/* cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock
	 * themselves, so it must not be held across these calls.
	 */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

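/*
 * Each channel is capped at half of the descriptor pool (see chan->desc_num
 * in cpdma_chan_create() above); this reports that budget so callers know
 * how many rx buffers to post.
 */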
int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
	return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

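/*
 * Chain a descriptor onto a channel's queue.  If the hardware has already
 * run off the end of the old chain (EOQ set on the tail descriptor and the
 * descriptor no longer owned by the port), the head pointer must be
 * re-written here, otherwise the new descriptor would sit "misqueued" and
 * never be serviced.  Caller holds chan->lock.
 */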
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	dma_addr_t buffer;
	unsigned long flags;
	u32 mode;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
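
/*
 * A minimal submit/completion sketch (illustrative names, not part of this
 * API): the token passed to cpdma_chan_submit() is handed back, as-is, to
 * the channel's cpdma_handler_fn once the hardware is done with the buffer.
 *
 *	static void tx_handler(void *token, int len, int status)
 *	{
 *		struct sk_buff *skb = token;
 *
 *		dev_kfree_skb_any(skb);
 *	}
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		dev_kfree_skb_any(skb);
 */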

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	void *token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

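/*
 * Reap one completed descriptor from the head of the queue.  Returns
 * -ENOENT when the queue is empty, -EBUSY when the head is still owned by
 * the hardware, otherwise the interesting status bits (EOQ, teardown
 * complete, ingress port) after handing the buffer to the channel's
 * completion handler.
 */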
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

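/*
 * Table-driven access to fields of the extended control/status registers.
 * cpdma_control_get()/cpdma_control_set() below bounds-check the index and
 * honour each field's access flags.
 */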
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");