/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	u32			phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

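/*
 * For example, a transmit descriptor directed at slave port 1 might be set
 * up roughly as follows (a sketch only; the local variables are illustrative):
 *
 *	u32 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 *
 *	cpdma_desc_to_port(chan, mode, 1);
 *
 * after which mode also carries CPDMA_DESC_TO_PORT_EN and the port number
 * (1 << CPDMA_TO_PORT_SHIFT) in the directed-port field.
 */
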
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
				int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}

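/*
 * A rough usage sketch for the two arrangements described above; the size,
 * alignment and bus addresses are made-up example values, not taken from any
 * particular board:
 *
 *	pool = cpdma_desc_pool_create(dev, 0x01e20000, 0x01e20000, 8192, 16);
 *
 * maps dedicated descriptor RAM at a fixed bus address, while
 *
 *	pool = cpdma_desc_pool_create(dev, 0, 0, 8192, 16);
 *
 * falls back to ordinary memory obtained with dma_alloc_coherent().
 */
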
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap) {
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	} else {
		iounmap(pool->iomap);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force dma_addr_t)desc -
			       (__force dma_addr_t)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

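/*
 * Minimal bring-up sketch, assuming the caller has filled in a struct
 * cpdma_params and that the tx_chan_num()/rx_chan_num() helpers from
 * davinci_cpdma.h are available (variable and handler names are
 * illustrative only):
 *
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *	struct cpdma_chan *txch, *rxch;
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), tx_handler);
 *	rxch = cpdma_chan_create(dma, rx_chan_num(0), rx_handler);
 *	cpdma_ctlr_start(dma);
 */
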
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned long timeout = jiffies + HZ/10;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (time_before(jiffies, timeout)) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
		}
		WARN_ON(!time_before(jiffies, timeout));
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_destroy(ctlr->channels[i]);
	}

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}

	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed, gfp_t gfp_mask)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_token, token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

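/*
 * Illustrative call for queueing a receive buffer, assuming an rx channel
 * and a freshly allocated skb (the skb handling here is only a sketch):
 *
 *	ret = cpdma_chan_submit(rxch, skb, skb->data,
 *				skb_tailroom(skb), 0, GFP_KERNEL);
 *	if (ret < 0)
 *		dev_kfree_skb_any(skb);
 */
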
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

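/*
 * Sketch of how a NAPI poll handler might drain a channel; the budget
 * accounting and the final EOI acknowledgement are illustrative, not
 * mandated by this file:
 *
 *	num_rx = cpdma_chan_process(rxch, budget);
 *	if (num_rx < budget) {
 *		napi_complete(napi);
 *		cpdma_ctlr_eoi(ctlr);
 *	}
 */
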
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned long		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		cpu_relax();
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

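/*
 * Example with a made-up offset value: program a two-byte receive buffer
 * offset, which only succeeds on hardware that has the extended registers:
 *
 *	err = cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
 */
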
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}