/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

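/*
 * CPDMA descriptor layout.  The first four words (hw_*) are defined by the
 * hardware and are read/written by the DMA engine: next-descriptor link,
 * buffer pointer, buffer length, and the mode/flags word that also carries
 * the packet length.  The sw_* fields are host-only bookkeeping used at
 * completion time: the submitter's token plus the DMA address and length
 * needed to unmap the buffer.
 */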
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

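/*
 * For "directed" transmit packets (directed == 1 or 2), tag the descriptor
 * with the destination slave port and set the to-port-enable bit so the
 * frame is steered to that port instead of being switched normally.
 * Receive channels ignore the directed value.
 */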
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

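/*
 * Release the descriptor pool's backing memory.  The WARN fires if any
 * descriptors are still outstanding (allocated from the gen_pool) when the
 * pool is torn down.
 */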
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

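/*
 * Translate between the CPU-visible descriptor address (inside the
 * ioremap'ed or coherent pool mapping) and the bus/DMA address that gets
 * written into the hardware registers and hw_next links.
 */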
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

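/*
 * Rough usage sketch for a client MAC driver (e.g. cpsw or davinci_emac).
 * The tx_chan_num()/rx_chan_num() helpers and the cpdma_params fields come
 * from davinci_cpdma.h; the handler names, skb token and budget below are
 * purely illustrative, not taken from a real driver:
 *
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *	struct cpdma_chan *txch = cpdma_chan_create(dma, tx_chan_num(0),
 *						    tx_handler);
 *	struct cpdma_chan *rxch = cpdma_chan_create(dma, rx_chan_num(0),
 *						    rx_handler);
 *
 *	cpdma_ctlr_start(dma);
 *	cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	...
 *	cpdma_chan_process(rxch, budget);	(typically from NAPI poll)
 *	...
 *	cpdma_ctlr_stop(dma);
 *	cpdma_ctlr_destroy(dma);
 */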
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
{
	return ctlr->pool->num_desc / 2;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

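/*
 * Queue a filled-in descriptor on a channel.  If the channel is idle, the
 * descriptor becomes the new head and is handed to the hardware by writing
 * the head-pointer register.  Otherwise it is chained after the current
 * tail; if the hardware had already finished the old tail (EOQ set and
 * ownership released), the queue is kicked again to recover from that race.
 */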
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

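/*
 * Retire one descriptor: unmap the data buffer using the addresses saved in
 * the software fields, return the descriptor to the pool, and invoke the
 * channel's completion handler with the submitter's token.
 */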
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

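/*
 * Process the completed descriptor (if any) at the head of the channel's
 * queue: read back its mode word, strip the CRC length when the hardware
 * passed the CRC up, acknowledge completion by writing the completion
 * pointer, and restart the queue if the hardware stopped on EOQ.  Returns
 * -ENOENT when the queue is empty and -EBUSY when the head descriptor is
 * still owned by the hardware.
 */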
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

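/*
 * Stop a channel: mask its interrupt, request a teardown, poll the
 * completion pointer until the hardware reports the teardown value, then
 * acknowledge it.  Descriptors the hardware already completed are processed
 * normally; anything still queued afterwards is unmapped and returned to
 * the pool with a -ENOSYS status so the client can drop those packets.
 */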
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

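/*
 * Table of the control/status fields reachable through cpdma_control_get()
 * and cpdma_control_set().  Each entry names the extended register it lives
 * in, its bit position and width (as a mask), and whether it may be read,
 * written, or both.
 */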
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");