/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction	dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %d != avail %d",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

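/*
 * Convert between the CPU-side (__iomem) view of a descriptor inside the
 * pool and the DMA address the hardware uses, based on the hw_addr/iomap
 * base pair set up in cpdma_desc_pool_create().
 */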
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

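/*
 * Read-modify-write one field of the extended control registers described
 * by the controls[] table above.  Callers must hold the controller lock;
 * the exported cpdma_control_set() wrapper below takes it.
 */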
static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

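/* Enable or disable the host-error interrupt and every channel's interrupt. */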
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

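/*
 * Distribute desc_num descriptors among the rx (or tx) channels: weighted
 * channels get weight% of the budget, unweighted channels get per_ch_desc
 * each, and any remainder goes to the channel holding the largest share.
 */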
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* use remains */
	most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits the controller's descriptor pool between
 * all channels.  Has to be called with the ctlr lock held.
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i, tx_num = 0;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
			tx_num++;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
	rx_desc_num = pool->num_desc - tx_desc_num;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}

/* cpdma_chan_set_weight - set the weight of a channel as a percentage.
 * Tx and Rx channels have separate weight budgets: the Rx channels share
 * 100% and the Tx channels share 100%.  The weight is used to split cpdma
 * resources, including the number of descriptors, between the channels in
 * the required proportion.  The channel rate alone is not enough to derive
 * a channel's weight, as the maximum rate of the interface would also be
 * needed.  If weight == 0, the channel uses whatever descriptors are left
 * over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

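/*
 * Allocate and register a channel.  rx_type selects the rx half of the
 * channels[] array; the descriptor pool is re-split across all channels
 * once the new channel is in place.
 */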
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;
	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

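/*
 * Queue a descriptor on a channel: either at the head of an idle queue
 * (writing HDP directly) or chained after the current tail, re-kicking HDP
 * if the hardware had already reached end-of-queue.
 */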
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

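/*
 * Map a buffer for DMA, fill in a descriptor (SOP | EOP | OWNER plus any
 * directed-port bits) and queue it on the channel.  Fails with -ENOMEM once
 * the channel has used up its share of the descriptor pool.
 */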
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
			 gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

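/*
 * Reap one completed descriptor from the head of the channel: acknowledge
 * it via the CP register, restart the queue if EOQ was hit, then unmap the
 * buffer and invoke the channel handler with the channel lock dropped.
 */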
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

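/*
 * Tear a channel down: request teardown, poll CP for the teardown-complete
 * marker (up to ~100 ms), drain completed packets, then return any remaining
 * descriptors to the pool, reporting -ENOSYS to the handler for each.
 */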
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

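/* Read one field of the extended control registers, honouring ACCESS_RO. */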
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");