/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
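
/*
 * Illustrative note (not part of the original source): for a TX channel
 * with directed == 1, the macro above effectively does
 *
 *	mode |= CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT);
 *
 * i.e. bit 20 enables the "directed packet" feature and bits 16..18 carry
 * the destination port, so the frame is steered to that slave port.  For
 * RX channels, or when directed == 0, the mode word is left untouched.
 */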

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %d != avail %d",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
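
/*
 * Illustrative note (not part of the original source): desc_phys() and
 * desc_from_phys() convert between the CPU-side mapping of a descriptor
 * and the bus address programmed into the hardware, using the constant
 * offset between pool->iomap and pool->hw_addr.  They are inverses, so
 *
 *	dma_addr_t dma = desc_phys(pool, desc);
 *
 * followed by desc_from_phys(pool, dma) yields the original desc pointer.
 */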

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
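
/*
 * Usage sketch (illustrative only, not part of the original source): a MAC
 * driver fills a struct cpdma_params with its register layout and then
 * brings the engine up.  The offsets and sizes below are made-up
 * placeholders; only the field names come from this driver.
 *
 *	struct cpdma_params params = {
 *		.dev		= &pdev->dev,
 *		.dmaregs	= dma_regs,
 *		.txhdp		= state_ram + 0x00,
 *		.rxhdp		= state_ram + 0x20,
 *		.txcp		= state_ram + 0x40,
 *		.rxcp		= state_ram + 0x60,
 *		.rxfree		= dma_regs + 0x0c,
 *		.num_chan	= 8,
 *		.min_packet_size = 64,
 *		.desc_mem_size	= SZ_8K,
 *		.desc_align	= 16,
 *		.has_soft_reset	= true,
 *		.has_ext_regs	= true,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	cpdma_ctlr_start(dma);
 *
 * Leaving desc_mem_phys at zero makes cpdma_desc_pool_create() fall back to
 * dma_alloc_coherent() instead of mapping dedicated descriptor RAM.
 */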

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

/**
 * cpdma_chan_split_pool - Splits the ctlr descriptor pool between all channels.
 * Must be called with ctlr->lock held.
 */
static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;
	struct cpdma_chan *chan;
	int ch_desc_num;
	int i;

	if (!ctlr->chan_num)
		return;

	/* calculate average size of pool slice */
	ch_desc_num = pool->num_desc / ctlr->chan_num;

	/* split ctlr pool */
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan)
			chan->desc_num = ch_desc_num;
	}
}
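
/*
 * Example (illustrative): with 4 KiB of descriptor memory and 64-byte
 * aligned descriptors the pool holds 64 descriptors; once one RX and one
 * TX channel are registered (ctlr->chan_num == 2), each channel's desc_num
 * becomes 32.  Any remainder simply stays unassigned in the pool.
 */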

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
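
/*
 * Usage sketch (illustrative only): channel numbers encode direction, so a
 * driver normally creates a TX and an RX channel, each with its own
 * completion handler.  The tx_chan_num()/rx_chan_num() helpers are assumed
 * to come from davinci_cpdma.h; the handler names are hypothetical.
 *
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), my_tx_handler);
 *	rxch = cpdma_chan_create(dma, rx_chan_num(0), my_rx_handler);
 *	if (IS_ERR_OR_NULL(txch) || IS_ERR_OR_NULL(rxch))
 *		goto err_cleanup;
 */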

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
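
/*
 * Usage sketch (illustrative only): queueing an skb for transmission.  The
 * token is handed back unchanged to the completion handler, so drivers
 * usually pass the skb itself; names other than cpdma_chan_submit() are
 * hypothetical.
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		dev_kfree_skb_any(skb);
 *
 * On completion the handler is called as handler(skb, len, status) and is
 * responsible for freeing the skb.
 */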

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
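
/*
 * Usage sketch (illustrative only): a NAPI poll callback would typically
 * drain completions against its budget and, once done, re-enable
 * interrupts and signal end-of-interrupt.  CPDMA_EOI_RX is assumed to be
 * provided by davinci_cpdma.h; everything else except the cpdma_* calls is
 * hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = cpdma_chan_process(rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			cpdma_ctlr_int_ctrl(dma, true);
 *			cpdma_ctlr_eoi(dma, CPDMA_EOI_RX);
 *		}
 *		return done;
 *	}
 */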

int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
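
/*
 * Usage sketch (illustrative only): the control interface lets the attached
 * MAC driver tweak engine behaviour at runtime, e.g. reserving two bytes in
 * front of every received frame so the IP header ends up 4-byte aligned:
 *
 *	cpdma_control_set(dma, CPDMA_RX_BUFFER_OFFSET, 2);
 *
 * cpdma_control_get(dma, CPDMA_STAT_IDLE) similarly reports whether the
 * engine is currently idle.  Both calls require has_ext_regs and an active
 * controller, otherwise they return an error.
 */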

MODULE_LICENSE("GPL");