/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)
#define CPDMA_DESC_TO_PORT_EN   BIT(20)
#define CPDMA_TO_PORT_SHIFT     16
#define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN      4

#define CPDMA_TEARDOWN_VALUE    0xfffffffc

struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};

struct cpdma_desc_pool {
        phys_addr_t             phys;
        dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc;
        struct device           *dev;
        struct gen_pool         *gen_pool;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
        int                     chan_num;
};

struct cpdma_chan {
        struct cpdma_desc __iomem       *head, *tail;
        void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        int                             count;
        u32                             desc_num;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int                             int_set, int_clear, td;
        int                             weight;
};

struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL,  3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,  4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,  2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,  1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL,  0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,   31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,   20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,   16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,   12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,   8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,   0,  0xffff, ACCESS_RW},
};
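
/*
 * Illustrative example (not part of the driver): with
 * CPDMA_STAT_TX_ERR_CODE = {CPDMA_DMASTATUS, 20, 0xf}, a hypothetical
 * raw DMASTATUS value of 0x00234500 decodes as
 * (0x00234500 >> 20) & 0xf = 2 (tx host error code 2), and the
 * CPDMA_STAT_TX_ERR_CHAN field yields (0x00234500 >> 16) & 0x7 = 3
 * (the erroring channel).  cpdma_control_get() below performs exactly
 * this shift-and-mask.
 */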

#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs                 params.dmaregs
#define num_chan                params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            __raw_readl((chan)->fld)
#define desc_read(desc, fld)            __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        __raw_writel((u32)(v), &(desc)->fld)

#define cpdma_desc_to_port(chan, mode, directed)                        \
        do {                                                            \
                if (!is_rx_chan(chan) && ((directed == 1) ||            \
                                          (directed == 2)))             \
                        mode |= (CPDMA_DESC_TO_PORT_EN |                \
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)
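
/*
 * Illustrative example (not part of the driver): for a tx channel with
 * directed == 1, the macro above turns mode 0xe0000000
 * (OWNER | SOP | EOP) into 0xe0110000, i.e. it ORs in
 * CPDMA_DESC_TO_PORT_EN (bit 20) plus port 1 in the 3-bit port field
 * at bit 16, so the switch forwards the packet to port 1 only.  Rx
 * channels, and directed values other than 1 or 2, leave mode
 * untouched.
 */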

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %d != avail %d",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        else
                iounmap(pool->iomap);
}
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                       int size, int align)
{
        struct cpdma_desc_pool *pool;
        int ret;

        pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;

        pool->dev = dev;
        pool->mem_size = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc = size / pool->desc_size;

        pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
                                              "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                dev_err(dev, "pool create failed %ld\n",
                        PTR_ERR(pool->gen_pool));
                goto gen_pool_create_fail;
        }

        if (phys) {
                pool->phys = phys;
                pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return pool;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
        return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
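
/*
 * Illustrative example (hypothetical addresses, not part of the
 * driver): if pool->hw_addr is 0x01000000 and pool->iomap is
 * 0xe0800000, a descriptor at cpu address 0xe0800040 maps to dma
 * address 0x01000040 via desc_phys(), and desc_from_phys() inverts
 * the same offset arithmetic.
 */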

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        struct cpdma_control_info *info = &controls[control];
        u32 val;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_WO) != ACCESS_WO)
                return -EPERM;

        val = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);

        return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_hw_addr,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool)
                return NULL;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
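
/*
 * Illustrative usage sketch (an assumption, not part of this driver):
 * the MAC driver (e.g. cpsw or davinci_emac) fills a cpdma_params with
 * its register windows and descriptor memory before calling
 * cpdma_ctlr_create().  Shown in comment form since such setup lives
 * in the consumer, not here; all base offsets below are hypothetical.
 *
 *      struct cpdma_params dma_params = {
 *              .dev             = dev,
 *              .dmaregs         = regs + 0x800,
 *              .txhdp           = regs + 0xa00,
 *              .rxhdp           = regs + 0xa20,
 *              .txcp            = regs + 0xa40,
 *              .rxcp            = regs + 0xa60,
 *              .num_chan        = 8,
 *              .has_soft_reset  = true,
 *              .min_packet_size = 64,
 *              .desc_mem_size   = 8192,
 *              .desc_align      = 16,
 *      };
 *      struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *      if (!dma)                (returns NULL on failure)
 *              return -ENOMEM;
 */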

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }

        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1);
        _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);

int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr->pool);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
                                 int rx, int desc_num,
                                 int per_ch_desc)
{
        struct cpdma_chan *chan, *most_chan = NULL;
        int desc_cnt = desc_num;
        int most_dnum = 0;
        int min, max, i;

        if (!desc_num)
                return;

        if (rx) {
                min = rx_chan_num(0);
                max = rx_chan_num(CPDMA_MAX_CHANNELS);
        } else {
                min = tx_chan_num(0);
                max = tx_chan_num(CPDMA_MAX_CHANNELS);
        }

        for (i = min; i < max; i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan->weight)
                        chan->desc_num = (chan->weight * desc_num) / 100;
                else
                        chan->desc_num = per_ch_desc;

                desc_cnt -= chan->desc_num;

                if (most_dnum < chan->desc_num) {
                        most_dnum = chan->desc_num;
                        most_chan = chan;
                }
        }
        /* hand the remainder, if any, to the busiest channel */
        if (most_chan)
                most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock.
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        struct cpdma_desc_pool *pool = ctlr->pool;
        int free_rx_num = 0, free_tx_num = 0;
        int rx_weight = 0, tx_weight = 0;
        int tx_desc_num, rx_desc_num;
        struct cpdma_chan *chan;
        int i, tx_num = 0;

        if (!ctlr->chan_num)
                return 0;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (is_rx_chan(chan)) {
                        if (!chan->weight)
                                free_rx_num++;
                        rx_weight += chan->weight;
                } else {
                        if (!chan->weight)
                                free_tx_num++;
                        tx_weight += chan->weight;
                        tx_num++;
                }
        }

        if (rx_weight > 100 || tx_weight > 100)
                return -EINVAL;

        tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
        rx_desc_num = pool->num_desc - tx_desc_num;

        if (free_tx_num) {
                tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
                tx_per_ch_desc /= free_tx_num;
        }
        if (free_rx_num) {
                rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
                rx_per_ch_desc /= free_rx_num;
        }

        cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
        cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

        return 0;
}

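/*
 * Illustrative example (hypothetical numbers, not part of the driver):
 * with pool->num_desc = 256 and three channels, two tx (weights 70 and
 * 0) and one rx (weight 0), the pool is first split by channel count:
 * tx_desc_num = (2 * 256) / 3 = 170 and rx_desc_num = 86.  The
 * weighted tx channel then gets (70 * 170) / 100 = 119 descriptors,
 * the weight-0 tx channel gets the leftover share (170 - 119) / 1 = 51,
 * and the rx channel gets all 86.
 */
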
/* cpdma_chan_set_weight - set the weight of a channel as a percentage.
 * Tx and Rx channels have separate weight budgets of 100% each.  The
 * weight is used to split cpdma resources, including the number of
 * descriptors, in the proportion required by the channels.  The channel
 * rate alone is not enough to derive a weight, since the maximum rate
 * of the interface is also needed.  If weight == 0, the channel uses
 * whatever descriptors are left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        unsigned long flags, ch_flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);
        if (ch->weight == weight) {
                spin_unlock_irqrestore(&ch->lock, ch_flags);
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return 0;
        }
        ch->weight = weight;
        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* re-split pool using new channel weight */
        ret = cpdma_chan_split_pool(ctlr);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->desc_num  = ctlr->pool->num_desc / 2;
        chan->weight    = 0;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        ctlr->chan_num++;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
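
/*
 * Illustrative usage sketch (an assumption, not part of this driver):
 * a MAC driver registers a completion handler and creates one tx and
 * one rx channel after cpdma_ctlr_create().  The handler and error
 * handling here are hypothetical placeholders; rx_type selects the tx
 * (0) or rx (1) channel number space.
 */
static inline void cpdma_example_handler(void *token, int len, int status)
{
        /* e.g. free or refill the buffer identified by token */
}

static inline int cpdma_example_open(struct cpdma_ctlr *ctlr)
{
        struct cpdma_chan *txch, *rxch;

        txch = cpdma_chan_create(ctlr, 0, cpdma_example_handler, 0);
        rxch = cpdma_chan_create(ctlr, 0, cpdma_example_handler, 1);
        if (IS_ERR_OR_NULL(txch) || IS_ERR_OR_NULL(rxch))
                return -ENODEV;

        return cpdma_ctlr_start(ctlr);
}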

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        ctlr->chan_num--;
        devm_kfree(ctlr->dev, chan);
        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *prev = chan->tail;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        u32 mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *desc;
        dma_addr_t buffer;
        unsigned long flags;
        u32 mode;
        int ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
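
/*
 * Illustrative usage sketch (an assumption, not part of this driver):
 * queue a buffer for transmit, using an opaque token (typically the
 * skb) that is handed back to the channel handler on completion.
 * Port-directing is disabled (directed = 0).
 */
static inline int cpdma_example_xmit(struct cpdma_chan *txch, void *token,
                                     void *data, int len)
{
        /* returns -ENOMEM once the channel's descriptor budget is used up */
        return cpdma_chan_submit(txch, token, data, len, 0);
}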

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        bool free_tx_desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                       gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t buff_dma;
        int origlen;
        void *token;

        token    = (void *)desc_read(desc, sw_token);
        buff_dma = desc_read(desc, sw_buffer);
        origlen  = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc __iomem *desc;
        int status, outlen;
        int cb_status = 0;
        struct cpdma_desc_pool *pool = ctlr->pool;
        dma_addr_t desc_dma;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status = __raw_readl(&desc->hw_mode);
        outlen = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                           CPDMA_DESC_PORT_MASK);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
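
/*
 * Illustrative usage sketch (an assumption, not part of this driver):
 * a NAPI-style poll loop.  cpdma_chan_process() returns the number of
 * completed descriptors it handled, so the channel interrupt is only
 * re-armed once the channel is drained below the budget (the MAC
 * driver would also write the EOI vector via cpdma_ctlr_eoi()).
 */
static inline int cpdma_example_poll(struct cpdma_chan *rxch, int budget)
{
        int num_rx = cpdma_chan_process(rxch, budget);

        if (num_rx < budget)
                cpdma_chan_int_ctrl(rxch, true); /* re-enable channel irq */

        return num_rx;
}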

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;
        int ret;
        unsigned timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_set(ctlr, control, value);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");