/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/nbpfaxi.h>

#include "dmaengine.h"

#define NBPF_REG_CHAN_OFFSET 0
#define NBPF_REG_CHAN_SIZE 0x40

/* Channel Current Transaction Byte register */
#define NBPF_CHAN_CUR_TR_BYTE 0x20

/* Channel Status register */
#define NBPF_CHAN_STAT 0x24
#define NBPF_CHAN_STAT_EN 1
#define NBPF_CHAN_STAT_TACT 4
#define NBPF_CHAN_STAT_ERR 0x10
#define NBPF_CHAN_STAT_END 0x20
#define NBPF_CHAN_STAT_TC 0x40
#define NBPF_CHAN_STAT_DER 0x400

/* Channel Control register */
#define NBPF_CHAN_CTRL 0x28
#define NBPF_CHAN_CTRL_SETEN 1
#define NBPF_CHAN_CTRL_CLREN 2
#define NBPF_CHAN_CTRL_STG 4
#define NBPF_CHAN_CTRL_SWRST 8
#define NBPF_CHAN_CTRL_CLRRQ 0x10
#define NBPF_CHAN_CTRL_CLREND 0x20
#define NBPF_CHAN_CTRL_CLRTC 0x40
#define NBPF_CHAN_CTRL_SETSUS 0x100
#define NBPF_CHAN_CTRL_CLRSUS 0x200

/* Channel Configuration register */
#define NBPF_CHAN_CFG 0x2c
#define NBPF_CHAN_CFG_SEL 7		/* terminal SELect: 0..7 */
#define NBPF_CHAN_CFG_REQD 8		/* REQuest Direction: DMAREQ is 0: input, 1: output */
#define NBPF_CHAN_CFG_LOEN 0x10		/* LOw ENable: low DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_HIEN 0x20		/* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
#define NBPF_CHAN_CFG_LVL 0x40		/* LeVeL: DMA request line is sensed as 0: edge, 1: level */
#define NBPF_CHAN_CFG_AM 0x700		/* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
#define NBPF_CHAN_CFG_SDS 0xf000	/* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
#define NBPF_CHAN_CFG_DDS 0xf0000	/* Destination Data Size: as above */
#define NBPF_CHAN_CFG_SAD 0x100000	/* Source ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_DAD 0x200000	/* Destination ADdress counting: 0: increment, 1: fixed */
#define NBPF_CHAN_CFG_TM 0x400000	/* Transfer Mode: 0: single, 1: block TM */
#define NBPF_CHAN_CFG_DEM 0x1000000	/* DMAEND interrupt Mask */
#define NBPF_CHAN_CFG_TCM 0x2000000	/* DMATCO interrupt Mask */
#define NBPF_CHAN_CFG_SBE 0x8000000	/* Sweep Buffer Enable */
#define NBPF_CHAN_CFG_RSEL 0x10000000	/* RM: Register Set sELect */
#define NBPF_CHAN_CFG_RSW 0x20000000	/* RM: Register Select sWitch */
#define NBPF_CHAN_CFG_REN 0x40000000	/* RM: Register Set Enable */
#define NBPF_CHAN_CFG_DMS 0x80000000	/* 0: register mode (RM), 1: link mode (LM) */

#define NBPF_CHAN_NXLA 0x38
#define NBPF_CHAN_CRLA 0x3c

/* Link Header field */
#define NBPF_HEADER_LV 1
#define NBPF_HEADER_LE 2
#define NBPF_HEADER_WBD 4
#define NBPF_HEADER_DIM 8

#define NBPF_CTRL 0x300
#define NBPF_CTRL_PR 1		/* 0: fixed priority, 1: round robin */
#define NBPF_CTRL_LVINT 2	/* DMAEND and DMAERR signalling: 0: pulse, 1: level */

#define NBPF_DSTAT_ER 0x314
#define NBPF_DSTAT_END 0x318

#define NBPF_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct nbpf_config {
	int num_channels;
	int buffer_size;
};
/*
 * We use three types of objects to describe DMA transfers:
 * 1. a high-level descriptor, containing a struct dma_async_tx_descriptor
 *    object, used to communicate with the user
 * 2. hardware DMA link descriptors, that we pass to the DMAC for transfer
 *    queuing; these must be DMAable, using either the streaming DMA API or
 *    allocated from coherent memory - one per SG segment
 * 3. per-SG-segment descriptors, used to manage the hardware link descriptors
 *    from (2). They do not have to be DMAable. They can either be (a)
 *    allocated together with the link descriptors as mixed (DMA / CPU)
 *    objects, or (b) separately. Even if allocated separately it is best to
 *    link them to the link descriptors once during channel resource
 *    allocation and always use them as a single object.
 * Therefore in both cases (a) and (b) at run-time objects (2) and (3) shall be
 * treated as a single SG segment descriptor.
 */

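/*
 * Hardware link descriptor, laid out exactly as the DMAC fetches it in link
 * mode: nbpf_start() points the channel's NXLA register at the first of
 * these, and the "next" field chains further segments.
 */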
struct nbpf_link_reg {
	u32 header;
	u32 src_addr;
	u32 dst_addr;
	u32 transaction_size;
	u32 config;
	u32 interval;
	u32 extension;
	u32 next;
} __packed;

struct nbpf_device;
struct nbpf_channel;
struct nbpf_desc;

struct nbpf_link_desc {
	struct nbpf_link_reg *hwdesc;
	dma_addr_t hwdesc_dma_addr;
	struct nbpf_desc *desc;
	struct list_head node;
};

/**
 * struct nbpf_desc - DMA transfer descriptor
 * @async_tx: dmaengine object
 * @user_wait: waiting for a user ack
 * @length: total transfer length
 * @chan: associated DMAC channel
 * @sg: list of hardware descriptors, represented by struct nbpf_link_desc
 * @node: member in channel descriptor lists
 */
struct nbpf_desc {
	struct dma_async_tx_descriptor async_tx;
	bool user_wait;
	size_t length;
	struct nbpf_channel *chan;
	struct list_head sg;
	struct list_head node;
};

/* Take a wild guess: allocate 4 segments per descriptor */
#define NBPF_SEGMENTS_PER_DESC 4
#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) /	\
	(sizeof(struct nbpf_desc) +					\
	 NBPF_SEGMENTS_PER_DESC *					\
	 (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)

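/*
 * One page, carved up into descriptors, per-segment link descriptors and
 * hardware link registers. NBPF_DESCS_PER_PAGE above is sized so that the
 * whole struct fits into a single page, which the BUILD_BUG_ON() in
 * nbpf_probe() verifies.
 */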
struct nbpf_desc_page {
	struct list_head node;
	struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
	struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
	struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
};

/**
 * struct nbpf_channel - one DMAC channel
 * @dma_chan: standard dmaengine channel object
 * @tasklet: bottom half, processing completed descriptors
 * @base: register address base
 * @nbpf: DMAC
 * @name: IRQ name
 * @irq: IRQ number
 * @slave_src_addr: source address for slave DMA
 * @slave_src_width: source slave data size in bytes
 * @slave_src_burst: maximum source slave burst size in bytes
 * @slave_dst_addr: destination address for slave DMA
 * @slave_dst_width: destination slave data size in bytes
 * @slave_dst_burst: maximum destination slave burst size in bytes
 * @terminal: DMA terminal, assigned to this channel
 * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
 * @flags: configuration flags from DT
 * @lock: protect descriptor lists
 * @free_links: list of free link descriptors
 * @free: list of free descriptors
 * @queued: list of queued descriptors
 * @active: list of descriptors, scheduled for processing
 * @done: list of completed descriptors, waiting post-processing
 * @desc_page: list of additionally allocated descriptor pages - if any
 * @running: descriptor of the currently running transaction, if any
 * @paused: is this channel currently paused?
 */
struct nbpf_channel {
	struct dma_chan dma_chan;
	struct tasklet_struct tasklet;
	void __iomem *base;
	struct nbpf_device *nbpf;
	char name[16];
	int irq;
	dma_addr_t slave_src_addr;
	size_t slave_src_width;
	size_t slave_src_burst;
	dma_addr_t slave_dst_addr;
	size_t slave_dst_width;
	size_t slave_dst_burst;
	unsigned int terminal;
	u32 dmarq_cfg;
	unsigned long flags;
	spinlock_t lock;
	struct list_head free_links;
	struct list_head free;
	struct list_head queued;
	struct list_head active;
	struct list_head done;
	struct list_head desc_page;
	struct nbpf_desc *running;
	bool paused;
};

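/*
 * One DMAC instance: the dmaengine device with config->num_channels channels
 * appended as a flexible array member (allocated in nbpf_probe()).
 */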
struct nbpf_device {
	struct dma_device dma_dev;
	void __iomem *base;
	u32 max_burst_mem_read;
	u32 max_burst_mem_write;
	struct clk *clk;
	const struct nbpf_config *config;
	unsigned int eirq;
	struct nbpf_channel chan[];
};

enum nbpf_model {
	NBPF1B4,
	NBPF1B8,
	NBPF1B16,
	NBPF4B4,
	NBPF4B8,
	NBPF4B16,
	NBPF8B4,
	NBPF8B8,
	NBPF8B16,
};

static struct nbpf_config nbpf_cfg[] = {
	[NBPF1B4] = {
		.num_channels = 1,
		.buffer_size = 4,
	},
	[NBPF1B8] = {
		.num_channels = 1,
		.buffer_size = 8,
	},
	[NBPF1B16] = {
		.num_channels = 1,
		.buffer_size = 16,
	},
	[NBPF4B4] = {
		.num_channels = 4,
		.buffer_size = 4,
	},
	[NBPF4B8] = {
		.num_channels = 4,
		.buffer_size = 8,
	},
	[NBPF4B16] = {
		.num_channels = 4,
		.buffer_size = 16,
	},
	[NBPF8B4] = {
		.num_channels = 8,
		.buffer_size = 4,
	},
	[NBPF8B8] = {
		.num_channels = 8,
		.buffer_size = 8,
	},
	[NBPF8B16] = {
		.num_channels = 8,
		.buffer_size = 16,
	},
};

#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)

/*
 * dmaengine drivers seem to have a lot in common and instead of sharing more
 * code, they reimplement those common algorithms independently. In this driver
 * we try to separate the hardware-specific part from the (largely) generic
 * part. This improves code readability and makes it possible in the future to
 * reuse the generic code in the form of a helper library. That generic code
 * should be suitable for various DMA controllers, using transfer descriptors
 * in RAM and pushing one SG list at a time to the DMA controller.
 */

/* Hardware-specific part */

static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
				 unsigned int offset)
{
	u32 data = ioread32(chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
	return data;
}

static inline void nbpf_chan_write(struct nbpf_channel *chan,
				   unsigned int offset, u32 data)
{
	iowrite32(data, chan->base + offset);
	dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, chan->base, offset, data);
}

static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}

static void nbpf_chan_halt(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
}

static bool nbpf_status_get(struct nbpf_channel *chan)
{
	u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

	return status & BIT(chan - chan->nbpf->chan);
}

static void nbpf_status_ack(struct nbpf_channel *chan)
{
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
}

static u32 nbpf_error_get(struct nbpf_device *nbpf)
{
	return nbpf_read(nbpf, NBPF_DSTAT_ER);
}

static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
{
	return nbpf->chan + __ffs(error);
}

static void nbpf_error_clear(struct nbpf_channel *chan)
{
	u32 status;
	int i;

	/* Stop the channel, make sure DMA has been aborted */
	nbpf_chan_halt(chan);

	for (i = 1000; i; i--) {
		status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
		if (!(status & NBPF_CHAN_STAT_TACT))
			break;
		cpu_relax();
	}

	if (!i)
		dev_err(chan->dma_chan.device->dev,
			"%s(): abort timeout, channel status 0x%x\n", __func__, status);

	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
}

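/*
 * Start a transfer: point NXLA at the first hardware link descriptor and
 * enable the channel. MEMCPY (block mode) additionally needs a software
 * trigger via the STG bit, see below.
 */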
static int nbpf_start(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);

	nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
	chan->paused = false;

	/* Software trigger MEMCPY - only MEMCPY uses the block mode */
	if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

	dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
		nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));

	return 0;
}

static void nbpf_chan_prepare(struct nbpf_channel *chan)
{
	chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
		(chan->flags & NBPF_SLAVE_RQ_LEVEL ?
		 NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
		chan->terminal;
}

static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
{
	/* Don't output DMAACK */
	chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
	chan->terminal = 0;
	chan->flags = 0;
}

static void nbpf_chan_configure(struct nbpf_channel *chan)
{
	/*
	 * We assume that only the link mode and the DMA request line
	 * configuration have to be set in the configuration register manually.
	 * Dynamic per-transfer configuration will be loaded from the transfer
	 * descriptors.
	 */
	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
}

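/*
 * Calculate the RAM-side transfer data size in the log2 encoding used by the
 * SDS / DDS configuration fields (0: 8 bits, ..., 7: 1024 bits), limited by
 * the transfer size alignment and the maximum supported burst.
 */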
static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{
	int max_burst = nbpf->config->buffer_size * 8;

	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
		switch (direction) {
		case DMA_MEM_TO_MEM:
			max_burst = min_not_zero(nbpf->max_burst_mem_read,
						 nbpf->max_burst_mem_write);
			break;
		case DMA_MEM_TO_DEV:
			if (nbpf->max_burst_mem_read)
				max_burst = nbpf->max_burst_mem_read;
			break;
		case DMA_DEV_TO_MEM:
			if (nbpf->max_burst_mem_write)
				max_burst = nbpf->max_burst_mem_write;
			break;
		case DMA_DEV_TO_DEV:
		default:
			break;
		}
	}

	/* Maximum supported bursts depend on the buffer size */
	return min_t(int, __ffs(size), ilog2(max_burst));
}

static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
			     enum dma_slave_buswidth width, u32 burst)
{
	size_t size;

	if (!burst)
		burst = 1;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		size = 8 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		size = 4 * burst;
		break;

	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		size = 2 * burst;
		break;

	default:
		pr_warn("%s(): invalid bus width %u\n", __func__, width);
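		/* fall through - treat an invalid width as one byte */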
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		size = burst;
	}

	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
}

/*
 * We need a way to recognise slaves, whose data is sent "raw" over the bus,
 * i.e. it isn't known in advance how many bytes will be received. The slave
 * driver then has to provide a "large enough" buffer and either read it when
 * it is full, or detect that some data has arrived, wait for a timeout and,
 * if no more data arrives, receive what's already there. We want to handle
 * such slaves in a special way to allow an optimised mode for other users,
 * for whom the amount of data is known in advance. So far there's no way to
 * recognise such slaves, so we use a data-width check to distinguish between
 * the SD host and the PL011 UART.
 */

static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
			 enum dma_transfer_direction direction,
			 dma_addr_t src, dma_addr_t dst, size_t size, bool last)
{
	struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
	struct nbpf_desc *desc = ldesc->desc;
	struct nbpf_channel *chan = desc->chan;
	struct device *dev = chan->dma_chan.device->dev;
	size_t mem_xfer, slave_xfer;
	bool can_burst;

	hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
		(last ? NBPF_HEADER_LE : 0);

	hwdesc->src_addr = src;
	hwdesc->dst_addr = dst;
	hwdesc->transaction_size = size;

	/*
	 * set config: SAD, DAD, DDS, SDS, etc.
	 * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
	 * but it is important to keep the transaction size a multiple of both
	 * the receiver and the transmitter transfer sizes. It is also possible
	 * to use different RAM and device transfer sizes, and it does work well
	 * with some devices, e.g. with V08R07S01E SD host controllers, which
	 * can use 128 byte transfers. But this doesn't work with other devices,
	 * especially when the transaction size is unknown. This is the case,
	 * e.g. with serial drivers like amba-pl011.c. For reception they set up
	 * a transaction size of 4K and, if fewer bytes are received, pause DMA
	 * and read out the data received via DMA as well as the data left in
	 * the Rx FIFO. For this to work with the RAM side using burst transfers
	 * we enable the SBE bit and terminate the transfer in our .device_pause
	 * handler.
	 */
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

	switch (direction) {
	case DMA_DEV_TO_MEM:
		can_burst = chan->slave_src_width >= 3;
		slave_xfer = min(mem_xfer, can_burst ?
				 chan->slave_src_burst : chan->slave_src_width);
		/*
		 * Is the slave narrower than 64 bits, i.e. isn't using the full
		 * bus width and cannot use bursts?
		 */
		if (mem_xfer > chan->slave_src_burst && !can_burst)
			mem_xfer = chan->slave_src_burst;
		/* Device-to-RAM DMA is unreliable without REQD set */
		hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
			(NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
			NBPF_CHAN_CFG_SBE;
		break;

	case DMA_MEM_TO_DEV:
		slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
				 chan->slave_dst_burst : chan->slave_dst_width);
		hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
		break;

	case DMA_MEM_TO_MEM:
		hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
			(NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
			(NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
		break;

	default:
		return -EINVAL;
	}

	hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
		NBPF_CHAN_CFG_DMS;

	dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
		__func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
		hwdesc->config, size, &src, &dst);

	dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
				   DMA_TO_DEVICE);

	return 0;
}

static size_t nbpf_bytes_left(struct nbpf_channel *chan)
{
	return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
}

static void nbpf_configure(struct nbpf_device *nbpf)
{
	nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
}

/* Generic part */

/* DMA ENGINE functions */
static void nbpf_issue_pending(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	unsigned long flags;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);
	if (list_empty(&chan->queued))
		goto unlock;

	list_splice_tail_init(&chan->queued, &chan->active);

	if (!chan->running) {
		struct nbpf_desc *desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

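/*
 * Residue reporting: for the currently running descriptor the hardware byte
 * counter is read, for descriptors still on the queued or active lists the
 * full transfer length is reported.
 */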
static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);

	if (state) {
		dma_cookie_t running;
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

		if (cookie == running) {
			state->residue = nbpf_bytes_left(chan);
			dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
				state->residue);
		} else if (status == DMA_IN_PROGRESS) {
			struct nbpf_desc *desc;
			bool found = false;

			list_for_each_entry(desc, &chan->active, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}

			if (!found)
				list_for_each_entry(desc, &chan->queued, node)
					if (desc->async_tx.cookie == cookie) {
						found = true;
						break;
					}

			state->residue = found ? desc->length : 0;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

	if (chan->paused)
		status = DMA_PAUSED;

	return status;
}

static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
	struct nbpf_channel *chan = desc->chan;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &chan->queued);
	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);

	return cookie;
}

static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
{
	struct dma_chan *dchan = &chan->dma_chan;
	struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	struct nbpf_link_desc *ldesc;
	struct nbpf_link_reg *hwdesc;
	struct nbpf_desc *desc;
	LIST_HEAD(head);
	LIST_HEAD(lhead);
	int i;
	struct device *dev = dchan->device->dev;

	if (!dpage)
		return -ENOMEM;

	dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
		__func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));

	for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
	     i < ARRAY_SIZE(dpage->ldesc);
	     i++, ldesc++, hwdesc++) {
		ldesc->hwdesc = hwdesc;
		list_add_tail(&ldesc->node, &lhead);
		ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
					hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);

		dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
			hwdesc, &ldesc->hwdesc_dma_addr);
	}

	for (i = 0, desc = dpage->desc;
	     i < ARRAY_SIZE(dpage->desc);
	     i++, desc++) {
		dma_async_tx_descriptor_init(&desc->async_tx, dchan);
		desc->async_tx.tx_submit = nbpf_tx_submit;
		desc->chan = chan;
		INIT_LIST_HEAD(&desc->sg);
		list_add_tail(&desc->node, &head);
	}

	/*
	 * This function cannot be called from interrupt context, so, no need to
	 * save flags
	 */
	spin_lock_irq(&chan->lock);
	list_splice_tail(&lhead, &chan->free_links);
	list_splice_tail(&head, &chan->free);
	list_add(&dpage->node, &chan->desc_page);
	spin_unlock_irq(&chan->lock);

	return ARRAY_SIZE(dpage->desc);
}

static void nbpf_desc_put(struct nbpf_desc *desc)
{
	struct nbpf_channel *chan = desc->chan;
	struct nbpf_link_desc *ldesc, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
		list_move(&ldesc->node, &chan->free_links);

	list_add(&desc->node, &chan->free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void nbpf_scan_acked(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry_safe(desc, tmp, &chan->done, node)
		if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
			list_move(&desc->node, &head);
			desc->user_wait = false;
		}
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

/*
 * We have to allocate descriptors with the channel lock dropped. This means
 * that while the lock is released, free descriptors can be taken by others,
 * so we have to re-check after re-acquiring the lock and possibly retry, if
 * they are gone again.
 */
static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
{
	struct nbpf_desc *desc = NULL;
	struct nbpf_link_desc *ldesc, *prev = NULL;

	nbpf_scan_acked(chan);

	spin_lock_irq(&chan->lock);

	do {
		int i = 0, ret;

		if (list_empty(&chan->free)) {
			/* No more free descriptors */
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
			continue;
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);

		do {
			if (list_empty(&chan->free_links)) {
				/* No more free link descriptors */
				spin_unlock_irq(&chan->lock);
				ret = nbpf_desc_page_alloc(chan);
				if (ret < 0) {
					nbpf_desc_put(desc);
					return NULL;
				}
				spin_lock_irq(&chan->lock);
				continue;
			}

			ldesc = list_first_entry(&chan->free_links,
					struct nbpf_link_desc, node);
			ldesc->desc = desc;
			if (prev)
				prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;

			prev = ldesc;
			list_move_tail(&ldesc->node, &desc->sg);

			i++;
		} while (i < len);
	} while (!desc);

	prev->hwdesc->next = 0;

	spin_unlock_irq(&chan->lock);

	return desc;
}

static void nbpf_chan_idle(struct nbpf_channel *chan)
{
	struct nbpf_desc *desc, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->lock, flags);

	list_splice_init(&chan->done, &head);
	list_splice_init(&chan->active, &head);
	list_splice_init(&chan->queued, &head);

	chan->running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, tmp, &head, node) {
		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
			__func__, desc, desc->async_tx.cookie);
		list_del(&desc->node);
		nbpf_desc_put(desc);
	}
}

static int nbpf_pause(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	chan->paused = true;
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
	/* See comment in nbpf_prep_one() */
	nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

	return 0;
}

static int nbpf_terminate_all(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
	dev_dbg(dchan->device->dev, "Terminating\n");

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);

	return 0;
}

static int nbpf_config(struct dma_chan *dchan,
		       struct dma_slave_config *config)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);

	dev_dbg(dchan->device->dev, "Entry %s\n", __func__);

	/*
	 * We could check config->slave_id to match chan->terminal here,
	 * but with DT they would be coming from the same source, so
	 * such a check would be superfluous
	 */

	chan->slave_dst_addr = config->dst_addr;
	chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width, 1);
	chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
					       config->dst_addr_width,
					       config->dst_maxburst);
	chan->slave_src_addr = config->src_addr;
	chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width, 1);
	chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
					       config->src_addr_width,
					       config->src_maxburst);

	return 0;
}

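/*
 * Build one transfer descriptor from parallel source and destination
 * scatterlists. For slave transfers one of the two lists is a single-entry
 * list, pointing at the device FIFO address, see nbpf_prep_slave_sg().
 */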
static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
		struct scatterlist *src_sg, struct scatterlist *dst_sg,
		size_t len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct nbpf_link_desc *ldesc;
	struct scatterlist *mem_sg;
	struct nbpf_desc *desc;
	bool inc_src, inc_dst;
	size_t data_len = 0;
	int i = 0;

	switch (direction) {
	case DMA_DEV_TO_MEM:
		mem_sg = dst_sg;
		inc_src = false;
		inc_dst = true;
		break;

	case DMA_MEM_TO_DEV:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = false;
		break;

	default:
	case DMA_MEM_TO_MEM:
		mem_sg = src_sg;
		inc_src = true;
		inc_dst = true;
	}

	desc = nbpf_desc_get(chan, len);
	if (!desc)
		return NULL;

	desc->async_tx.flags = flags;
	desc->async_tx.cookie = -EBUSY;
	desc->user_wait = false;

	/*
	 * This is a private descriptor list, and we own the descriptor. No need
	 * to lock.
	 */
	list_for_each_entry(ldesc, &desc->sg, node) {
		int ret = nbpf_prep_one(ldesc, direction,
					sg_dma_address(src_sg),
					sg_dma_address(dst_sg),
					sg_dma_len(mem_sg),
					i == len - 1);
		if (ret < 0) {
			nbpf_desc_put(desc);
			return NULL;
		}
		data_len += sg_dma_len(mem_sg);
		if (inc_src)
			src_sg = sg_next(src_sg);
		if (inc_dst)
			dst_sg = sg_next(dst_sg);
		mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
		i++;
	}

	desc->length = data_len;

	/* The user has to return the descriptor to us ASAP via .tx_submit() */
	return &desc->async_tx;
}

static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = len;
	sg_dma_len(&src_sg) = len;

	dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
		__func__, len, &src, &dst);

	return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
			    DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct scatterlist slave_sg;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	sg_init_table(&slave_sg, 1);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		sg_dma_address(&slave_sg) = chan->slave_dst_addr;
		return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
				    direction, flags);

	case DMA_DEV_TO_MEM:
		sg_dma_address(&slave_sg) = chan->slave_src_addr;
		return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
				    direction, flags);

	default:
		return NULL;
	}
}

static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	int ret;

	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->free_links);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->done);

	ret = nbpf_desc_page_alloc(chan);
	if (ret < 0)
		return ret;

	dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
		chan->terminal);

	nbpf_chan_configure(chan);

	return ret;
}

static void nbpf_free_chan_resources(struct dma_chan *dchan)
{
	struct nbpf_channel *chan = nbpf_to_chan(dchan);
	struct nbpf_desc_page *dpage, *tmp;

	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);

	nbpf_chan_halt(chan);
	nbpf_chan_idle(chan);
	/* Clean up in case a channel is re-used for MEMCPY after slave DMA */
	nbpf_chan_prepare_default(chan);

	list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
		struct nbpf_link_desc *ldesc;
		int i;
		list_del(&dpage->node);
		for (i = 0, ldesc = dpage->ldesc;
		     i < ARRAY_SIZE(dpage->ldesc);
		     i++, ldesc++)
			dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
					 sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
		free_page((unsigned long)dpage);
	}
}

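/*
 * DT translation: two cells per channel specifier - the terminal number and
 * the NBPF_SLAVE_RQ_* flags from <dt-bindings/dma/nbpfaxi.h>. A consumer node
 * would contain something like (terminal number purely illustrative):
 *
 *	dmas = <&dmac 5 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
 */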
static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;
	struct nbpf_channel *chan;

	if (dma_spec->args_count != 2)
		return NULL;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
		dma_spec->np->name);

	chan = nbpf_to_chan(dchan);

	chan->terminal = dma_spec->args[0];
	chan->flags = dma_spec->args[1];

	nbpf_chan_prepare(chan);
	nbpf_chan_configure(chan);

	return dchan;
}

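/*
 * Bottom half: runs completion callbacks outside of hard-IRQ context and
 * recycles descriptors that have already been acknowledged by the user.
 */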
static void nbpf_chan_tasklet(unsigned long data)
{
	struct nbpf_channel *chan = (struct nbpf_channel *)data;
	struct nbpf_desc *desc, *tmp;
	struct dmaengine_desc_callback cb;

	while (!list_empty(&chan->done)) {
		bool found = false, must_put, recycling = false;

		spin_lock_irq(&chan->lock);

		list_for_each_entry_safe(desc, tmp, &chan->done, node) {
			if (!desc->user_wait) {
				/* Newly completed descriptor, have to process */
				found = true;
				break;
			} else if (async_tx_test_ack(&desc->async_tx)) {
				/*
				 * This descriptor was waiting for a user ACK,
				 * it can be recycled now.
				 */
				list_del(&desc->node);
				spin_unlock_irq(&chan->lock);
				nbpf_desc_put(desc);
				recycling = true;
				break;
			}
		}

		if (recycling)
			continue;

		if (!found) {
			/* This can happen if TERMINATE_ALL has been called */
			spin_unlock_irq(&chan->lock);
			break;
		}

		dma_cookie_complete(&desc->async_tx);

		/*
		 * Decide this now, while still holding the lock: once it is
		 * released we must not dereference desc, since it may still be
		 * on the "done" list and could be recycled under us
		 */
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			must_put = true;
		} else {
			desc->user_wait = true;
			must_put = false;
		}

		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		/* ack and callback completed descriptor */
		spin_unlock_irq(&chan->lock);

		dmaengine_desc_callback_invoke(&cb, NULL);

		if (must_put)
			nbpf_desc_put(desc);
	}
}

static irqreturn_t nbpf_chan_irq(int irq, void *dev)
{
	struct nbpf_channel *chan = dev;
	bool done = nbpf_status_get(chan);
	struct nbpf_desc *desc;
	irqreturn_t ret;
	bool bh = false;

	if (!done)
		return IRQ_NONE;

	nbpf_status_ack(chan);

	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);

	spin_lock(&chan->lock);
	desc = chan->running;
	if (WARN_ON(!desc)) {
		ret = IRQ_NONE;
		goto unlock;
	} else {
		ret = IRQ_HANDLED;
		bh = true;
	}

	list_move_tail(&desc->node, &chan->done);
	chan->running = NULL;

	if (!list_empty(&chan->active)) {
		desc = list_first_entry(&chan->active,
					struct nbpf_desc, node);
		if (!nbpf_start(desc))
			chan->running = desc;
	}

unlock:
	spin_unlock(&chan->lock);

	if (bh)
		tasklet_schedule(&chan->tasklet);

	return ret;
}

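/*
 * Error interrupt: NBPF_DSTAT_ER has one bit set per failed channel; each
 * such channel is aborted and all its queued transfers are dropped without
 * invoking callbacks.
 */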
static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
		/* On error: abort all queued transfers, no callback */
		nbpf_error_clear(chan);
		nbpf_chan_idle(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}

static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
{
	struct dma_device *dma_dev = &nbpf->dma_dev;
	struct nbpf_channel *chan = nbpf->chan + n;
	int ret;

	chan->nbpf = nbpf;
	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
	INIT_LIST_HEAD(&chan->desc_page);
	spin_lock_init(&chan->lock);
	chan->dma_chan.device = dma_dev;
	dma_cookie_init(&chan->dma_chan);
	nbpf_chan_prepare_default(chan);

	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);

	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

	tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
	ret = devm_request_irq(dma_dev->dev, chan->irq,
			       nbpf_chan_irq, IRQF_SHARED,
			       chan->name, chan);
	if (ret < 0)
		return ret;

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->dma_chan.device_node,
		      &dma_dev->channels);

	return 0;
}

static const struct of_device_id nbpf_match[] = {
	{.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]},
	{.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]},
	{.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]},
	{.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]},
	{.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]},
	{.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]},
	{.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]},
	{.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]},
	{.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]},
	{}
};
MODULE_DEVICE_TABLE(of, nbpf_match);

static int nbpf_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
	struct device_node *np = dev->of_node;
	struct nbpf_device *nbpf;
	struct dma_device *dma_dev;
	struct resource *iomem, *irq_res;
	const struct nbpf_config *cfg;
	int num_channels;
	int ret, irq, eirq, i;
	int irqbuf[9] /* maximum 8 channels + error IRQ */;
	unsigned int irqs = 0;

	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);

	/* DT only */
	if (!np || !of_id || !of_id->data)
		return -ENODEV;

	cfg = of_id->data;
	num_channels = cfg->num_channels;

	nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
			    sizeof(nbpf->chan[0]), GFP_KERNEL);
	if (!nbpf)
		return -ENOMEM;

	dma_dev = &nbpf->dma_dev;
	dma_dev->dev = dev;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nbpf->base = devm_ioremap_resource(dev, iomem);
	if (IS_ERR(nbpf->base))
		return PTR_ERR(nbpf->base);

	nbpf->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nbpf->clk))
		return PTR_ERR(nbpf->clk);

	of_property_read_u32(np, "max-burst-mem-read",
			     &nbpf->max_burst_mem_read);
	of_property_read_u32(np, "max-burst-mem-write",
			     &nbpf->max_burst_mem_write);

	nbpf->config = cfg;

	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!irq_res)
			break;

		for (irq = irq_res->start; irq <= irq_res->end;
		     irq++, irqs++)
			irqbuf[irqs] = irq;
	}

	/*
	 * 3 IRQ resource schemes are supported:
	 * 1. 1 shared IRQ for error and all channels
	 * 2. 2 IRQs: one for error and one shared for all channels
	 * 3. 1 IRQ for error and a separate IRQ for each channel
	 */
	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
		return -ENXIO;

	if (irqs == 1) {
		eirq = irqbuf[0];

		for (i = 0; i <= num_channels; i++)
			nbpf->chan[i].irq = irqbuf[0];
	} else {
		eirq = platform_get_irq_byname(pdev, "error");
		if (eirq < 0)
			return eirq;

		if (irqs == num_channels + 1) {
			struct nbpf_channel *chan;

			for (i = 0, chan = nbpf->chan; i <= num_channels;
			     i++, chan++) {
				/* Skip the error IRQ */
				if (irqbuf[i] == eirq)
					i++;
				chan->irq = irqbuf[i];
			}

			if (chan != nbpf->chan + num_channels)
				return -EINVAL;
		} else {
			/* 2 IRQs and more than one channel */
			if (irqbuf[0] == eirq)
				irq = irqbuf[1];
			else
				irq = irqbuf[0];

			for (i = 0; i <= num_channels; i++)
				nbpf->chan[i].irq = irq;
		}
	}

	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
			       IRQF_SHARED, "dma error", nbpf);
	if (ret < 0)
		return ret;
	nbpf->eirq = eirq;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Create DMA Channel */
	for (i = 0; i < num_channels; i++) {
		ret = nbpf_chan_probe(nbpf, i);
		if (ret < 0)
			return ret;
	}

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= nbpf_alloc_chan_resources;
	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
	dma_dev->device_tx_status = nbpf_tx_status;
	dma_dev->device_issue_pending = nbpf_issue_pending;

	/*
	 * If we drop support for unaligned MEMCPY buffer addresses and / or
	 * lengths by setting
	 * dma_dev->copy_align = 4;
	 * then we can set transfer length to 4 bytes in nbpf_prep_one() for
	 * DMA_MEM_TO_MEM
	 */

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
	dma_dev->device_config = nbpf_config;
	dma_dev->device_pause = nbpf_pause;
	dma_dev->device_terminate_all = nbpf_terminate_all;

	dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, nbpf);

	ret = clk_prepare_enable(nbpf->clk);
	if (ret < 0)
		return ret;

	nbpf_configure(nbpf);

	ret = dma_async_device_register(dma_dev);
	if (ret < 0)
		goto e_clk_off;

	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
	if (ret < 0)
		goto e_dma_dev_unreg;

	return 0;

e_dma_dev_unreg:
	dma_async_device_unregister(dma_dev);
e_clk_off:
	clk_disable_unprepare(nbpf->clk);

	return ret;
}

static int nbpf_remove(struct platform_device *pdev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

	for (i = 0; i < nbpf->config->num_channels; i++) {
		struct nbpf_channel *chan = nbpf->chan + i;

		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&nbpf->dma_dev);
	clk_disable_unprepare(nbpf->clk);

	return 0;
}

static const struct platform_device_id nbpf_ids[] = {
	{"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
	{"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
	{"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
	{"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
	{"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
	{"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
	{"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
	{"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
	{"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
	{},
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);

#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
	clk_disable_unprepare(nbpf->clk);
	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
	return clk_prepare_enable(nbpf->clk);
}
#endif

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};

static struct platform_driver nbpf_driver = {
	.driver = {
		.name = "dma-nbpf",
		.of_match_table = nbpf_match,
		.pm = &nbpf_pm_ops,
	},
	.id_table = nbpf_ids,
	.probe = nbpf_probe,
	.remove = nbpf_remove,
};

module_platform_driver(nbpf_driver);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
MODULE_LICENSE("GPL v2");