/*
 * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
 * JZ4740 DMAC support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/mach-jz4740/dma.h>

#include "virt-dma.h"

#define JZ_DMA_NR_CHANS 6

struct jz4740_dma_sg {
	dma_addr_t addr;
	unsigned int len;
};

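/*
 * One transfer descriptor: the transfer direction, whether the descriptor
 * repeats forever (cyclic transfers) and the list of segments that are
 * programmed into the controller one at a time.
 */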
struct jz4740_dma_desc {
	struct virt_dma_desc vdesc;

	enum dma_transfer_direction direction;
	bool cyclic;

	unsigned int num_sgs;
	struct jz4740_dma_sg sg[];
};

struct jz4740_dmaengine_chan {
	struct virt_dma_chan vchan;
	struct jz4740_dma_chan *jz_chan;

	dma_addr_t fifo_addr;

	struct jz4740_dma_desc *desc;
	unsigned int next_sg;
};

struct jz4740_dma_dev {
	struct dma_device ddev;

	struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
};

static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
}

static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4740_dma_desc, vdesc);
}

static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
{
	return kzalloc(sizeof(struct jz4740_dma_desc) +
		sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
}

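/* Map a dmaengine slave bus width to the matching JZ4740 DMA width setting. */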
static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return JZ4740_DMA_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return JZ4740_DMA_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return JZ4740_DMA_WIDTH_32BIT;
	default:
		return JZ4740_DMA_WIDTH_32BIT;
	}
}

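/*
 * Pick the largest hardware transfer size (1, 2, 4, 16 or 32 bytes) that
 * does not exceed the requested maxburst.
 */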
static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
{
	if (maxburst <= 1)
		return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
	else if (maxburst <= 3)
		return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
	else if (maxburst <= 15)
		return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
	else if (maxburst <= 31)
		return JZ4740_DMA_TRANSFER_SIZE_16BYTE;

	return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}

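/*
 * Translate the generic dma_slave_config into the JZ4740-specific channel
 * configuration: the memory side of the transfer is auto-incremented, the
 * device side is the fixed FIFO address remembered for later transfers.
 */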
static int jz4740_dma_slave_config(struct dma_chan *c,
	const struct dma_slave_config *config)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	struct jz4740_dma_config jzcfg;

	switch (config->direction) {
	case DMA_MEM_TO_DEV:
		jzcfg.flags = JZ4740_DMA_SRC_AUTOINC;
		jzcfg.transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
		chan->fifo_addr = config->dst_addr;
		break;
	case DMA_DEV_TO_MEM:
		jzcfg.flags = JZ4740_DMA_DST_AUTOINC;
		jzcfg.transfer_size = jz4740_dma_maxburst(config->src_maxburst);
		chan->fifo_addr = config->src_addr;
		break;
	default:
		return -EINVAL;
	}

	jzcfg.src_width = jz4740_dma_width(config->src_addr_width);
	jzcfg.dst_width = jz4740_dma_width(config->dst_addr_width);
	jzcfg.mode = JZ4740_DMA_MODE_SINGLE;
	jzcfg.request_type = config->slave_id;

	jz4740_dma_configure(chan->jz_chan, &jzcfg);

	return 0;
}

static int jz4740_dma_terminate_all(struct dma_chan *c)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	jz4740_dma_disable(chan->jz_chan);
	chan->desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config = (struct dma_slave_config *)arg;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return jz4740_dma_slave_config(chan, config);
	case DMA_TERMINATE_ALL:
		return jz4740_dma_terminate_all(chan);
	default:
		return -ENOSYS;
	}
}

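/*
 * Program the controller with the next scatterlist segment of the current
 * descriptor and start it. Must be called with the vchan lock held. For
 * cyclic descriptors next_sg wraps around, so the transfer never stops.
 */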
static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
{
	dma_addr_t src_addr, dst_addr;
	struct virt_dma_desc *vdesc;
	struct jz4740_dma_sg *sg;

	jz4740_dma_disable(chan->jz_chan);

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return 0;
		chan->desc = to_jz4740_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg = &chan->desc->sg[chan->next_sg];

	if (chan->desc->direction == DMA_MEM_TO_DEV) {
		src_addr = sg->addr;
		dst_addr = chan->fifo_addr;
	} else {
		src_addr = chan->fifo_addr;
		dst_addr = sg->addr;
	}
	jz4740_dma_set_src_addr(chan->jz_chan, src_addr);
	jz4740_dma_set_dst_addr(chan->jz_chan, dst_addr);
	jz4740_dma_set_transfer_count(chan->jz_chan, sg->len);

	chan->next_sg++;

	jz4740_dma_enable(chan->jz_chan);

	return 0;
}

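/*
 * Completion callback from the low-level JZ4740 DMA code, invoked when the
 * currently programmed segment has finished. For cyclic descriptors run the
 * period callback, otherwise complete the cookie once the last segment is
 * done, then kick off the next segment or descriptor.
 */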
static void jz4740_dma_complete_cb(struct jz4740_dma_chan *jz_chan, int error,
	void *devid)
{
	struct jz4740_dmaengine_chan *chan = devid;

	spin_lock(&chan->vchan.lock);
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
		} else {
			if (chan->next_sg == chan->desc->num_sgs) {
				/* Complete the cookie before dropping the pointer. */
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
		}
	}
	jz4740_dma_start_transfer(chan);
	spin_unlock(&chan->vchan.lock);
}

static void jz4740_dma_issue_pending(struct dma_chan *c)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
		jz4740_dma_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

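/*
 * Build a slave scatter/gather descriptor: copy the DMA address and length
 * of every scatterlist entry into the driver's own segment list.
 */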
static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	struct jz4740_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = jz4740_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);
	}

	desc->num_sgs = sg_len;
	desc->direction = direction;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

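/*
 * Build a cyclic descriptor by splitting the buffer into period-sized
 * segments. The buffer length must be a multiple of the period length.
 */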
static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	struct jz4740_dma_desc *desc;
	unsigned int num_periods, i;

	if (buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = jz4740_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg[i].addr = buf_addr;
		desc->sg[i].len = period_len;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->direction = direction;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

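/*
 * Bytes left to transfer for a descriptor: the segments that have not been
 * started yet plus, for the descriptor currently in flight, the remainder
 * the hardware reports for the active segment.
 */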
static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
	struct jz4740_dma_desc *desc, unsigned int next_sg)
{
	size_t residue = 0;
	unsigned int i;

	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg[i].len;

	if (next_sg != 0)
		residue += jz4740_dma_get_residue(chan->jz_chan);

	return residue;
}

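/*
 * Report the status of a cookie and, when a state is supplied, the residue
 * of the matching descriptor: the active one, a still-pending one, or zero
 * if it has already completed.
 */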
static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_SUCCESS || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
		state->residue = jz4740_dma_desc_residue(chan, chan->desc,
				chan->next_sg);
	} else if (vdesc) {
		state->residue = jz4740_dma_desc_residue(chan,
				to_jz4740_dma_desc(vdesc), 0);
	} else {
		state->residue = 0;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);

	chan->jz_chan = jz4740_dma_request(chan, NULL);
	if (!chan->jz_chan)
		return -EBUSY;

	jz4740_dma_set_complete_cb(chan->jz_chan, jz4740_dma_complete_cb);

	return 0;
}

static void jz4740_dma_free_chan_resources(struct dma_chan *c)
{
	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);

	vchan_free_chan_resources(&chan->vchan);
	jz4740_dma_free(chan->jz_chan);
	chan->jz_chan = NULL;
}

static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
}

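/*
 * Register one dmaengine device with JZ_DMA_NR_CHANS virtual channels; each
 * channel requests a physical JZ4740 DMA channel when it is first allocated.
 */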
static int jz4740_dma_probe(struct platform_device *pdev)
{
	struct jz4740_dmaengine_chan *chan;
	struct jz4740_dma_dev *dmadev;
	struct dma_device *dd;
	unsigned int i;
	int ret;

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
	dd->device_tx_status = jz4740_dma_tx_status;
	dd->device_issue_pending = jz4740_dma_issue_pending;
	dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
	dd->device_control = jz4740_dma_control;
	dd->dev = &pdev->dev;
	dd->chancnt = JZ_DMA_NR_CHANS;
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < dd->chancnt; i++) {
		chan = &dmadev->chan[i];
		chan->vchan.desc_free = jz4740_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dmadev);

	return 0;
}

static int jz4740_dma_remove(struct platform_device *pdev)
{
	struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dmadev->ddev);

	return 0;
}

static struct platform_driver jz4740_dma_driver = {
	.probe = jz4740_dma_probe,
	.remove = jz4740_dma_remove,
	.driver = {
		.name = "jz4740-dma",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(jz4740_dma_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 DMA driver");
MODULE_LICENSE("GPL v2");