/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"
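
/*
 * Transfers shorter than this many bytes are not handed to DMA but
 * forced to PIO - presumably too short to be worth the setup cost.
 */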
#define TMIO_MMC_MIN_DMA_LEN 8

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
        /* Switch DMA mode on or off - SuperH specific? */
        writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}
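
/*
 * Prepare a DMA read: map the scatterlist, then build and submit a slave
 * descriptor. The transfer is only started later, when the dma_issue
 * tasklet calls dma_async_issue_pending(). Falls back to PIO whenever
 * the buffer cannot be made to satisfy the alignment requirements.
 */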
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie = -EINVAL;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
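
        /*
         * The platform-specified DMA alignment applies to both the offset
         * and the length of each sg element: an unaligned offset can still
         * be fixed up via the bounce buffer, an unaligned length cannot.
         */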
        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }
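
        /* While DMA is handling the data, the PIO "RX ready" IRQ is not wanted */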
        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* A single sg element may be unaligned - redirect it to the bounce buffer */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }
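
        /*
         * dma_map_sg() returns the number of mapped segments, which may be
         * fewer than host->sg_len if entries were merged, or 0 on failure;
         * the slave descriptor must be built over the mapped count.
         */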
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);
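
        /*
         * Any failure releases *both* channels - DMA is used for both
         * directions or not at all - so every later request will take
         * the PIO path.
         */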
pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->sg_len);
}
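
/*
 * Prepare a DMA write. Mirrors the read path above, except that an
 * unaligned buffer must be copied into the bounce buffer before the
 * transfer is mapped.
 */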
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        struct tmio_mmc_data *pdata = host->pdata;
        dma_cookie_t cookie = -EINVAL;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* A single sg element may be unaligned - copy it into the bounce buffer */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);

                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
        dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
                __func__, host->sg_len, ret, cookie, host->mrq);

pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                tmio_mmc_enable_dma(host, false);
        }

        dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}
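
/*
 * Entry point used by the core driver: pick the channel matching the
 * transfer direction. If no channel was acquired for that direction,
 * nothing happens here and the transfer is left to the PIO code.
 */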
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}
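
/*
 * Tasklet that actually kicks off a submitted transfer: re-enable the
 * DATAEND interrupt for completion, then tell the dmaengine driver to
 * process its pending descriptors.
 */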
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}
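
/*
 * DMA completion tasklet: unmap the scatterlist of the just-finished
 * direction and complete the data phase via tmio_mmc_do_data_irq().
 */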
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}
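
/*
 * dmaengine filter, called for every candidate channel offered by
 * dma_request_channel(): accept unconditionally, but stash the slave
 * configuration in chan->private for the DMA driver to match against.
 */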
/* It might be necessary to make this filter MFD-specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}
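
/*
 * Acquire the Tx and Rx channels, the bounce page and the tasklets.
 * Setup is all-or-nothing: any failure releases whatever was already
 * acquired and leaves the host in PIO mode.
 */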
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!pdata->dma)
                return;

        if (!host->chan_tx && !host->chan_rx) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_tx);
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
                                                    pdata->dma->chan_priv_rx);
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;
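
                /*
                 * One DMA-capable page is enough for the bounce buffer:
                 * the alignment checks above bounce at most one sg
                 * element, no longer than PAGE_CACHE_SIZE.
                 */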
                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}
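
/*
 * Undo tmio_mmc_request_dma(): each channel pointer is cleared before
 * the channel is released, so that code checking host->chan_tx/chan_rx
 * stops seeing it first.
 */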
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}