/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

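/*
 * DMA is only used when both an Rx and a Tx channel were acquired; the
 * controller-side switch itself is delegated to the platform-provided
 * enable() hook, where one exists.
 */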
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}

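/*
 * Abort any in-flight transfer on both channels, then re-enable DMA,
 * presumably so the controller is left ready for the next request.
 */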
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

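/*
 * Prepare a device-to-memory transfer for the current scatterlist. DMA is
 * only attempted if every element satisfies the controller's alignment
 * constraint (derived from pdata->alignment_shift); a single unaligned
 * element that fits in one page is routed through the bounce buffer
 * instead. Anything else falls back to PIO.
 */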
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The sole sg element may be unaligned; use the bounce buffer in that case */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

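/*
 * Memory-to-device counterpart of tmio_mmc_start_dma_rx(). The alignment
 * rules are identical, but when the bounce buffer is used the payload has
 * to be copied into it (under a kmap_atomic mapping) before the
 * scatterlist is handed to the dmaengine.
 */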
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The sole sg element may be unaligned; use the bounce buffer in that case */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

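/*
 * Dispatch to the Rx or Tx path for the given data request, provided the
 * corresponding channel was successfully acquired earlier.
 */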
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

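/*
 * Deferred "issue" step: re-enable the DATAEND interrupt and kick the
 * dmaengine so the previously submitted descriptor actually starts.
 */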
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

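/*
 * DMA-completion tasklet: unmap the scatterlist for the direction that
 * just finished and let the core complete the data request.
 */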
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

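/*
 * Acquire and configure the Tx and Rx channels (via device tree, or via
 * the platform-supplied filter data), allocate the bounce page and set up
 * the tasklets. DMA is all-or-nothing: a failure at any step releases
 * whatever was already acquired and leaves the host in PIO mode.
 */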
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
		(!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

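/*
 * Release both channels and free the bounce page; safe to call even if
 * DMA was never set up.
 */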
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}