/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

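/*
 * Requests shorter than this are done in PIO instead; presumably the
 * cost of setting up a DMA transfer outweighs any gain for such small
 * requests (an assumption - the driver does not say why 8 was chosen).
 */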
#define TMIO_MMC_MIN_DMA_LEN 8

static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
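	/*
	 * Assumption, based on later TMIO/SDHI headers rather than on
	 * anything in this file: 0xd8 is the CTL_DMA_ENABLE register,
	 * and writing 2 sets the bit that routes SD data transfers to
	 * the DMA controller.
	 */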
	writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = -EINVAL;	/* only valid after dmaengine_submit() */
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

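	/*
	 * An unaligned request can only be bounced if it is a single
	 * element no larger than one page (the bounce buffer is a
	 * single page) and the required alignment is finer than a
	 * page; element lengths must always be a multiple of the
	 * alignment. The same constraints apply to the Tx path below.
	 */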
	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned - use our bounce buffer then */
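	/*
	 * For reads the DMA lands in the bounce buffer; the data is
	 * copied back into the real sg list once the transfer has
	 * completed (in tmio_mmc_check_bounce_buffer() in
	 * tmio_mmc_pio.c - an assumption about the exact helper name).
	 */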
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_FROM_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie = -EINVAL;	/* only valid after dmaengine_submit() */
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned - use our bounce buffer then */
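	/*
	 * Unlike the read path, the data has to be staged into the
	 * bounce buffer *before* the buffer is mapped for DMA.
	 */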
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
			DMA_TO_DEVICE, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}

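/*
 * Tasklet scheduled once the command has completed (from
 * tmio_mmc_pio.c in this kernel): re-enable the DATAEND interrupt and
 * kick the descriptor queued by tmio_mmc_start_dma_rx/tx() above.
 */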
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	/* host was already dereferenced for the lock - only data can be NULL */
	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

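/*
 * Completion tasklet, scheduled from the DATAEND interrupt handler:
 * unmap the scatterlist and finish the data request off.
 */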
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

/* It might be necessary to make this filter MFD-specific */
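/*
 * The filter accepts the first DMA_SLAVE channel offered and hands the
 * platform's slave data (e.g. a struct sh_dmae_slave on SH/SH-Mobile)
 * to the DMA driver via chan->private - the pre-dma_slave_config way
 * of binding a slave to a channel.
 */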
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (pdata->dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

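		/*
		 * A single GFP_DMA page - which is why unaligned
		 * requests larger than PAGE_CACHE_SIZE are never
		 * bounced above.
		 */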
		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);

		tmio_mmc_enable_dma(host, true);

		return;

ebouncebuf:
		dma_release_channel(host->chan_rx);
		host->chan_rx = NULL;
ereqrx:
		dma_release_channel(host->chan_tx);
		host->chan_tx = NULL;
		return;
	}
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}
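
/*
 * A minimal sketch (not part of this file) of how a platform populates
 * the tmio_mmc_data::dma configuration consumed above, modeled on the
 * sh_mobile_sdhi glue of this era. The slave IDs, variable names and
 * the alignment value are illustrative assumptions, not taken from any
 * particular board.
 */
#if 0
#include <linux/sh_dma.h>

static struct sh_dmae_slave param_tx = {
	.slave_id = SHDMA_SLAVE_SDHI0_TX,	/* assumed platform slave ID */
};
static struct sh_dmae_slave param_rx = {
	.slave_id = SHDMA_SLAVE_SDHI0_RX,	/* assumed platform slave ID */
};
static struct tmio_mmc_dma dma_priv = {
	.chan_priv_tx	 = &param_tx,	/* passed to tmio_mmc_filter() */
	.chan_priv_rx	 = &param_rx,
	.alignment_shift = 1,		/* buffers must be 2-byte aligned */
};

static void example_attach_dma(struct tmio_mmc_data *mmc_data)
{
	/* tmio_mmc_request_dma() picks this up through host->pdata->dma */
	mmc_data->dma = &dma_priv;
}
#endif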