/*
 * PXA2xx SPI private DMA support.
 *
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include "spi-pxa2xx.h"

#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)

bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map the DMA buffers and do a DMA transfer if successful,
	 * but only if the length is non-zero and does not exceed
	 * MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead. Care is needed above because the transfer may
	 * have been passed with buffers that are already DMA mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers.
	 *
	 * REVISIT: large transfers are exactly where we most want to be
	 * using DMA. If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}

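/* Map the current transfer's tx and rx buffers for streaming DMA.
 * A NULL buffer is substituted with the small null_dma_buf scratch
 * buffer so the channel still has a valid address to use. Returns 1
 * when both buffers are ready for DMA, 0 when the transfer must fall
 * back to PIO.
 */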
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					  drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					  drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
				 drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

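/* Busy-wait until the SSP clears its busy flag (SSSR_BSY), i.e. the
 * receive side has stalled. Returns non-zero on success, 0 if the
 * wait timed out.
 */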
static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

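/* Busy-wait until the given DMA channel reaches its stopped state
 * (DCSR_STOPSTATE). Returns non-zero on success, 0 on timeout.
 */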
static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

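/* Abort the current transfer on a DMA or FIFO error: reset both DMA
 * channels, quiesce and disable the SSP port, unmap the DMA buffers,
 * then mark the message as failed and schedule the pump_transfers
 * tasklet to report the error back.
 */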
static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

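/* Finish a DMA transfer: stop both channels, drain any trailing bytes
 * left in the receive fifo by PIO, account for the data actually
 * received, and advance the message to its next transfer via the
 * pump_transfers tasklet.
 */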
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* Update the buffer pointer for the amount completed in DMA */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* Read trailing data from the fifo. It does not matter how many
	 * bytes are in the fifo: just read until the buffer is full or
	 * the fifo is empty, whichever occurs first.
	 */
	drv_data->read(drv_data);

	/* Return the count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

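/* DMA channel interrupt callback, registered for both the rx and tx
 * channels via pxa_request_dma(). Handles bus errors on either
 * channel; on PXA25x, which has no SSP timeout interrupt, it also
 * completes the transfer once the tx channel signals DCSR_ENDINTR.
 */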
void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {
		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* PXA25x SSP has no timeout interrupt; wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}

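/* SSP interrupt service path for transfers running in DMA mode:
 * aborts on a receive fifo overrun, ignores a spurious timeout while
 * the tx channel is still running, and completes the transfer on a
 * real timeout or once the rx buffer is full.
 */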
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
			"dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for a false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

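/* Program both DMA channels for the current transfer: the rx channel
 * moves data from SSDR into memory, the tx channel from memory into
 * SSDR. When the null scratch buffer is in use, the memory-side
 * address increment is disabled so every beat reuses the same word.
 */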
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA Channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA Channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable dma end irqs on SSP to detect end of transfer */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}

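/* Kick off the transfer by setting the RUN bit on both DMA channels */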
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}

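/* Allocate the rx and tx DMA channels (rx at a higher priority than
 * tx) and route the SSP's request lines to them through the DRCMR
 * mapping registers.
 */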
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels (rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
						DMA_PRIO_HIGH,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
						DMA_PRIO_MEDIUM,
						pxa2xx_spi_dma_handler,
						drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}

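/* Undo pxa2xx_spi_dma_setup(): clear the DRCMR mappings and free both
 * DMA channels.
 */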
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel != 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != 0)
		pxa_free_dma(drv_data->rx_channel);
}

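/* Re-program the DRCMR request-line mappings after a system resume */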
void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}

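/* Pick a DMA burst size and matching SSP fifo thresholds for the given
 * word size. Returns 0 when the requested burst size is used as is,
 * 1 when it had to be clamped to half the fifo depth.
 */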
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes). The computation below
	 * is (burst_size rounded up to nearest 8 byte, word or long word)
	 * divided by (bytes/register); the tx threshold is the inverse of
	 * the rx, so that there will always be enough data in the rx fifo
	 * to satisfy a burst, and there will always be enough space in the
	 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
	 * there is not enough space). There must always remain enough empty
	 * space in the rx fifo for any data loaded into the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15
	 * spaces.
	 */

	/* Find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* Use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* If the default burst size is not set,
			 * do it now and fall through */
			chip->dma_burst_size = DCMD_BURST8;
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* Don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16 - thresh_words) & SSCR1_TFT);

	return retval;
}