/*
 * PXA2xx SPI private DMA support.
 *
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

#include <mach/dma.h>
#include "spi-pxa2xx.h"

#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)

bool pxa2xx_spi_dma_is_possible(size_t len)
{
	/* Try to map dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and no larger than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers.
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	return len > 0 && len <= MAX_DMA_LEN;
}

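/*
 * Map the current transfer's buffers for streaming DMA. Returns 1 when
 * DMA can be used (both mappings valid), 0 to make the caller fall back
 * to PIO. Buffers already mapped by the message originator
 * (is_dma_mapped) are used as-is; a NULL tx or rx buffer is replaced by
 * the shared null_dma_buf dummy word.
 */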
int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					  drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					  drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
				 drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
				 drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

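/*
 * Busy-wait for the SSP to clear SSSR_BSY, i.e. for the receive path
 * to go idle. Returns the remaining spin budget, so 0 means the wait
 * timed out.
 */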
static int wait_ssp_rx_stall(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

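/* Busy-wait for DCSR_STOPSTATE on a DMA channel; 0 means timeout. */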
static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
				      const char *msg)
{
	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~drv_data->dma_cr1);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

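/*
 * Common completion path for a DMA transfer: quiesce the SSP and both
 * DMA channels, pick up any trailing bytes still in the rx fifo by PIO,
 * account the bytes actually received and schedule the next transfer.
 */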
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~drv_data->dma_cr1);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	pxa2xx_spi_unmap_dma_buffers(drv_data);

	/* Update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* Read trailing data from the fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full or
	 * the fifo is empty, whichever occurs first.
	 */
	drv_data->read(drv_data);

	/* Return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

void pxa2xx_spi_dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on tx channel");
		else
			pxa2xx_spi_dma_error_stop(drv_data,
				"dma_handler: bad bus address on rx channel");
		return;
	}

	/* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);
	}
}

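/*
 * SSP interrupt service while a DMA transfer is in flight: report rx
 * fifo overruns, ignore timeout interrupts that fire while the tx
 * channel is still running, and complete the transfer on a genuine
 * timeout or once the rx buffer is full.
 */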
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;

	irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		pxa2xx_spi_dma_error_stop(drv_data,
					  "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete
		 */
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);

		/* Finish this transfer, start the next */
		pxa2xx_spi_dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

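/*
 * Program both DMA channels for the current transfer: the rx channel
 * drains SSDR into memory (DCMD_FLOWSRC) and the tx channel feeds
 * memory into SSDR (DCMD_FLOWTRG). When one direction has no user
 * buffer, the shared null_dma_buf is used with address increment left
 * disabled so the same dummy word is reused.
 */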
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
	u32 dma_width;

	switch (drv_data->n_bytes) {
	case 1:
		dma_width = DCMD_WIDTH1;
		break;
	case 2:
		dma_width = DCMD_WIDTH2;
		break;
	default:
		dma_width = DCMD_WIDTH4;
		break;
	}

	/* Setup rx DMA Channel */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
	DTADR(drv_data->rx_channel) = drv_data->rx_dma;
	if (drv_data->rx == drv_data->null_dma_buf)
		/* No target address increment */
		DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
						| DCMD_FLOWSRC
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Setup tx DMA Channel */
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DSADR(drv_data->tx_channel) = drv_data->tx_dma;
	DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
	if (drv_data->tx == drv_data->null_dma_buf)
		/* No source address increment */
		DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;
	else
		DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
						| DCMD_FLOWTRG
						| dma_width
						| dma_burst
						| drv_data->len;

	/* Enable dma end irqs on SSP to detect end of transfer */
	if (drv_data->ssp_type == PXA25x_SSP)
		DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

	return 0;
}

void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
	DCSR(drv_data->rx_channel) |= DCSR_RUN;
	DCSR(drv_data->tx_channel) |= DCSR_RUN;
}

int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
	struct device *dev = &drv_data->pdev->dev;
	struct ssp_device *ssp = drv_data->ssp;

	/* Get two DMA channels (rx and tx) */
	drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
					       DMA_PRIO_HIGH,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->rx_channel < 0) {
		dev_err(dev, "problem (%d) requesting rx channel\n",
			drv_data->rx_channel);
		return -ENODEV;
	}
	drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
					       DMA_PRIO_MEDIUM,
					       pxa2xx_spi_dma_handler,
					       drv_data);
	if (drv_data->tx_channel < 0) {
		dev_err(dev, "problem (%d) requesting tx channel\n",
			drv_data->tx_channel);
		pxa_free_dma(drv_data->rx_channel);
		return -ENODEV;
	}

	DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
	DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;

	return 0;
}

void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
	struct ssp_device *ssp = drv_data->ssp;

	DRCMR(ssp->drcmr_rx) = 0;
	DRCMR(ssp->drcmr_tx) = 0;

	if (drv_data->tx_channel != 0)
		pxa_free_dma(drv_data->tx_channel);
	if (drv_data->rx_channel != 0)
		pxa_free_dma(drv_data->rx_channel);
}

void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
{
	if (drv_data->rx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_rx) =
			DRCMR_MAPVLD | drv_data->rx_channel;
	if (drv_data->tx_channel != -1)
		DRCMR(drv_data->ssp->drcmr_tx) =
			DRCMR_MAPVLD | drv_data->tx_channel;
}

int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
					   struct spi_device *spi,
					   u8 bits_per_word, u32 *burst_code,
					   u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).  The computation below
	 * is (burst_size rounded up to the nearest 8 bytes, word or long
	 * word) divided by (bytes/register).  The tx threshold is the
	 * inverse of the rx, so that there will always be enough data in
	 * the rx fifo to satisfy a burst and enough space in the tx fifo
	 * to accept a burst (a tx burst will overwrite the fifo if there
	 * is not enough space); there must also always remain enough empty
	 * space in the rx fifo for any data loaded into the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo
	 * threshold will be 8, or half the fifo.
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 into the tx fifo the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15
	 * spaces left.
	 */
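	/*
	 * Illustrative example: with bits_per_word = 16 (bytes_per_word = 2)
	 * and a 16-byte burst, thresh_words = 16 / 2 = 8, so rx triggers
	 * when 8 entries are filled and tx when 16 - 8 = 8 entries are
	 * empty, i.e. half of the 16-entry fifo each way.
	 */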

	/* find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* if the default burst size is not set,
			 * do it now
			 */
			chip->dma_burst_size = DCMD_BURST8;
			/* fall through */
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16 - thresh_words) & SSCR1_TFT);

	return retval;
}