/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>

#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
#include <asm/arch/cpu.h>

#include "atmel_spi.h"

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 *
 * Newer controllers, marked with "new_1" flag, have:
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SBCR allows faster clocking
 */
struct atmel_spi {
	spinlock_t		lock;

	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	unsigned		new_1:1;
	struct spi_device	*stay;	/* CS held active between messages */

	u8			stopping;
	struct list_head	queue;
	struct spi_transfer	*current_transfer;
	unsigned long		remaining_bytes;

	void			*buffer;	/* scratch for NULL rx/tx bufs */
	dma_addr_t		buffer_dma;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */
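
/*
 * For illustration only: the nCSx GPIO arrives here from board setup
 * code through spi_board_info.controller_data.  A minimal sketch (the
 * device name, pin, and rate below are hypothetical, not part of this
 * driver):
 *
 *	static struct spi_board_info spi_devices[] __initdata = {
 *		{
 *			.modalias	 = "mtd_dataflash",
 *			.bus_num	 = 0,
 *			.chip_select	 = 1,
 *			.max_speed_hz	 = 15 * 1000 * 1000,
 *			.controller_data = (void *) AT91_PIN_PA4,
 *		},
 *	};
 *
 * setup() below requests that GPIO; cs_activate()/cs_deactivate() then
 * drive it directly instead of relying on the controller's NPCSx pins.
 */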

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* MR.PCS is active-low: clear this device's bit, keep others set */
	mr = spi_readl(as, MR);
	mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			gpio, active ? " (high)" : "",
			mr);

	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
		gpio_set_value(gpio, active);
	spi_writel(as, MR, mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			gpio, active ? " (low)" : "",
			mr);

	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
		gpio_set_value(gpio, !active);
}

/*
 * Submit next transfer for DMA.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	u32 len;
	dma_addr_t tx_dma, rx_dma;

	xfer = as->current_transfer;
	if (!xfer || as->remaining_bytes == 0) {
		if (xfer)
			xfer = list_entry(xfer->transfer_list.next,
					struct spi_transfer, transfer_list);
		else
			xfer = list_entry(msg->transfers.next,
					struct spi_transfer, transfer_list);
		as->remaining_bytes = xfer->len;
		as->current_transfer = xfer;
	}

	len = as->remaining_bytes;

	tx_dma = xfer->tx_dma + xfer->len - len;
	rx_dma = xfer->rx_dma + xfer->len - len;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (!xfer->rx_buf) {
		rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}
	if (!xfer->tx_buf) {
		tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	spi_writel(as, RPR, rx_dma);
	spi_writel(as, TPR, tx_dma);

	as->remaining_bytes -= len;
	if (msg->spi->bits_per_word > 8)
		len >>= 1;	/* the PDC counts 16-bit words, not bytes */

	/* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
	 * mechanism might help avoid the IRQ latency between transfers
	 * (and improve the nCS0 errata handling on at91rm9200 chips)
	 *
	 * We're also waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise.  If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though.  Just not now...
	 */
	spi_writel(as, TNCR, 0);
	spi_writel(as, RNCR, 0);
	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));

	dev_dbg(&msg->spi->dev,
		"  start xfer %p: len %u tx %p/%08x rx %p/%08x imr %03x\n",
		xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
		xfer->rx_buf, xfer->rx_dma, spi_readl(as, IMR));

	spi_writel(as, TCR, len);
	spi_writel(as, RCR, len);
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_device *spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->cdev.dev, "start message %p for %s\n",
			msg, spi->dev.bus_id);

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	atmel_spi_next_xfer(master, msg);
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
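/*
 * A sketch of the alternative: a caller that maps its own buffers would
 * do roughly the following before submitting the message (illustrative
 * only, not code from this driver):
 *
 *	xfer->tx_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	msg->is_dma_mapped = 1;
 *
 * and this driver would then use those addresses as-is, skipping both
 * atmel_spi_dma_map_xfer() and atmel_spi_dma_unmap_xfer().
 */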
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device *dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		xfer->tx_dma = dma_map_single(dev,
				(void *) xfer->tx_buf, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
					xfer->tx_dma, xfer->len,
					DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->cdev.dev, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->cdev.dev, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->cdev.dev,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	spin_unlock(&as->lock);
	msg->complete(msg->context);
	spin_lock(&as->lock);

	as->current_transfer = NULL;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	else
		atmel_spi_next_message(master);
}

static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	spin_lock(&as->lock);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(ENDTX) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer.  Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->cdev.dev, "fifo overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->cdev.dev,
				 "timeout waiting for TXEMPTY\n");
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		atmel_spi_msg_done(master, as, msg, -EIO, 0);
	} else if (pending & SPI_BIT(ENDRX)) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->remaining_bytes == 0) {
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (msg->transfers.prev == &xfer->transfer_list) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg, 0,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				/*
				 * Not done yet.  Submit the next transfer.
				 *
				 * FIXME handle protocol options for xfer
				 */
				atmel_spi_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_next_xfer(master, msg);
		}
	}

	spin_unlock(&as->lock);

	return ret;
}

/* the spi->mode bits understood by this driver: */
#define MODEBITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi *as;
	u32 scbr, csr;
	unsigned int bits = spi->bits_per_word;
	unsigned long bus_hz, sck_hz;
	unsigned int npcs_pin;
	int ret;

	as = spi_master_get_devdata(spi->master);

	if (as->stopping)
		return -ESHUTDOWN;

	/* chip_select is zero-based, so num_chipselect itself is invalid */
	if (spi->chip_select >= spi->master->num_chipselect) {
		dev_dbg(&spi->dev,
			"setup: invalid chipselect %u (%u defined)\n",
			spi->chip_select, spi->master->num_chipselect);
		return -EINVAL;
	}

	if (bits == 0)
		bits = 8;
	if (bits < 8 || bits > 16) {
		dev_dbg(&spi->dev,
			"setup: invalid bits_per_word %u (8 to 16)\n",
			bits);
		return -EINVAL;
	}

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* see notes above re chipselect */
	if (cpu_is_at91rm9200()
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}

	/* speed zero convention is used by some upper layers */
	bus_hz = clk_get_rate(as->clk);
	if (spi->max_speed_hz) {
		/* assume div32/fdiv/mbz == 0 */
		if (!as->new_1)
			bus_hz /= 2;
		scbr = ((bus_hz + spi->max_speed_hz - 1)
			/ spi->max_speed_hz);
		if (scbr >= (1 << SPI_SCBR_SIZE)) {
			dev_dbg(&spi->dev,
				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
				spi->max_speed_hz, scbr, bus_hz/255);
			return -EINVAL;
		}
	} else
		scbr = 0xff;
	sck_hz = bus_hz / scbr;
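
	/*
	 * Worked example (numbers assumed, not from a datasheet): with
	 * bus_hz = 60 MHz on a "new_1" controller and max_speed_hz =
	 * 7 MHz, scbr = ceil(60/7) = 9, so sck_hz = 60 MHz / 9 =
	 * ~6.67 MHz -- the fastest rate that does not exceed the
	 * device's stated limit.
	 */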

	csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);	/* NCPHA is the inverse of CPHA */

	/* TODO: DLYBS and DLYBCT */
	csr |= SPI_BF(DLYBS, 10);
	csr |= SPI_BF(DLYBCT, 10);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned int)spi->controller_data;
	if (!spi->controller_state) {
		ret = gpio_request(npcs_pin, "spi_npcs");
		if (ret)
			return ret;
		spi->controller_state = (void *)npcs_pin;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&as->lock, flags);
		if (as->stay == spi)
			as->stay = NULL;
		cs_deactivate(as, spi);
		spin_unlock_irqrestore(&as->lock, flags);
	}

	dev_dbg(&spi->dev,
		"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
		sck_hz, bits, spi->mode, spi->chip_select, csr);

	spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}

static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi *as;
	struct spi_transfer *xfer;
	unsigned long flags;
	struct device *controller = spi->master->cdev.dev;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, spi->dev.bus_id);

	if (unlikely(list_empty(&msg->transfers)
			|| !spi->max_speed_hz))
		return -EINVAL;

	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!(xfer->tx_buf || xfer->rx_buf)) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		/* FIXME implement these protocol options!! */
		if (xfer->bits_per_word || xfer->speed_hz) {
			dev_dbg(&spi->dev, "no protocol options yet\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting.  This is a DMA-only driver.
		 *
		 * NOTE that if dma_unmap_single() ever starts to do work on
		 * platforms supported by this driver, we would need to clean
		 * up mappings for previously-mapped transfers.
		 */
		if (!msg->is_dma_mapped) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	spin_lock_irqsave(&as->lock, flags);
	list_add_tail(&msg->queue, &as->queue);
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	spin_unlock_irqrestore(&as->lock, flags);

	return 0;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi *as = spi_master_get_devdata(spi->master);
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned long flags;

	if (!spi->controller_state)
		return;

	spin_lock_irqsave(&as->lock, flags);
	if (as->stay == spi) {
		as->stay = NULL;
		cs_deactivate(as, spi);
	}
	spin_unlock_irqrestore(&as->lock, flags);

	gpio_free(gpio);
}

/*-------------------------------------------------------------------------*/

static int __init atmel_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	int irq;
	struct clk *clk;
	int ret;
	struct spi_master *master;
	struct atmel_spi *as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof *as);
	if (!master)
		goto out_free;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);
	as->pdev = pdev;
	as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
	if (!as->regs)
		goto out_free_buffer;
	as->irq = irq;
	as->clk = clk;
	if (!cpu_is_at91rm9200())
		as->new_1 = 1;

	ret = request_irq(irq, atmel_spi_interrupt, 0,
			pdev->dev.bus_id, master);
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	clk_enable(clk);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:
	spi_writel(as, CR, SPI_BIT(SWRST));
	clk_disable(clk);
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	clk_put(clk);
	spi_master_put(master);
	return ret;
}

static int __exit atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);
	struct spi_message *msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &as->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	spi_unregister_master(master);

	return 0;
}

#ifdef CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_disable(as->clk);
	return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct atmel_spi *as = spi_master_get_devdata(master);

	clk_enable(as->clk);
	return 0;
}

#else
#define atmel_spi_suspend	NULL
#define atmel_spi_resume	NULL
#endif


static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.remove		= __exit_p(atmel_spi_remove),
};

static int __init atmel_spi_init(void)
{
	return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe);
}
module_init(atmel_spi_init);

static void __exit atmel_spi_exit(void)
{
	platform_driver_unregister(&atmel_spi_driver);
}
module_exit(atmel_spi_exit);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_LICENSE("GPL");