blob: d6b6a49580887e9075ad5e14045901fc9e1dd2b6 [file] [log] [blame]
/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Sandeep Paulraj358934a2009-12-16 22:02:18 +000031
32#include <mach/spi.h>
33#include <mach/edma.h>
34
35#define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37#define SPI_MAX_CHIPSELECT 2
38
39#define CS_DEFAULT 0xFF
40
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18)
49#define SPIFMT_SHIFTDIR_MASK BIT(20)
50#define SPIFMT_WAITENA_MASK BIT(21)
51#define SPIFMT_PARITYENA_MASK BIT(22)
52#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24
55#define SPIFMT_CHARLEN_MASK 0x0000001Fu
56
Sandeep Paulraj358934a2009-12-16 22:02:18 +000057
58/* SPIPC0 */
59#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
60#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
61#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
62#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
Sandeep Paulraj358934a2009-12-16 22:02:18 +000063
64#define SPIINT_MASKALL 0x0101035F
65#define SPI_INTLVL_1 0x000001FFu
66#define SPI_INTLVL_0 0x00000000u
67
68/* SPIDAT1 */
69#define SPIDAT1_CSHOLD_SHIFT 28
70#define SPIDAT1_CSNR_SHIFT 16
71#define SPIGCR1_CLKMOD_MASK BIT(1)
72#define SPIGCR1_MASTER_MASK BIT(0)
73#define SPIGCR1_LOOPBACK_MASK BIT(16)
Sekhar Nori8e206f12010-08-20 16:20:49 +053074#define SPIGCR1_SPIENA_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000075
76/* SPIBUF */
77#define SPIBUF_TXFULL_MASK BIT(29)
78#define SPIBUF_RXEMPTY_MASK BIT(31)
79
80/* Error Masks */
81#define SPIFLG_DLEN_ERR_MASK BIT(0)
82#define SPIFLG_TIMEOUT_MASK BIT(1)
83#define SPIFLG_PARERR_MASK BIT(2)
84#define SPIFLG_DESYNC_MASK BIT(3)
85#define SPIFLG_BITERR_MASK BIT(4)
86#define SPIFLG_OVRRUN_MASK BIT(6)
87#define SPIFLG_RX_INTR_MASK BIT(8)
88#define SPIFLG_TX_INTR_MASK BIT(9)
89#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000090
Sandeep Paulraj358934a2009-12-16 22:02:18 +000091#define SPIINT_BITERR_INTR BIT(4)
92#define SPIINT_OVRRUN_INTR BIT(6)
93#define SPIINT_RX_INTR BIT(8)
94#define SPIINT_TX_INTR BIT(9)
95#define SPIINT_DMA_REQ_EN BIT(16)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000096
97#define SPI_T2CDELAY_SHIFT 16
98#define SPI_C2TDELAY_SHIFT 24
99
100/* SPI Controller registers */
101#define SPIGCR0 0x00
102#define SPIGCR1 0x04
103#define SPIINT 0x08
104#define SPILVL 0x0c
105#define SPIFLG 0x10
106#define SPIPC0 0x14
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000107#define SPIDAT1 0x3c
108#define SPIBUF 0x40
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000109#define SPIDELAY 0x48
110#define SPIDEF 0x4c
111#define SPIFMT0 0x50
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000112
/* Per-chipselect state cached by the driver. */
struct davinci_spi_slave {
	u32 cmd_to_write;	/* zeroed in davinci_spi_setup() */
	u32 clk_ctrl_to_write;	/* NOTE(review): not referenced in this file - confirm use */
	u32 bytes_per_word;	/* 1 or 2, chosen in davinci_spi_setup_transfer() */
	u8 active_cs;		/* NOTE(review): not referenced in this file - confirm use */
};
119
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int dma_tx_channel;	/* EDMA channel number, -1 when unallocated */
	int dma_rx_channel;	/* EDMA channel number, -1 when unallocated */
	int dma_tx_sync_dev;	/* TX sync event, from platform DMA resources */
	int dma_rx_sync_dev;	/* RX sync event, from platform DMA resources */
	enum dma_event_q eventq;

	/* signalled from the EDMA completion callbacks */
	struct completion dma_tx_completion;
	struct completion dma_rx_completion;
};
131
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang bitbang;	/* must be first: bitbang framework state */
	struct clk *clk;		/* module functional clock */

	u8 version;			/* SPI_VERSION_1 or SPI_VERSION_2 IP */
	resource_size_t pbase;		/* physical base, used for DMA src/dst */
	void __iomem *base;		/* ioremapped register base */
	size_t region_size;
	u32 irq;
	struct completion done;

	const void *tx;			/* current transfer's TX cursor */
	void *rx;			/* current transfer's RX cursor */
	u8 *tmp_buf;			/* dummy TX source to clock RX-only DMA */
	int count;			/* remaining words in current transfer */
	struct davinci_spi_dma *dma_channels;	/* one entry per chipselect */
	struct davinci_spi_platform_data *pdata;

	/* per-word accessors, selected by bits_per_word in setup_transfer() */
	void (*get_rx)(u32 rx_data, struct davinci_spi *);
	u32 (*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
156
157static unsigned use_dma;
158
159static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
160{
161 u8 *rx = davinci_spi->rx;
162
163 *rx++ = (u8)data;
164 davinci_spi->rx = rx;
165}
166
167static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
168{
169 u16 *rx = davinci_spi->rx;
170
171 *rx++ = (u16)data;
172 davinci_spi->rx = rx;
173}
174
175static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
176{
177 u32 data;
178 const u8 *tx = davinci_spi->tx;
179
180 data = *tx++;
181 davinci_spi->tx = tx;
182 return data;
183}
184
185static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
186{
187 u32 data;
188 const u16 *tx = davinci_spi->tx;
189
190 data = *tx++;
191 davinci_spi->tx = tx;
192 return data;
193}
194
195static inline void set_io_bits(void __iomem *addr, u32 bits)
196{
197 u32 v = ioread32(addr);
198
199 v |= bits;
200 iowrite32(v, addr);
201}
202
203static inline void clear_io_bits(void __iomem *addr, u32 bits)
204{
205 u32 v = ioread32(addr);
206
207 v &= ~bits;
208 iowrite32(v, addr);
209}
210
211static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
212{
213 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
214}
215
216static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
217{
218 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
219}
220
221static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
222{
223 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
224
225 if (enable)
226 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
227 else
228 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
229}
230
231/*
232 * Interface to control the chip select signal
233 */
/*
 * Interface to control the chip select signal.
 *
 * Only the deactivation case is handled here: writing CS_DEFAULT into the
 * CSNR field of SPIDAT1 releases all chipselect lines, then we spin until
 * the RX buffer reports empty so the deassert does not race a word still
 * in flight.  NOTE(review): BITBANG_CS_ACTIVE is deliberately a no-op here
 * (the CS hold is driven from SPIDAT1 writes in the transfer paths) -
 * confirm against the bitbang framework's expectations.
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
	struct davinci_spi *davinci_spi;
	struct davinci_spi_platform_data *pdata;	/* NOTE(review): assigned but unused */
	u32 data1_reg_val = 0;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	/*
	 * Board specific chip select logic decides the polarity and cs
	 * line for the controller
	 */
	if (value == BITBANG_CS_INACTIVE) {
		/* CS_DEFAULT (0xFF) in CSNR deasserts every chipselect */
		data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		/* wait for any in-flight word to finish before releasing CS */
		while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
			cpu_relax();
	}
}
256
257/**
258 * davinci_spi_setup_transfer - This functions will determine transfer method
259 * @spi: spi device on which data transfer to be done
260 * @t: spi transfer in which transfer info is filled
261 *
262 * This function determines data transfer method (8/16/32 bit transfer).
263 * It will also set the SPI Clock Control register according to
264 * SPI slave device freq.
265 */
266static int davinci_spi_setup_transfer(struct spi_device *spi,
267 struct spi_transfer *t)
268{
269
270 struct davinci_spi *davinci_spi;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000271 u8 bits_per_word = 0;
Thomas Koeller0c2a2ae2010-04-26 09:01:45 +0000272 u32 hz = 0, prescale = 0, clkspeed;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000273
274 davinci_spi = spi_master_get_devdata(spi->master);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000275
276 if (t) {
277 bits_per_word = t->bits_per_word;
278 hz = t->speed_hz;
279 }
280
281 /* if bits_per_word is not set then set it default */
282 if (!bits_per_word)
283 bits_per_word = spi->bits_per_word;
284
285 /*
286 * Assign function pointer to appropriate transfer method
287 * 8bit, 16bit or 32bit transfer
288 */
289 if (bits_per_word <= 8 && bits_per_word >= 2) {
290 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
291 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
292 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
293 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
294 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
295 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
296 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
297 } else
298 return -EINVAL;
299
300 if (!hz)
301 hz = spi->max_speed_hz;
302
303 clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
304 spi->chip_select);
305 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
306 spi->chip_select);
307
Thomas Koeller0c2a2ae2010-04-26 09:01:45 +0000308 clkspeed = clk_get_rate(davinci_spi->clk);
309 if (hz > clkspeed / 2)
310 prescale = 1 << 8;
311 if (hz < clkspeed / 256)
312 prescale = 255 << 8;
313 if (!prescale)
314 prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000315
316 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
Thomas Koeller0c2a2ae2010-04-26 09:01:45 +0000317 set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000318
319 return 0;
320}
321
322static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
323{
324 struct spi_device *spi = (struct spi_device *)data;
325 struct davinci_spi *davinci_spi;
326 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000327
328 davinci_spi = spi_master_get_devdata(spi->master);
329 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000330
331 if (ch_status == DMA_COMPLETE)
332 edma_stop(davinci_spi_dma->dma_rx_channel);
333 else
334 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
335
336 complete(&davinci_spi_dma->dma_rx_completion);
337 /* We must disable the DMA RX request */
338 davinci_spi_set_dma_req(spi, 0);
339}
340
341static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
342{
343 struct spi_device *spi = (struct spi_device *)data;
344 struct davinci_spi *davinci_spi;
345 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000346
347 davinci_spi = spi_master_get_devdata(spi->master);
348 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000349
350 if (ch_status == DMA_COMPLETE)
351 edma_stop(davinci_spi_dma->dma_tx_channel);
352 else
353 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
354
355 complete(&davinci_spi_dma->dma_tx_completion);
356 /* We must disable the DMA TX request */
357 davinci_spi_set_dma_req(spi, 0);
358}
359
360static int davinci_spi_request_dma(struct spi_device *spi)
361{
362 struct davinci_spi *davinci_spi;
363 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000364 struct device *sdev;
365 int r;
366
367 davinci_spi = spi_master_get_devdata(spi->master);
368 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000369 sdev = davinci_spi->bitbang.master->dev.parent;
370
371 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
372 davinci_spi_dma_rx_callback, spi,
373 davinci_spi_dma->eventq);
374 if (r < 0) {
375 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
376 return -EAGAIN;
377 }
378 davinci_spi_dma->dma_rx_channel = r;
379 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
380 davinci_spi_dma_tx_callback, spi,
381 davinci_spi_dma->eventq);
382 if (r < 0) {
383 edma_free_channel(davinci_spi_dma->dma_rx_channel);
384 davinci_spi_dma->dma_rx_channel = -1;
385 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
386 return -EAGAIN;
387 }
388 davinci_spi_dma->dma_tx_channel = r;
389
390 return 0;
391}
392
393/**
394 * davinci_spi_setup - This functions will set default transfer method
395 * @spi: spi device on which data transfer to be done
396 *
397 * This functions sets the default transfer method.
398 */
static int davinci_spi_setup(struct spi_device *spi)
{
	int retval;
	struct davinci_spi *davinci_spi;
	struct davinci_spi_dma *davinci_spi_dma;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	sdev = davinci_spi->bitbang.master->dev.parent;

	/* if bits per word length is zero then set it default 8 */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

	/* lazily allocate this chipselect's EDMA channels on first setup */
	if (use_dma && davinci_spi->dma_channels) {
		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

		if ((davinci_spi_dma->dma_rx_channel == -1)
				|| (davinci_spi_dma->dma_tx_channel == -1)) {
			retval = davinci_spi_request_dma(spi);
			if (retval < 0)
				return retval;
		}
	}

	/*
	 * SPI in DaVinci and DA8xx operate between
	 * 600 KHz and 50 MHz
	 */
	if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
		dev_dbg(sdev, "Operating frequency is not in acceptable "
				"range\n");
		return -EINVAL;
	}

	/*
	 * Set up SPIFMTn register, unique to this chipselect.
	 *
	 * NOTE: we could do all of these with one write.  Also, some
	 * of the "version 2" features are found in chips that don't
	 * support all of them...
	 */
	/* bit order: SHIFTDIR set = LSB first */
	if (spi->mode & SPI_LSB_FIRST)
		set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
				spi->chip_select);

	/* clock polarity (CPOL) */
	if (spi->mode & SPI_CPOL)
		set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
				spi->chip_select);

	/* clock phase: note the hardware PHASE bit is the inverse of CPHA */
	if (!(spi->mode & SPI_CPHA))
		set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);
	else
		clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
				spi->chip_select);

	/*
	 * Version 1 hardware supports two basic SPI modes:
	 *  - Standard SPI mode uses 4 pins, with chipselect
	 *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
	 *    (distinct from SPI_3WIRE, with just one data wire;
	 *    or similar variants without MOSI or without MISO)
	 *
	 * Version 2 hardware supports an optional handshaking signal,
	 * so it can support two more modes:
	 *  - 5 pin SPI variant is standard SPI plus SPI_READY
	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
	 */

	if (davinci_spi->version == SPI_VERSION_2) {
		/* inter-word delay, from platform data */
		clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
				spi->chip_select);
		set_fmt_bits(davinci_spi->base,
				(davinci_spi->pdata->wdelay
				<< SPIFMT_WDELAY_SHIFT)
				& SPIFMT_WDELAY_MASK,
				spi->chip_select);

		/* parity sense: odd vs even */
		if (davinci_spi->pdata->odd_parity)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_ODD_PARITY_MASK,
					spi->chip_select);

		/* parity generation/checking on or off */
		if (davinci_spi->pdata->parity_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_PARITYENA_MASK,
					spi->chip_select);

		/* wait for ENA handshake from the slave (SPI_READY-style) */
		if (davinci_spi->pdata->wait_enable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_WAITENA_MASK,
					spi->chip_select);

		/* optionally disable the timeout timer */
		if (davinci_spi->pdata->timer_disable)
			set_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
		else
			clear_fmt_bits(davinci_spi->base,
					SPIFMT_DISTIMER_MASK,
					spi->chip_select);
	}

	/* pick word size and clock prescaler from the device defaults */
	retval = davinci_spi_setup_transfer(spi, NULL);

	return retval;
}
527
528static void davinci_spi_cleanup(struct spi_device *spi)
529{
530 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
531 struct davinci_spi_dma *davinci_spi_dma;
532
533 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
534
535 if (use_dma && davinci_spi->dma_channels) {
536 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
537
538 if ((davinci_spi_dma->dma_rx_channel != -1)
539 && (davinci_spi_dma->dma_tx_channel != -1)) {
540 edma_free_channel(davinci_spi_dma->dma_tx_channel);
541 edma_free_channel(davinci_spi_dma->dma_rx_channel);
542 }
543 }
544}
545
546static int davinci_spi_bufs_prep(struct spi_device *spi,
547 struct davinci_spi *davinci_spi)
548{
549 int op_mode = 0;
550
551 /*
552 * REVISIT unless devices disagree about SPI_LOOP or
553 * SPI_READY (SPI_NO_CS only allows one device!), this
554 * should not need to be done before each message...
555 * optimize for both flags staying cleared.
556 */
557
558 op_mode = SPIPC0_DIFUN_MASK
559 | SPIPC0_DOFUN_MASK
560 | SPIPC0_CLKFUN_MASK;
561 if (!(spi->mode & SPI_NO_CS))
562 op_mode |= 1 << spi->chip_select;
563 if (spi->mode & SPI_READY)
564 op_mode |= SPIPC0_SPIENA_MASK;
565
566 iowrite32(op_mode, davinci_spi->base + SPIPC0);
567
568 if (spi->mode & SPI_LOOP)
569 set_io_bits(davinci_spi->base + SPIGCR1,
570 SPIGCR1_LOOPBACK_MASK);
571 else
572 clear_io_bits(davinci_spi->base + SPIGCR1,
573 SPIGCR1_LOOPBACK_MASK);
574
575 return 0;
576}
577
578static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
579 int int_status)
580{
581 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
582
583 if (int_status & SPIFLG_TIMEOUT_MASK) {
584 dev_dbg(sdev, "SPI Time-out Error\n");
585 return -ETIMEDOUT;
586 }
587 if (int_status & SPIFLG_DESYNC_MASK) {
588 dev_dbg(sdev, "SPI Desynchronization Error\n");
589 return -EIO;
590 }
591 if (int_status & SPIFLG_BITERR_MASK) {
592 dev_dbg(sdev, "SPI Bit error\n");
593 return -EIO;
594 }
595
596 if (davinci_spi->version == SPI_VERSION_2) {
597 if (int_status & SPIFLG_DLEN_ERR_MASK) {
598 dev_dbg(sdev, "SPI Data Length Error\n");
599 return -EIO;
600 }
601 if (int_status & SPIFLG_PARERR_MASK) {
602 dev_dbg(sdev, "SPI Parity Error\n");
603 return -EIO;
604 }
605 if (int_status & SPIFLG_OVRRUN_MASK) {
606 dev_dbg(sdev, "SPI Data Overrun error\n");
607 return -EIO;
608 }
609 if (int_status & SPIFLG_TX_INTR_MASK) {
610 dev_dbg(sdev, "SPI TX intr bit set\n");
611 return -EIO;
612 }
613 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
614 dev_dbg(sdev, "SPI Buffer Init Active\n");
615 return -EBUSY;
616 }
617 }
618
619 return 0;
620}
621
622/**
623 * davinci_spi_bufs - functions which will handle transfer data
624 * @spi: spi device on which data transfer to be done
625 * @t: spi transfer in which transfer info is filled
626 *
627 * This function will put data to be transferred into data register
628 * of SPI controller and then wait until the completion will be marked
629 * by the IRQ Handler.
630 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv, tmp;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* program CS-to-transmit and transmit-to-CS delays from platform data */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;
	/* tmp: all CS lines high except this device's (active-low select) */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* drain any stale word before starting */
	while ((ioread32(davinci_spi->base + SPIBUF)
			& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		/* TX (optionally full-duplex) path: always polled */
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* low 16 bits of SPIDAT1 carry the outgoing word */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			/* wait until the word has been clocked through */
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		/* RX-only: dummy writes keep the clock running */
		if (pdata->poll_mode) {
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
							davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else { /* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				/* arm error + RX interrupts for one word */
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* the ISR clears SPIINT_RX_INTR when done */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/* drop CSHOLD/CS bits to release the bus */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
762
763#define DAVINCI_DMA_DATA_TYPE_S8 0x01
764#define DAVINCI_DMA_DATA_TYPE_S16 0x02
765#define DAVINCI_DMA_DATA_TYPE_S32 0x04
766
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u8 tmp;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	/* EDMA needs physical register addresses, not the ioremapped ones */
	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	/* element size for the EDMA transfer, derived from bytes per word */
	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	/* CS default = 0xFF */
	tmp = ~(0x1 << spi->chip_select);

	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	/* drain any stale word before starting */
	while ((ioread32(davinci_spi->base + SPIBUF)
			& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();


	if (t->tx_buf) {
		/*
		 * NOTE(review): dma_map_single() is given "count" (elements),
		 * not count * conv bytes - for 16-bit words this maps only
		 * half the buffer.  Confirm against t->len semantics.
		 */
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	/* TX channel: memory -> SPIDAT1, one element per sync event */
	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
			data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		/* RX channel: SPIBUF -> memory, one element per sync event */
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	/* start RX before enabling DMA requests so no event is missed */
	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	/* callbacks complete these and shut the DMA requests off again */
	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	/* NOTE(review): unmapping with NULL dev but mapping with &spi->dev - confirm */
	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
936
937/**
938 * davinci_spi_irq - IRQ handler for DaVinci SPI
939 * @irq: IRQ number for this SPI Master
940 * @context_data: structure for SPI Master controller davinci_spi
941 */
942static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
943{
944 struct davinci_spi *davinci_spi = context_data;
945 u32 int_status, rx_data = 0;
946 irqreturn_t ret = IRQ_NONE;
947
948 int_status = ioread32(davinci_spi->base + SPIFLG);
949
950 while ((int_status & SPIFLG_RX_INTR_MASK)) {
951 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
952 ret = IRQ_HANDLED;
953
954 rx_data = ioread32(davinci_spi->base + SPIBUF);
955 davinci_spi->get_rx(rx_data, davinci_spi);
956
957 /* Disable Receive Interrupt */
958 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
959 davinci_spi->base + SPIINT);
960 } else
961 (void)davinci_spi_check_error(davinci_spi, int_status);
962
963 int_status = ioread32(davinci_spi->base + SPIFLG);
964 }
965
966 return ret;
967}
968
969/**
970 * davinci_spi_probe - probe function for SPI Master Controller
971 * @pdev: platform_device structure which contains plateform specific data
972 */
973static int davinci_spi_probe(struct platform_device *pdev)
974{
975 struct spi_master *master;
976 struct davinci_spi *davinci_spi;
977 struct davinci_spi_platform_data *pdata;
978 struct resource *r, *mem;
979 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
980 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
981 resource_size_t dma_eventq = SPI_NO_RESOURCE;
982 int i = 0, ret = 0;
983
984 pdata = pdev->dev.platform_data;
985 if (pdata == NULL) {
986 ret = -ENODEV;
987 goto err;
988 }
989
990 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
991 if (master == NULL) {
992 ret = -ENOMEM;
993 goto err;
994 }
995
996 dev_set_drvdata(&pdev->dev, master);
997
998 davinci_spi = spi_master_get_devdata(master);
999 if (davinci_spi == NULL) {
1000 ret = -ENOENT;
1001 goto free_master;
1002 }
1003
1004 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1005 if (r == NULL) {
1006 ret = -ENOENT;
1007 goto free_master;
1008 }
1009
1010 davinci_spi->pbase = r->start;
1011 davinci_spi->region_size = resource_size(r);
1012 davinci_spi->pdata = pdata;
1013
1014 mem = request_mem_region(r->start, davinci_spi->region_size,
1015 pdev->name);
1016 if (mem == NULL) {
1017 ret = -EBUSY;
1018 goto free_master;
1019 }
1020
Sekhar Nori50356dd2010-10-08 15:27:26 +05301021 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001022 if (davinci_spi->base == NULL) {
1023 ret = -ENOMEM;
1024 goto release_region;
1025 }
1026
1027 davinci_spi->irq = platform_get_irq(pdev, 0);
1028 if (davinci_spi->irq <= 0) {
1029 ret = -EINVAL;
1030 goto unmap_io;
1031 }
1032
1033 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1034 dev_name(&pdev->dev), davinci_spi);
1035 if (ret)
1036 goto unmap_io;
1037
1038 /* Allocate tmp_buf for tx_buf */
1039 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1040 if (davinci_spi->tmp_buf == NULL) {
1041 ret = -ENOMEM;
1042 goto irq_free;
1043 }
1044
1045 davinci_spi->bitbang.master = spi_master_get(master);
1046 if (davinci_spi->bitbang.master == NULL) {
1047 ret = -ENODEV;
1048 goto free_tmp_buf;
1049 }
1050
1051 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1052 if (IS_ERR(davinci_spi->clk)) {
1053 ret = -ENODEV;
1054 goto put_master;
1055 }
1056 clk_enable(davinci_spi->clk);
1057
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001058 master->bus_num = pdev->id;
1059 master->num_chipselect = pdata->num_chipselect;
1060 master->setup = davinci_spi_setup;
1061 master->cleanup = davinci_spi_cleanup;
1062
1063 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1064 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1065
1066 davinci_spi->version = pdata->version;
1067 use_dma = pdata->use_dma;
1068
1069 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1070 if (davinci_spi->version == SPI_VERSION_2)
1071 davinci_spi->bitbang.flags |= SPI_READY;
1072
1073 if (use_dma) {
Brian Niebuhr778e2612010-09-03 15:15:06 +05301074 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1075 if (r)
1076 dma_rx_chan = r->start;
1077 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1078 if (r)
1079 dma_tx_chan = r->start;
1080 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1081 if (r)
1082 dma_eventq = r->start;
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001083 }
1084
1085 if (!use_dma ||
1086 dma_rx_chan == SPI_NO_RESOURCE ||
1087 dma_tx_chan == SPI_NO_RESOURCE ||
1088 dma_eventq == SPI_NO_RESOURCE) {
1089 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1090 use_dma = 0;
1091 } else {
1092 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1093 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1094 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1095 if (davinci_spi->dma_channels == NULL) {
1096 ret = -ENOMEM;
1097 goto free_clk;
1098 }
1099
1100 for (i = 0; i < master->num_chipselect; i++) {
1101 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1102 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1103 dma_rx_chan;
1104 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1105 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1106 dma_tx_chan;
1107 davinci_spi->dma_channels[i].eventq = dma_eventq;
1108 }
1109 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1110 "Using RX channel = %d , TX channel = %d and "
1111 "event queue = %d", dma_rx_chan, dma_tx_chan,
1112 dma_eventq);
1113 }
1114
1115 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1116 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1117
1118 init_completion(&davinci_spi->done);
1119
1120 /* Reset In/OUT SPI module */
1121 iowrite32(0, davinci_spi->base + SPIGCR0);
1122 udelay(100);
1123 iowrite32(1, davinci_spi->base + SPIGCR0);
1124
1125 /* Clock internal */
1126 if (davinci_spi->pdata->clk_internal)
1127 set_io_bits(davinci_spi->base + SPIGCR1,
1128 SPIGCR1_CLKMOD_MASK);
1129 else
1130 clear_io_bits(davinci_spi->base + SPIGCR1,
1131 SPIGCR1_CLKMOD_MASK);
1132
Brian Niebuhr843a7132010-08-12 12:49:05 +05301133 iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1134
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001135 /* master mode default */
1136 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1137
1138 if (davinci_spi->pdata->intr_level)
1139 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1140 else
1141 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1142
1143 ret = spi_bitbang_start(&davinci_spi->bitbang);
1144 if (ret)
1145 goto free_clk;
1146
Brian Niebuhr3b740b12010-09-03 14:50:07 +05301147 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001148
1149 if (!pdata->poll_mode)
1150 dev_info(&pdev->dev, "Operating in interrupt mode"
1151 " using IRQ %d\n", davinci_spi->irq);
1152
1153 return ret;
1154
1155free_clk:
1156 clk_disable(davinci_spi->clk);
1157 clk_put(davinci_spi->clk);
1158put_master:
1159 spi_master_put(master);
1160free_tmp_buf:
1161 kfree(davinci_spi->tmp_buf);
1162irq_free:
1163 free_irq(davinci_spi->irq, davinci_spi);
1164unmap_io:
1165 iounmap(davinci_spi->base);
1166release_region:
1167 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1168free_master:
1169 kfree(master);
1170err:
1171 return ret;
1172}
1173
1174/**
1175 * davinci_spi_remove - remove function for SPI Master Controller
1176 * @pdev: platform_device structure which contains plateform specific data
1177 *
1178 * This function will do the reverse action of davinci_spi_probe function
1179 * It will free the IRQ and SPI controller's memory region.
1180 * It will also call spi_bitbang_stop to destroy the work queue which was
1181 * created by spi_bitbang_start.
1182 */
1183static int __exit davinci_spi_remove(struct platform_device *pdev)
1184{
1185 struct davinci_spi *davinci_spi;
1186 struct spi_master *master;
1187
1188 master = dev_get_drvdata(&pdev->dev);
1189 davinci_spi = spi_master_get_devdata(master);
1190
1191 spi_bitbang_stop(&davinci_spi->bitbang);
1192
1193 clk_disable(davinci_spi->clk);
1194 clk_put(davinci_spi->clk);
1195 spi_master_put(master);
1196 kfree(davinci_spi->tmp_buf);
1197 free_irq(davinci_spi->irq, davinci_spi);
1198 iounmap(davinci_spi->base);
1199 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1200
1201 return 0;
1202}
1203
1204static struct platform_driver davinci_spi_driver = {
1205 .driver.name = "spi_davinci",
1206 .remove = __exit_p(davinci_spi_remove),
1207};
1208
1209static int __init davinci_spi_init(void)
1210{
1211 return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
1212}
1213module_init(davinci_spi_init);
1214
/*
 * Module exit point: unregister the driver registered in
 * davinci_spi_init(); davinci_spi_remove() runs for any bound device.
 */
static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);
1220
1221MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1222MODULE_LICENSE("GPL");