/*
 * Copyright (C) 2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>

#include <mach/spi.h>
#include <mach/edma.h>

#define SPI_NO_RESOURCE         ((resource_size_t)-1)

#define SPI_MAX_CHIPSELECT      2

#define CS_DEFAULT              0xFF

#define SPI_BUFSIZ              (SMP_CACHE_BYTES + 1)
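/*
 * Added note: the DAVINCI_DMA_DATA_TYPE_* values below double as the EDMA
 * element size in bytes for 8-, 16- and 32-bit word lengths (see
 * davinci_spi_bufs_dma(), which passes them to edma_set_transfer_params()).
 */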
#define DAVINCI_DMA_DATA_TYPE_S8        0x01
#define DAVINCI_DMA_DATA_TYPE_S16       0x02
#define DAVINCI_DMA_DATA_TYPE_S32       0x04

#define SPIFMT_PHASE_MASK       BIT(16)
#define SPIFMT_POLARITY_MASK    BIT(17)
#define SPIFMT_DISTIMER_MASK    BIT(18)
#define SPIFMT_SHIFTDIR_MASK    BIT(20)
#define SPIFMT_WAITENA_MASK     BIT(21)
#define SPIFMT_PARITYENA_MASK   BIT(22)
#define SPIFMT_ODD_PARITY_MASK  BIT(23)
#define SPIFMT_WDELAY_MASK      0x3f000000u
#define SPIFMT_WDELAY_SHIFT     24
#define SPIFMT_CHARLEN_MASK     0x0000001Fu
#define SPIFMT_PRESCALE_SHIFT   8

/* SPIPC0 */
#define SPIPC0_DIFUN_MASK       BIT(11)         /* MISO */
#define SPIPC0_DOFUN_MASK       BIT(10)         /* MOSI */
#define SPIPC0_CLKFUN_MASK      BIT(9)          /* CLK */
#define SPIPC0_SPIENA_MASK      BIT(8)          /* nREADY */

#define SPIINT_MASKALL          0x0101035F
#define SPI_INTLVL_1            0x000001FFu
#define SPI_INTLVL_0            0x00000000u

/* SPIDAT1 (upper 16 bit defines) */
#define SPIDAT1_CSHOLD_MASK     BIT(12)

/* SPIGCR1 */
#define SPIGCR1_CLKMOD_MASK     BIT(1)
#define SPIGCR1_MASTER_MASK     BIT(0)
#define SPIGCR1_LOOPBACK_MASK   BIT(16)
#define SPIGCR1_SPIENA_MASK     BIT(24)

/* SPIBUF */
#define SPIBUF_TXFULL_MASK      BIT(29)
#define SPIBUF_RXEMPTY_MASK     BIT(31)

/* Error Masks */
#define SPIFLG_DLEN_ERR_MASK            BIT(0)
#define SPIFLG_TIMEOUT_MASK             BIT(1)
#define SPIFLG_PARERR_MASK              BIT(2)
#define SPIFLG_DESYNC_MASK              BIT(3)
#define SPIFLG_BITERR_MASK              BIT(4)
#define SPIFLG_OVRRUN_MASK              BIT(6)
#define SPIFLG_RX_INTR_MASK             BIT(8)
#define SPIFLG_TX_INTR_MASK             BIT(9)
#define SPIFLG_BUF_INIT_ACTIVE_MASK     BIT(24)

#define SPIINT_BITERR_INTR      BIT(4)
#define SPIINT_OVRRUN_INTR      BIT(6)
#define SPIINT_RX_INTR          BIT(8)
#define SPIINT_TX_INTR          BIT(9)
#define SPIINT_DMA_REQ_EN       BIT(16)

#define SPI_T2CDELAY_SHIFT      16
#define SPI_C2TDELAY_SHIFT      24

/* SPI Controller registers */
#define SPIGCR0         0x00
#define SPIGCR1         0x04
#define SPIINT          0x08
#define SPILVL          0x0c
#define SPIFLG          0x10
#define SPIPC0          0x14
#define SPIDAT1         0x3c
#define SPIBUF          0x40
#define SPIDELAY        0x48
#define SPIDEF          0x4c
#define SPIFMT0         0x50

struct davinci_spi_slave {
        u32     cmd_to_write;
        u32     clk_ctrl_to_write;
        u32     bytes_per_word;
        u8      active_cs;
};

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
        int                     dma_tx_channel;
        int                     dma_rx_channel;
        int                     dma_tx_sync_dev;
        int                     dma_rx_sync_dev;
        enum dma_event_q        eventq;

        struct completion       dma_tx_completion;
        struct completion       dma_rx_completion;
};

/* SPI Controller driver's private data. */
struct davinci_spi {
        struct spi_bitbang      bitbang;
        struct clk              *clk;

        u8                      version;
        resource_size_t         pbase;
        void __iomem            *base;
        size_t                  region_size;
        u32                     irq;
        struct completion       done;

        const void              *tx;
        void                    *rx;
        u8                      *tmp_buf;
        int                     count;
        struct davinci_spi_dma  *dma_channels;
        struct davinci_spi_platform_data *pdata;

        void                    (*get_rx)(u32 rx_data, struct davinci_spi *);
        u32                     (*get_tx)(struct davinci_spi *);

        struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};

static unsigned use_dma;

static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
{
        u8 *rx = davinci_spi->rx;

        *rx++ = (u8)data;
        davinci_spi->rx = rx;
}

static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
{
        u16 *rx = davinci_spi->rx;

        *rx++ = (u16)data;
        davinci_spi->rx = rx;
}

static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
{
        u32 data;
        const u8 *tx = davinci_spi->tx;

        data = *tx++;
        davinci_spi->tx = tx;
        return data;
}

static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
{
        u32 data;
        const u16 *tx = davinci_spi->tx;

        data = *tx++;
        davinci_spi->tx = tx;
        return data;
}

static inline void set_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v |= bits;
        iowrite32(v, addr);
}

static inline void clear_io_bits(void __iomem *addr, u32 bits)
{
        u32 v = ioread32(addr);

        v &= ~bits;
        iowrite32(v, addr);
}

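/*
 * Added note: each chip select has its own format register, SPIFMTn, located
 * at SPIFMT0 + 4 * n. These helpers update the format register that belongs
 * to the given chip select.
 */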
static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
{
        set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
}

static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
{
        clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
}

static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
{
        struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);

        if (enable)
                set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
        else
                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
}

/*
 * Interface to control the chip select signal
 */
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        u8 chip_sel = spi->chip_select;
        u16 spidat1_cfg = CS_DEFAULT;
        bool gpio_chipsel = false;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
                        pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
                gpio_chipsel = true;

        /*
         * Board-specific chip select logic decides the polarity and cs
         * line for the controller
         */
        if (gpio_chipsel) {
                if (value == BITBANG_CS_ACTIVE)
                        gpio_set_value(pdata->chip_sel[chip_sel], 0);
                else
                        gpio_set_value(pdata->chip_sel[chip_sel], 1);
        } else {
                if (value == BITBANG_CS_ACTIVE) {
                        spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
                        spidat1_cfg &= ~(0x1 << chip_sel);
                }

                /*
                 * Write only the upper 16 bits of SPIDAT1 (CSHOLD and the
                 * chip-select bits); the data field is left untouched.
                 */
                iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
        }
}

/**
 * davinci_spi_get_prescale - calculate the correct prescale value
 * @davinci_spi: driver private data
 * @max_speed_hz: the maximum rate the SPI clock can run at
 *
 * This function calculates the prescale value that generates a clock rate
 * less than or equal to the specified maximum.
 *
 * Returns: calculated prescale - 1 for easy programming into SPI registers,
 * or a negative error number if a valid prescale value cannot be obtained.
 */
static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
                        u32 max_speed_hz)
{
        int ret;

        ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);

        if (ret < 3 || ret > 256)
                return -EINVAL;

        return ret - 1;
}
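/*
 * Added worked example (illustrative only, assuming a 150 MHz SPI module
 * clock): for max_speed_hz = 10 MHz, DIV_ROUND_UP(150000000, 10000000) = 15,
 * which lies in the valid divider range 3..256, so 14 is returned and the
 * resulting SPI clock is 150 MHz / (14 + 1) = 10 MHz.
 */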

/**
 * davinci_spi_setup_transfer - determine the transfer method
 * @spi: spi device on which the data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function determines the data transfer method (8/16/32-bit transfer).
 * It also sets the SPI Clock Control register according to the
 * SPI slave device frequency.
 */
static int davinci_spi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        u8 bits_per_word = 0;
        u32 hz = 0;
        int prescale;

        davinci_spi = spi_master_get_devdata(spi->master);

        if (t) {
                bits_per_word = t->bits_per_word;
                hz = t->speed_hz;
        }

        /* if bits_per_word is not set then use the device default */
        if (!bits_per_word)
                bits_per_word = spi->bits_per_word;

        /*
         * Assign function pointer to appropriate transfer method
         * 8bit, 16bit or 32bit transfer
         */
        if (bits_per_word <= 8 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u8;
                davinci_spi->get_tx = davinci_spi_tx_buf_u8;
                davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
        } else if (bits_per_word <= 16 && bits_per_word >= 2) {
                davinci_spi->get_rx = davinci_spi_rx_buf_u16;
                davinci_spi->get_tx = davinci_spi_tx_buf_u16;
                davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
        } else
                return -EINVAL;

        if (!hz)
                hz = spi->max_speed_hz;

        /* prescale must be a signed int so the error check below can work */
        prescale = davinci_spi_get_prescale(davinci_spi, hz);
        if (prescale < 0)
                return prescale;

        clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
                        spi->chip_select);
        set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
                        spi->chip_select);

        clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
        set_fmt_bits(davinci_spi->base,
                        prescale << SPIFMT_PRESCALE_SHIFT, spi->chip_select);

        return 0;
}

static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
{
        struct spi_device *spi = (struct spi_device *)data;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

        if (ch_status == DMA_COMPLETE)
                edma_stop(davinci_spi_dma->dma_rx_channel);
        else
                edma_clean_channel(davinci_spi_dma->dma_rx_channel);

        complete(&davinci_spi_dma->dma_rx_completion);
        /* We must disable the DMA RX request */
        davinci_spi_set_dma_req(spi, 0);
}

static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
{
        struct spi_device *spi = (struct spi_device *)data;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);

        if (ch_status == DMA_COMPLETE)
                edma_stop(davinci_spi_dma->dma_tx_channel);
        else
                edma_clean_channel(davinci_spi_dma->dma_tx_channel);

        complete(&davinci_spi_dma->dma_tx_completion);
        /* We must disable the DMA TX request */
        davinci_spi_set_dma_req(spi, 0);
}

static int davinci_spi_request_dma(struct spi_device *spi)
{
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;
        struct device *sdev;
        int r;

        davinci_spi = spi_master_get_devdata(spi->master);
        davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
        sdev = davinci_spi->bitbang.master->dev.parent;

        r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
                                davinci_spi_dma_rx_callback, spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
                return -EAGAIN;
        }
        davinci_spi_dma->dma_rx_channel = r;
        r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
                                davinci_spi_dma_tx_callback, spi,
                                davinci_spi_dma->eventq);
        if (r < 0) {
                edma_free_channel(davinci_spi_dma->dma_rx_channel);
                davinci_spi_dma->dma_rx_channel = -1;
                dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
                return -EAGAIN;
        }
        davinci_spi_dma->dma_tx_channel = r;

        return 0;
}

/**
 * davinci_spi_setup - set up the default transfer method
 * @spi: spi device on which data transfer is to be done
 *
 * This function sets the default transfer method and programs the
 * per-chipselect SPIFMTn configuration.
 */
static int davinci_spi_setup(struct spi_device *spi)
{
        int retval;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_dma *davinci_spi_dma;

        davinci_spi = spi_master_get_devdata(spi->master);

        /* if bits_per_word is zero then default to 8 */
        if (!spi->bits_per_word)
                spi->bits_per_word = 8;

        davinci_spi->slave[spi->chip_select].cmd_to_write = 0;

        if (use_dma && davinci_spi->dma_channels) {
                davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

                if ((davinci_spi_dma->dma_rx_channel == -1)
                                || (davinci_spi_dma->dma_tx_channel == -1)) {
                        retval = davinci_spi_request_dma(spi);
                        if (retval < 0)
                                return retval;
                }
        }

        /*
         * Set up SPIFMTn register, unique to this chipselect.
         *
         * NOTE: we could do all of these with one write.  Also, some
         * of the "version 2" features are found in chips that don't
         * support all of them...
         */
        if (spi->mode & SPI_LSB_FIRST)
                set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
                                spi->chip_select);

        if (spi->mode & SPI_CPOL)
                set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
                                spi->chip_select);

        if (!(spi->mode & SPI_CPHA))
                set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
                                spi->chip_select);
        else
                clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
                                spi->chip_select);

        /*
         * Version 1 hardware supports two basic SPI modes:
         *  - Standard SPI mode uses 4 pins, with chipselect
         *  - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
         *    (distinct from SPI_3WIRE, with just one data wire;
         *    or similar variants without MOSI or without MISO)
         *
         * Version 2 hardware supports an optional handshaking signal,
         * so it can support two more modes:
         *  - 5 pin SPI variant is standard SPI plus SPI_READY
         *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
         */

        if (davinci_spi->version == SPI_VERSION_2) {
                clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
                                spi->chip_select);
                set_fmt_bits(davinci_spi->base,
                                (davinci_spi->pdata->wdelay
                                        << SPIFMT_WDELAY_SHIFT)
                                        & SPIFMT_WDELAY_MASK,
                                spi->chip_select);

                if (davinci_spi->pdata->odd_parity)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_ODD_PARITY_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_ODD_PARITY_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->parity_enable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_PARITYENA_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_PARITYENA_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->wait_enable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_WAITENA_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_WAITENA_MASK,
                                        spi->chip_select);

                if (davinci_spi->pdata->timer_disable)
                        set_fmt_bits(davinci_spi->base,
                                        SPIFMT_DISTIMER_MASK,
                                        spi->chip_select);
                else
                        clear_fmt_bits(davinci_spi->base,
                                        SPIFMT_DISTIMER_MASK,
                                        spi->chip_select);
        }

        retval = davinci_spi_setup_transfer(spi, NULL);

        return retval;
}

static void davinci_spi_cleanup(struct spi_device *spi)
{
        struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
        struct davinci_spi_dma *davinci_spi_dma;

        if (use_dma && davinci_spi->dma_channels) {
                davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

                if ((davinci_spi_dma->dma_rx_channel != -1)
                                && (davinci_spi_dma->dma_tx_channel != -1)) {
                        edma_free_channel(davinci_spi_dma->dma_tx_channel);
                        edma_free_channel(davinci_spi_dma->dma_rx_channel);
                }
        }
}

static int davinci_spi_bufs_prep(struct spi_device *spi,
                        struct davinci_spi *davinci_spi)
{
        struct davinci_spi_platform_data *pdata;
        int op_mode = 0;

        /*
         * REVISIT unless devices disagree about SPI_LOOP or
         * SPI_READY (SPI_NO_CS only allows one device!), this
         * should not need to be done before each message...
         * optimize for both flags staying cleared.
         */

        op_mode = SPIPC0_DIFUN_MASK
                | SPIPC0_DOFUN_MASK
                | SPIPC0_CLKFUN_MASK;
        if (!(spi->mode & SPI_NO_CS)) {
                pdata = davinci_spi->pdata;
                if (!pdata->chip_sel ||
                        pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
                        op_mode |= 1 << spi->chip_select;
        }
        if (spi->mode & SPI_READY)
                op_mode |= SPIPC0_SPIENA_MASK;

        iowrite32(op_mode, davinci_spi->base + SPIPC0);

        if (spi->mode & SPI_LOOP)
                set_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);
        else
                clear_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_LOOPBACK_MASK);

        return 0;
}

static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
                        int int_status)
{
        struct device *sdev = davinci_spi->bitbang.master->dev.parent;

        if (int_status & SPIFLG_TIMEOUT_MASK) {
                dev_dbg(sdev, "SPI Time-out Error\n");
                return -ETIMEDOUT;
        }
        if (int_status & SPIFLG_DESYNC_MASK) {
                dev_dbg(sdev, "SPI Desynchronization Error\n");
                return -EIO;
        }
        if (int_status & SPIFLG_BITERR_MASK) {
                dev_dbg(sdev, "SPI Bit error\n");
                return -EIO;
        }

        if (davinci_spi->version == SPI_VERSION_2) {
                if (int_status & SPIFLG_DLEN_ERR_MASK) {
                        dev_dbg(sdev, "SPI Data Length Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_PARERR_MASK) {
                        dev_dbg(sdev, "SPI Parity Error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_OVRRUN_MASK) {
                        dev_dbg(sdev, "SPI Data Overrun error\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_TX_INTR_MASK) {
                        dev_dbg(sdev, "SPI TX intr bit set\n");
                        return -EIO;
                }
                if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
                        dev_dbg(sdev, "SPI Buffer Init Active\n");
                        return -EBUSY;
                }
        }

        return 0;
}

/**
 * davinci_spi_bufs_pio - handle a transfer in PIO mode
 * @spi: spi device on which data transfer is to be done
 * @t: spi transfer in which transfer info is filled
 *
 * This function writes the data to be transferred into the data register
 * of the SPI controller and then waits until the transfer is complete,
 * either by polling or via the IRQ handler.
 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        int int_status, count, ret;
        u8 conv;
        u32 tx_data, data1_reg_val;
        u32 buf_val, flg_val;
        struct davinci_spi_platform_data *pdata;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;

        davinci_spi->tx = t->tx_buf;
        davinci_spi->rx = t->rx_buf;

        /* convert len to words based on bits_per_word */
        conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
        davinci_spi->count = t->len / conv;

        data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

        INIT_COMPLETION(davinci_spi->done);

        ret = davinci_spi_bufs_prep(spi, davinci_spi);
        if (ret)
                return ret;

        /* Enable SPI */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

        iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
                        (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
                        davinci_spi->base + SPIDELAY);

        count = davinci_spi->count;

        /* Determine the command to execute READ or WRITE */
        if (t->tx_buf) {
                clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

                while (1) {
                        tx_data = davinci_spi->get_tx(davinci_spi);

                        data1_reg_val &= ~(0xFFFF);
                        data1_reg_val |= (0xFFFF & tx_data);

                        buf_val = ioread32(davinci_spi->base + SPIBUF);
                        if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
                                iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                count--;
                        }
                        while (ioread32(davinci_spi->base + SPIBUF)
                                        & SPIBUF_RXEMPTY_MASK)
                                cpu_relax();

                        /* getting the returned byte */
                        if (t->rx_buf) {
                                buf_val = ioread32(davinci_spi->base + SPIBUF);
                                davinci_spi->get_rx(buf_val, davinci_spi);
                        }
                        if (count <= 0)
                                break;
                }
        } else {
                if (pdata->poll_mode) {
                        while (1) {
                                /* keeps the serial clock going */
                                if ((ioread32(davinci_spi->base + SPIBUF)
                                                & SPIBUF_TXFULL_MASK) == 0)
                                        iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                while (ioread32(davinci_spi->base + SPIBUF) &
                                                SPIBUF_RXEMPTY_MASK)
                                        cpu_relax();

                                flg_val = ioread32(davinci_spi->base + SPIFLG);
                                buf_val = ioread32(davinci_spi->base + SPIBUF);

                                davinci_spi->get_rx(buf_val, davinci_spi);

                                count--;
                                if (count <= 0)
                                        break;
                        }
                } else {        /* Receive in Interrupt mode */
                        int i;

                        for (i = 0; i < davinci_spi->count; i++) {
                                set_io_bits(davinci_spi->base + SPIINT,
                                                SPIINT_BITERR_INTR
                                                | SPIINT_OVRRUN_INTR
                                                | SPIINT_RX_INTR);

                                iowrite32(data1_reg_val,
                                                davinci_spi->base + SPIDAT1);

                                while (ioread32(davinci_spi->base + SPIINT) &
                                                SPIINT_RX_INTR)
                                        cpu_relax();
                        }
                        iowrite32((data1_reg_val & 0x0ffcffff),
                                        davinci_spi->base + SPIDAT1);
                }
        }

        /*
         * Check for bit error, desync error, parity error, timeout error and
         * receive overflow errors
         */
        int_status = ioread32(davinci_spi->base + SPIFLG);

        ret = davinci_spi_check_error(davinci_spi, int_status);
        if (ret != 0)
                return ret;

        /* SPI Framework maintains the count only in bytes so convert back */
        davinci_spi->count *= conv;

        return t->len;
}

static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
        struct davinci_spi *davinci_spi;
        int int_status = 0;
        int count, temp_count;
        u8 conv = 1;
        u32 data1_reg_val;
        struct davinci_spi_dma *davinci_spi_dma;
        int word_len, data_type, ret;
        unsigned long tx_reg, rx_reg;
        struct davinci_spi_platform_data *pdata;
        struct device *sdev;

        davinci_spi = spi_master_get_devdata(spi->master);
        pdata = davinci_spi->pdata;
        sdev = davinci_spi->bitbang.master->dev.parent;

        davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

        tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
        rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

        davinci_spi->tx = t->tx_buf;
        davinci_spi->rx = t->rx_buf;

        /* convert len to words based on bits_per_word */
        conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
        davinci_spi->count = t->len / conv;

        data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

        INIT_COMPLETION(davinci_spi->done);

        init_completion(&davinci_spi_dma->dma_rx_completion);
        init_completion(&davinci_spi_dma->dma_tx_completion);

        word_len = conv * 8;

        if (word_len <= 8)
                data_type = DAVINCI_DMA_DATA_TYPE_S8;
        else if (word_len <= 16)
                data_type = DAVINCI_DMA_DATA_TYPE_S16;
        else if (word_len <= 32)
                data_type = DAVINCI_DMA_DATA_TYPE_S32;
        else
                return -EINVAL;

        ret = davinci_spi_bufs_prep(spi, davinci_spi);
        if (ret)
                return ret;

        /* Put delay val if required */
        iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
                        (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
                        davinci_spi->base + SPIDELAY);

        count = davinci_spi->count;     /* the number of elements */

        /* disable all interrupts for dma transfers */
        clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
        /* Disable SPI to write configuration bits in SPIDAT */
        clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
        /* Enable SPI */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

        if (t->tx_buf) {
                t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        dev_dbg(sdev, "Unable to DMA map a %d bytes"
                                " TX buffer\n", count);
                        return -ENOMEM;
                }
                temp_count = count;
        } else {
                /* We need TX clocking for RX transaction */
                t->tx_dma = dma_map_single(&spi->dev,
                                (void *)davinci_spi->tmp_buf, count + 1,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        dev_dbg(sdev, "Unable to DMA map a %d bytes"
                                " TX tmp buffer\n", count);
                        return -ENOMEM;
                }
                temp_count = count + 1;
        }

        edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
                        data_type, temp_count, 1, 0, ASYNC);
        edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
        edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
        edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
        edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

        if (t->rx_buf) {
                /* initiate transaction */
                iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

                t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
                                DMA_FROM_DEVICE);
                if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
                                count);
                        if (t->tx_buf != NULL)
                                dma_unmap_single(NULL, t->tx_dma,
                                                count, DMA_TO_DEVICE);
                        return -ENOMEM;
                }
                edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
                                data_type, count, 1, 0, ASYNC);
                edma_set_src(davinci_spi_dma->dma_rx_channel,
                                rx_reg, INCR, W8BIT);
                edma_set_dest(davinci_spi_dma->dma_rx_channel,
                                t->rx_dma, INCR, W8BIT);
                edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
                edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
                                data_type, 0);
        }

        if ((t->tx_buf) || (t->rx_buf))
                edma_start(davinci_spi_dma->dma_tx_channel);

        if (t->rx_buf)
                edma_start(davinci_spi_dma->dma_rx_channel);

        if ((t->rx_buf) || (t->tx_buf))
                davinci_spi_set_dma_req(spi, 1);

        if (t->tx_buf)
                wait_for_completion_interruptible(
                                &davinci_spi_dma->dma_tx_completion);

        if (t->rx_buf)
                wait_for_completion_interruptible(
                                &davinci_spi_dma->dma_rx_completion);

        dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

        if (t->rx_buf)
                dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

        /*
         * Check for bit error, desync error, parity error, timeout error and
         * receive overflow errors
         */
        int_status = ioread32(davinci_spi->base + SPIFLG);

        ret = davinci_spi_check_error(davinci_spi, int_status);
        if (ret != 0)
                return ret;

        /* SPI Framework maintains the count only in bytes so convert back */
        davinci_spi->count *= conv;

        return t->len;
}

/**
 * davinci_spi_irq - IRQ handler for DaVinci SPI
 * @irq: IRQ number for this SPI Master
 * @context_data: structure for SPI Master controller davinci_spi
 */
static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
{
        struct davinci_spi *davinci_spi = context_data;
        u32 int_status, rx_data = 0;
        irqreturn_t ret = IRQ_NONE;

        int_status = ioread32(davinci_spi->base + SPIFLG);

        while ((int_status & SPIFLG_RX_INTR_MASK)) {
                if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
                        ret = IRQ_HANDLED;

                        rx_data = ioread32(davinci_spi->base + SPIBUF);
                        davinci_spi->get_rx(rx_data, davinci_spi);

                        /* Disable Receive Interrupt */
                        iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
                                        davinci_spi->base + SPIINT);
                } else
                        (void)davinci_spi_check_error(davinci_spi, int_status);

                int_status = ioread32(davinci_spi->base + SPIFLG);
        }

        return ret;
}

/**
 * davinci_spi_probe - probe function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 */
static int davinci_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct davinci_spi *davinci_spi;
        struct davinci_spi_platform_data *pdata;
        struct resource *r, *mem;
        resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
        resource_size_t dma_eventq = SPI_NO_RESOURCE;
        int i = 0, ret = 0;

        pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                ret = -ENODEV;
                goto err;
        }

        master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
        if (master == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        dev_set_drvdata(&pdev->dev, master);

        davinci_spi = spi_master_get_devdata(master);
        if (davinci_spi == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                ret = -ENOENT;
                goto free_master;
        }

        davinci_spi->pbase = r->start;
        davinci_spi->region_size = resource_size(r);
        davinci_spi->pdata = pdata;

        mem = request_mem_region(r->start, davinci_spi->region_size,
                                        pdev->name);
        if (mem == NULL) {
                ret = -EBUSY;
                goto free_master;
        }

        davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
        if (davinci_spi->base == NULL) {
                ret = -ENOMEM;
                goto release_region;
        }

        davinci_spi->irq = platform_get_irq(pdev, 0);
        if (davinci_spi->irq <= 0) {
                ret = -EINVAL;
                goto unmap_io;
        }

        ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
                                dev_name(&pdev->dev), davinci_spi);
        if (ret)
                goto unmap_io;

        /* Allocate tmp_buf for tx_buf */
        davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
        if (davinci_spi->tmp_buf == NULL) {
                ret = -ENOMEM;
                goto irq_free;
        }

        davinci_spi->bitbang.master = spi_master_get(master);
        if (davinci_spi->bitbang.master == NULL) {
                ret = -ENODEV;
                goto free_tmp_buf;
        }

        davinci_spi->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(davinci_spi->clk)) {
                ret = -ENODEV;
                goto put_master;
        }
        clk_enable(davinci_spi->clk);

        master->bus_num = pdev->id;
        master->num_chipselect = pdata->num_chipselect;
        master->setup = davinci_spi_setup;
        master->cleanup = davinci_spi_cleanup;

        davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
        davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;

        davinci_spi->version = pdata->version;
        use_dma = pdata->use_dma;

        davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
        if (davinci_spi->version == SPI_VERSION_2)
                davinci_spi->bitbang.flags |= SPI_READY;

        if (use_dma) {
                r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                if (r)
                        dma_rx_chan = r->start;
                r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
                if (r)
                        dma_tx_chan = r->start;
                r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
                if (r)
                        dma_eventq = r->start;
        }

        if (!use_dma ||
            dma_rx_chan == SPI_NO_RESOURCE ||
            dma_tx_chan == SPI_NO_RESOURCE ||
            dma_eventq == SPI_NO_RESOURCE) {
                davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
                use_dma = 0;
        } else {
                davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
                davinci_spi->dma_channels = kzalloc(master->num_chipselect
                                * sizeof(struct davinci_spi_dma), GFP_KERNEL);
                if (davinci_spi->dma_channels == NULL) {
                        ret = -ENOMEM;
                        goto free_clk;
                }

                for (i = 0; i < master->num_chipselect; i++) {
                        davinci_spi->dma_channels[i].dma_rx_channel = -1;
                        davinci_spi->dma_channels[i].dma_rx_sync_dev =
                                        dma_rx_chan;
                        davinci_spi->dma_channels[i].dma_tx_channel = -1;
                        davinci_spi->dma_channels[i].dma_tx_sync_dev =
                                        dma_tx_chan;
                        davinci_spi->dma_channels[i].eventq = dma_eventq;
                }
                dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
                                "Using RX channel = %d, TX channel = %d and "
                                "event queue = %d\n", dma_rx_chan, dma_tx_chan,
                                dma_eventq);
        }

        davinci_spi->get_rx = davinci_spi_rx_buf_u8;
        davinci_spi->get_tx = davinci_spi_tx_buf_u8;

        init_completion(&davinci_spi->done);

        /* Reset In/OUT SPI module */
        iowrite32(0, davinci_spi->base + SPIGCR0);
        udelay(100);
        iowrite32(1, davinci_spi->base + SPIGCR0);

        /* initialize chip selects */
        if (pdata->chip_sel) {
                for (i = 0; i < pdata->num_chipselect; i++) {
                        if (pdata->chip_sel[i] != SPI_INTERN_CS)
                                gpio_direction_output(pdata->chip_sel[i], 1);
                }
        }

        /* Clock internal */
        if (davinci_spi->pdata->clk_internal)
                set_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_CLKMOD_MASK);
        else
                clear_io_bits(davinci_spi->base + SPIGCR1,
                                SPIGCR1_CLKMOD_MASK);

        iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);

        /* master mode default */
        set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);

        if (davinci_spi->pdata->intr_level)
                iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
        else
                iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);

        ret = spi_bitbang_start(&davinci_spi->bitbang);
        if (ret)
                goto free_clk;

        dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);

        if (!pdata->poll_mode)
                dev_info(&pdev->dev, "Operating in interrupt mode"
                        " using IRQ %d\n", davinci_spi->irq);

        return ret;

free_clk:
        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
put_master:
        spi_master_put(master);
free_tmp_buf:
        kfree(davinci_spi->tmp_buf);
irq_free:
        free_irq(davinci_spi->irq, davinci_spi);
unmap_io:
        iounmap(davinci_spi->base);
release_region:
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
free_master:
        kfree(master);
err:
        return ret;
}

/**
 * davinci_spi_remove - remove function for SPI Master Controller
 * @pdev: platform_device structure which contains platform specific data
 *
 * This function reverses the actions of davinci_spi_probe(): it frees the
 * IRQ and the SPI controller's memory region, and calls spi_bitbang_stop()
 * to destroy the work queue that was created by spi_bitbang_start().
 */
static int __exit davinci_spi_remove(struct platform_device *pdev)
{
        struct davinci_spi *davinci_spi;
        struct spi_master *master;

        master = dev_get_drvdata(&pdev->dev);
        davinci_spi = spi_master_get_devdata(master);

        spi_bitbang_stop(&davinci_spi->bitbang);

        clk_disable(davinci_spi->clk);
        clk_put(davinci_spi->clk);
        spi_master_put(master);
        kfree(davinci_spi->tmp_buf);
        free_irq(davinci_spi->irq, davinci_spi);
        iounmap(davinci_spi->base);
        release_mem_region(davinci_spi->pbase, davinci_spi->region_size);

        return 0;
}

static struct platform_driver davinci_spi_driver = {
        .driver.name = "spi_davinci",
        .remove = __exit_p(davinci_spi_remove),
};

static int __init davinci_spi_init(void)
{
        return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);

static void __exit davinci_spi_exit(void)
{
        platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);

MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
MODULE_LICENSE("GPL");
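
/*
 * Added illustrative note on board-side hookup (a sketch only, not taken
 * from this file): the probe above expects a platform device named
 * "spi_davinci" that supplies one memory resource, one IRQ and, when DMA is
 * used, three IORESOURCE_DMA entries (RX channel, TX channel, event queue),
 * plus a struct davinci_spi_platform_data describing the controller. The
 * field values below are hypothetical, and the memory, IRQ and DMA resources
 * are omitted for brevity:
 *
 *	static struct davinci_spi_platform_data board_spi0_pdata = {
 *		.num_chipselect	= 2,
 *		.clk_internal	= 1,
 *		.use_dma	= 0,
 *	};
 *
 *	static struct platform_device board_spi0_device = {
 *		.name	= "spi_davinci",
 *		.id	= 0,
 *		.dev	= {
 *			.platform_data = &board_spi0_pdata,
 *		},
 *	};
 */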