blob: 31b9c2278dec1843219af72abde5f87372d1985b [file] [log] [blame]
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001/*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090030#include <linux/slab.h>
Sandeep Paulraj358934a2009-12-16 22:02:18 +000031
32#include <mach/spi.h>
33#include <mach/edma.h>
34
35#define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37#define SPI_MAX_CHIPSELECT 2
38
39#define CS_DEFAULT 0xFF
40
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18)
49#define SPIFMT_SHIFTDIR_MASK BIT(20)
50#define SPIFMT_WAITENA_MASK BIT(21)
51#define SPIFMT_PARITYENA_MASK BIT(22)
52#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24
Brian Niebuhr7fe00922010-08-13 13:27:23 +053055#define SPIFMT_PRESCALE_SHIFT 8
Sandeep Paulraj358934a2009-12-16 22:02:18 +000056
Sandeep Paulraj358934a2009-12-16 22:02:18 +000057
58/* SPIPC0 */
59#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
60#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
61#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
62#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
Sandeep Paulraj358934a2009-12-16 22:02:18 +000063
64#define SPIINT_MASKALL 0x0101035F
65#define SPI_INTLVL_1 0x000001FFu
66#define SPI_INTLVL_0 0x00000000u
67
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +053068/* SPIDAT1 (upper 16 bit defines) */
69#define SPIDAT1_CSHOLD_MASK BIT(12)
70
71/* SPIGCR1 */
Sandeep Paulraj358934a2009-12-16 22:02:18 +000072#define SPIGCR1_CLKMOD_MASK BIT(1)
73#define SPIGCR1_MASTER_MASK BIT(0)
74#define SPIGCR1_LOOPBACK_MASK BIT(16)
Sekhar Nori8e206f12010-08-20 16:20:49 +053075#define SPIGCR1_SPIENA_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +000076
77/* SPIBUF */
78#define SPIBUF_TXFULL_MASK BIT(29)
79#define SPIBUF_RXEMPTY_MASK BIT(31)
80
Brian Niebuhr7abbf232010-08-19 15:07:38 +053081/* SPIDELAY */
82#define SPIDELAY_C2TDELAY_SHIFT 24
83#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
84#define SPIDELAY_T2CDELAY_SHIFT 16
85#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
86#define SPIDELAY_T2EDELAY_SHIFT 8
87#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
88#define SPIDELAY_C2EDELAY_SHIFT 0
89#define SPIDELAY_C2EDELAY_MASK 0xFF
90
Sandeep Paulraj358934a2009-12-16 22:02:18 +000091/* Error Masks */
92#define SPIFLG_DLEN_ERR_MASK BIT(0)
93#define SPIFLG_TIMEOUT_MASK BIT(1)
94#define SPIFLG_PARERR_MASK BIT(2)
95#define SPIFLG_DESYNC_MASK BIT(3)
96#define SPIFLG_BITERR_MASK BIT(4)
97#define SPIFLG_OVRRUN_MASK BIT(6)
98#define SPIFLG_RX_INTR_MASK BIT(8)
99#define SPIFLG_TX_INTR_MASK BIT(9)
100#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000101
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000102#define SPIINT_BITERR_INTR BIT(4)
103#define SPIINT_OVRRUN_INTR BIT(6)
104#define SPIINT_RX_INTR BIT(8)
105#define SPIINT_TX_INTR BIT(9)
106#define SPIINT_DMA_REQ_EN BIT(16)
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000107
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000108/* SPI Controller registers */
109#define SPIGCR0 0x00
110#define SPIGCR1 0x04
111#define SPIINT 0x08
112#define SPILVL 0x0c
113#define SPIFLG 0x10
114#define SPIPC0 0x14
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000115#define SPIDAT1 0x3c
116#define SPIBUF 0x40
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000117#define SPIDELAY 0x48
118#define SPIDEF 0x4c
119#define SPIFMT0 0x50
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000120
/*
 * Per-chipselect slave state.
 *
 * bytes_per_word: width of one transfer word in bytes (1 for 2-8 bpw,
 * 2 for 9-16 bpw); set by davinci_spi_setup_transfer() and used by the
 * transfer routines to convert between byte and word counts.
 */
struct davinci_spi_slave {
	u32	bytes_per_word;
};
124
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct davinci_spi_dma {
	int	dma_tx_channel;		/* EDMA TX channel; -1 when not allocated */
	int	dma_rx_channel;		/* EDMA RX channel; -1 when not allocated */
	int	dma_tx_sync_dev;	/* TX DMA request event number */
	int	dma_rx_sync_dev;	/* RX DMA request event number */
	enum dma_event_q eventq;	/* EDMA event queue for both channels */

	struct completion dma_tx_completion;	/* signalled by TX DMA callback */
	struct completion dma_rx_completion;	/* signalled by RX DMA callback */
};
136
/* SPI Controller driver's private data. */
struct davinci_spi {
	struct spi_bitbang	bitbang;	/* bitbang framework state */
	struct clk		*clk;		/* functional clock; its rate feeds the prescaler */

	u8			version;	/* IP revision; code enables extra features for SPI_VERSION_2 */
	resource_size_t		pbase;		/* physical register base, used for EDMA src/dst addresses */
	void __iomem		*base;		/* ioremapped register base */
	size_t			region_size;	/* size of the memory resource */
	u32			irq;		/* interrupt line number */
	struct completion	done;		/* re-initialized at the start of each transfer */

	const void		*tx;		/* current TX position, advanced by get_tx() */
	void			*rx;		/* current RX position, advanced by get_rx() */
	u8			*tmp_buf;	/* dummy TX buffer used to clock RX-only DMA transfers */
	int			count;		/* transfer length (in words during transfer, bytes after) */
	struct davinci_spi_dma	*dma_channels;	/* per-chipselect DMA state; may be NULL (PIO only) */
	struct davinci_spi_platform_data *pdata;	/* board-supplied configuration */

	/* word-width-specific accessors, selected in davinci_spi_setup_transfer() */
	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
	u32			(*get_tx)(struct davinci_spi *);

	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
};
161
Brian Niebuhr53a31b02010-08-16 15:05:51 +0530162static struct davinci_spi_config davinci_spi_default_cfg;
163
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000164static unsigned use_dma;
165
166static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
167{
168 u8 *rx = davinci_spi->rx;
169
170 *rx++ = (u8)data;
171 davinci_spi->rx = rx;
172}
173
174static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
175{
176 u16 *rx = davinci_spi->rx;
177
178 *rx++ = (u16)data;
179 davinci_spi->rx = rx;
180}
181
182static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
183{
184 u32 data;
185 const u8 *tx = davinci_spi->tx;
186
187 data = *tx++;
188 davinci_spi->tx = tx;
189 return data;
190}
191
192static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
193{
194 u32 data;
195 const u16 *tx = davinci_spi->tx;
196
197 data = *tx++;
198 davinci_spi->tx = tx;
199 return data;
200}
201
202static inline void set_io_bits(void __iomem *addr, u32 bits)
203{
204 u32 v = ioread32(addr);
205
206 v |= bits;
207 iowrite32(v, addr);
208}
209
210static inline void clear_io_bits(void __iomem *addr, u32 bits)
211{
212 u32 v = ioread32(addr);
213
214 v &= ~bits;
215 iowrite32(v, addr);
216}
217
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000218static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
219{
220 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
221
222 if (enable)
223 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
224 else
225 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
226}
227
228/*
229 * Interface to control the chip select signal
230 */
231static void davinci_spi_chipselect(struct spi_device *spi, int value)
232{
233 struct davinci_spi *davinci_spi;
234 struct davinci_spi_platform_data *pdata;
Brian Niebuhr7978b8c2010-08-13 10:11:03 +0530235 u8 chip_sel = spi->chip_select;
Brian Niebuhrcfbc5d12010-08-12 12:27:33 +0530236 u16 spidat1_cfg = CS_DEFAULT;
Brian Niebuhr23853972010-08-13 10:57:44 +0530237 bool gpio_chipsel = false;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000238
239 davinci_spi = spi_master_get_devdata(spi->master);
240 pdata = davinci_spi->pdata;
241
Brian Niebuhr23853972010-08-13 10:57:44 +0530242 if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
243 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
244 gpio_chipsel = true;
245
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000246 /*
247 * Board specific chip select logic decides the polarity and cs
248 * line for the controller
249 */
Brian Niebuhr23853972010-08-13 10:57:44 +0530250 if (gpio_chipsel) {
251 if (value == BITBANG_CS_ACTIVE)
252 gpio_set_value(pdata->chip_sel[chip_sel], 0);
253 else
254 gpio_set_value(pdata->chip_sel[chip_sel], 1);
255 } else {
256 if (value == BITBANG_CS_ACTIVE) {
257 spidat1_cfg |= SPIDAT1_CSHOLD_MASK;
258 spidat1_cfg &= ~(0x1 << chip_sel);
259 }
Brian Niebuhr7978b8c2010-08-13 10:11:03 +0530260
Brian Niebuhr23853972010-08-13 10:57:44 +0530261 iowrite16(spidat1_cfg, davinci_spi->base + SPIDAT1 + 2);
262 }
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000263}
264
265/**
Brian Niebuhr7fe00922010-08-13 13:27:23 +0530266 * davinci_spi_get_prescale - Calculates the correct prescale value
267 * @maxspeed_hz: the maximum rate the SPI clock can run at
268 *
269 * This function calculates the prescale value that generates a clock rate
270 * less than or equal to the specified maximum.
271 *
272 * Returns: calculated prescale - 1 for easy programming into SPI registers
273 * or negative error number if valid prescalar cannot be updated.
274 */
275static inline int davinci_spi_get_prescale(struct davinci_spi *davinci_spi,
276 u32 max_speed_hz)
277{
278 int ret;
279
280 ret = DIV_ROUND_UP(clk_get_rate(davinci_spi->clk), max_speed_hz);
281
282 if (ret < 3 || ret > 256)
283 return -EINVAL;
284
285 return ret - 1;
286}
287
288/**
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000289 * davinci_spi_setup_transfer - This functions will determine transfer method
290 * @spi: spi device on which data transfer to be done
291 * @t: spi transfer in which transfer info is filled
292 *
293 * This function determines data transfer method (8/16/32 bit transfer).
294 * It will also set the SPI Clock Control register according to
295 * SPI slave device freq.
296 */
297static int davinci_spi_setup_transfer(struct spi_device *spi,
298 struct spi_transfer *t)
299{
300
301 struct davinci_spi *davinci_spi;
Brian Niebuhr25f33512010-08-19 12:15:22 +0530302 struct davinci_spi_config *spicfg;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000303 u8 bits_per_word = 0;
Brian Niebuhr25f33512010-08-19 12:15:22 +0530304 u32 hz = 0, spifmt = 0, prescale = 0;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000305
306 davinci_spi = spi_master_get_devdata(spi->master);
Brian Niebuhr25f33512010-08-19 12:15:22 +0530307 spicfg = (struct davinci_spi_config *)spi->controller_data;
308 if (!spicfg)
309 spicfg = &davinci_spi_default_cfg;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000310
311 if (t) {
312 bits_per_word = t->bits_per_word;
313 hz = t->speed_hz;
314 }
315
316 /* if bits_per_word is not set then set it default */
317 if (!bits_per_word)
318 bits_per_word = spi->bits_per_word;
319
320 /*
321 * Assign function pointer to appropriate transfer method
322 * 8bit, 16bit or 32bit transfer
323 */
324 if (bits_per_word <= 8 && bits_per_word >= 2) {
325 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
326 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
327 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
328 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
329 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
330 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
331 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
332 } else
333 return -EINVAL;
334
335 if (!hz)
336 hz = spi->max_speed_hz;
337
Brian Niebuhr25f33512010-08-19 12:15:22 +0530338 /* Set up SPIFMTn register, unique to this chipselect. */
339
Brian Niebuhr7fe00922010-08-13 13:27:23 +0530340 prescale = davinci_spi_get_prescale(davinci_spi, hz);
341 if (prescale < 0)
342 return prescale;
343
Brian Niebuhr25f33512010-08-19 12:15:22 +0530344 spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000345
Brian Niebuhr25f33512010-08-19 12:15:22 +0530346 if (spi->mode & SPI_LSB_FIRST)
347 spifmt |= SPIFMT_SHIFTDIR_MASK;
348
349 if (spi->mode & SPI_CPOL)
350 spifmt |= SPIFMT_POLARITY_MASK;
351
352 if (!(spi->mode & SPI_CPHA))
353 spifmt |= SPIFMT_PHASE_MASK;
354
355 /*
356 * Version 1 hardware supports two basic SPI modes:
357 * - Standard SPI mode uses 4 pins, with chipselect
358 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
359 * (distinct from SPI_3WIRE, with just one data wire;
360 * or similar variants without MOSI or without MISO)
361 *
362 * Version 2 hardware supports an optional handshaking signal,
363 * so it can support two more modes:
364 * - 5 pin SPI variant is standard SPI plus SPI_READY
365 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
366 */
367
368 if (davinci_spi->version == SPI_VERSION_2) {
369
Brian Niebuhr7abbf232010-08-19 15:07:38 +0530370 u32 delay = 0;
371
Brian Niebuhr25f33512010-08-19 12:15:22 +0530372 spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
373 & SPIFMT_WDELAY_MASK);
374
375 if (spicfg->odd_parity)
376 spifmt |= SPIFMT_ODD_PARITY_MASK;
377
378 if (spicfg->parity_enable)
379 spifmt |= SPIFMT_PARITYENA_MASK;
380
Brian Niebuhr7abbf232010-08-19 15:07:38 +0530381 if (spicfg->timer_disable) {
Brian Niebuhr25f33512010-08-19 12:15:22 +0530382 spifmt |= SPIFMT_DISTIMER_MASK;
Brian Niebuhr7abbf232010-08-19 15:07:38 +0530383 } else {
384 delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
385 & SPIDELAY_C2TDELAY_MASK;
386 delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
387 & SPIDELAY_T2CDELAY_MASK;
388 }
Brian Niebuhr25f33512010-08-19 12:15:22 +0530389
Brian Niebuhr7abbf232010-08-19 15:07:38 +0530390 if (spi->mode & SPI_READY) {
Brian Niebuhr25f33512010-08-19 12:15:22 +0530391 spifmt |= SPIFMT_WAITENA_MASK;
Brian Niebuhr7abbf232010-08-19 15:07:38 +0530392 delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
393 & SPIDELAY_T2EDELAY_MASK;
394 delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
395 & SPIDELAY_C2EDELAY_MASK;
396 }
397
398 iowrite32(delay, davinci_spi->base + SPIDELAY);
Brian Niebuhr25f33512010-08-19 12:15:22 +0530399 }
400
401 iowrite32(spifmt, davinci_spi->base + SPIFMT0);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000402
403 return 0;
404}
405
406static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
407{
408 struct spi_device *spi = (struct spi_device *)data;
409 struct davinci_spi *davinci_spi;
410 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000411
412 davinci_spi = spi_master_get_devdata(spi->master);
413 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000414
415 if (ch_status == DMA_COMPLETE)
416 edma_stop(davinci_spi_dma->dma_rx_channel);
417 else
418 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
419
420 complete(&davinci_spi_dma->dma_rx_completion);
421 /* We must disable the DMA RX request */
422 davinci_spi_set_dma_req(spi, 0);
423}
424
425static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
426{
427 struct spi_device *spi = (struct spi_device *)data;
428 struct davinci_spi *davinci_spi;
429 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000430
431 davinci_spi = spi_master_get_devdata(spi->master);
432 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000433
434 if (ch_status == DMA_COMPLETE)
435 edma_stop(davinci_spi_dma->dma_tx_channel);
436 else
437 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
438
439 complete(&davinci_spi_dma->dma_tx_completion);
440 /* We must disable the DMA TX request */
441 davinci_spi_set_dma_req(spi, 0);
442}
443
444static int davinci_spi_request_dma(struct spi_device *spi)
445{
446 struct davinci_spi *davinci_spi;
447 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000448 struct device *sdev;
449 int r;
450
451 davinci_spi = spi_master_get_devdata(spi->master);
452 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000453 sdev = davinci_spi->bitbang.master->dev.parent;
454
455 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
456 davinci_spi_dma_rx_callback, spi,
457 davinci_spi_dma->eventq);
458 if (r < 0) {
459 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
460 return -EAGAIN;
461 }
462 davinci_spi_dma->dma_rx_channel = r;
463 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
464 davinci_spi_dma_tx_callback, spi,
465 davinci_spi_dma->eventq);
466 if (r < 0) {
467 edma_free_channel(davinci_spi_dma->dma_rx_channel);
468 davinci_spi_dma->dma_rx_channel = -1;
469 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
470 return -EAGAIN;
471 }
472 davinci_spi_dma->dma_tx_channel = r;
473
474 return 0;
475}
476
477/**
478 * davinci_spi_setup - This functions will set default transfer method
479 * @spi: spi device on which data transfer to be done
480 *
481 * This functions sets the default transfer method.
482 */
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000483static int davinci_spi_setup(struct spi_device *spi)
484{
485 int retval;
486 struct davinci_spi *davinci_spi;
487 struct davinci_spi_dma *davinci_spi_dma;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000488
489 davinci_spi = spi_master_get_devdata(spi->master);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000490
491 /* if bits per word length is zero then set it default 8 */
492 if (!spi->bits_per_word)
493 spi->bits_per_word = 8;
494
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000495 if (use_dma && davinci_spi->dma_channels) {
496 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
497
498 if ((davinci_spi_dma->dma_rx_channel == -1)
499 || (davinci_spi_dma->dma_tx_channel == -1)) {
500 retval = davinci_spi_request_dma(spi);
501 if (retval < 0)
502 return retval;
503 }
504 }
505
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000506 retval = davinci_spi_setup_transfer(spi, NULL);
507
508 return retval;
509}
510
511static void davinci_spi_cleanup(struct spi_device *spi)
512{
513 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
514 struct davinci_spi_dma *davinci_spi_dma;
515
516 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
517
518 if (use_dma && davinci_spi->dma_channels) {
519 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
520
521 if ((davinci_spi_dma->dma_rx_channel != -1)
522 && (davinci_spi_dma->dma_tx_channel != -1)) {
523 edma_free_channel(davinci_spi_dma->dma_tx_channel);
524 edma_free_channel(davinci_spi_dma->dma_rx_channel);
525 }
526 }
527}
528
529static int davinci_spi_bufs_prep(struct spi_device *spi,
530 struct davinci_spi *davinci_spi)
531{
Brian Niebuhr23853972010-08-13 10:57:44 +0530532 struct davinci_spi_platform_data *pdata;
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000533 int op_mode = 0;
534
535 /*
536 * REVISIT unless devices disagree about SPI_LOOP or
537 * SPI_READY (SPI_NO_CS only allows one device!), this
538 * should not need to be done before each message...
539 * optimize for both flags staying cleared.
540 */
541
542 op_mode = SPIPC0_DIFUN_MASK
543 | SPIPC0_DOFUN_MASK
544 | SPIPC0_CLKFUN_MASK;
Brian Niebuhr23853972010-08-13 10:57:44 +0530545 if (!(spi->mode & SPI_NO_CS)) {
546 pdata = davinci_spi->pdata;
547 if (!pdata->chip_sel ||
548 pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)
549 op_mode |= 1 << spi->chip_select;
550 }
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000551 if (spi->mode & SPI_READY)
552 op_mode |= SPIPC0_SPIENA_MASK;
553
554 iowrite32(op_mode, davinci_spi->base + SPIPC0);
555
556 if (spi->mode & SPI_LOOP)
557 set_io_bits(davinci_spi->base + SPIGCR1,
558 SPIGCR1_LOOPBACK_MASK);
559 else
560 clear_io_bits(davinci_spi->base + SPIGCR1,
561 SPIGCR1_LOOPBACK_MASK);
562
563 return 0;
564}
565
566static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
567 int int_status)
568{
569 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
570
571 if (int_status & SPIFLG_TIMEOUT_MASK) {
572 dev_dbg(sdev, "SPI Time-out Error\n");
573 return -ETIMEDOUT;
574 }
575 if (int_status & SPIFLG_DESYNC_MASK) {
576 dev_dbg(sdev, "SPI Desynchronization Error\n");
577 return -EIO;
578 }
579 if (int_status & SPIFLG_BITERR_MASK) {
580 dev_dbg(sdev, "SPI Bit error\n");
581 return -EIO;
582 }
583
584 if (davinci_spi->version == SPI_VERSION_2) {
585 if (int_status & SPIFLG_DLEN_ERR_MASK) {
586 dev_dbg(sdev, "SPI Data Length Error\n");
587 return -EIO;
588 }
589 if (int_status & SPIFLG_PARERR_MASK) {
590 dev_dbg(sdev, "SPI Parity Error\n");
591 return -EIO;
592 }
593 if (int_status & SPIFLG_OVRRUN_MASK) {
594 dev_dbg(sdev, "SPI Data Overrun error\n");
595 return -EIO;
596 }
597 if (int_status & SPIFLG_TX_INTR_MASK) {
598 dev_dbg(sdev, "SPI TX intr bit set\n");
599 return -EIO;
600 }
601 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
602 dev_dbg(sdev, "SPI Buffer Init Active\n");
603 return -EBUSY;
604 }
605 }
606
607 return 0;
608}
609
610/**
611 * davinci_spi_bufs - functions which will handle transfer data
612 * @spi: spi device on which data transfer to be done
613 * @t: spi transfer in which transfer info is filled
614 *
615 * This function will put data to be transferred into data register
616 * of SPI controller and then wait until the completion will be marked
617 * by the IRQ Handler.
618 */
static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status, count, ret;
	u8 conv;
	u32 tx_data, data1_reg_val;
	u32 buf_val, flg_val;
	struct davinci_spi_platform_data *pdata;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	/* preserve the CS/CSHOLD bits already programmed into SPIDAT1 */
	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	count = davinci_spi->count;

	/* Determine the command to execute READ or WRITE */
	if (t->tx_buf) {
		/* busy-wait TX (and optional RX) path: interrupts masked */
		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);

		while (1) {
			tx_data = davinci_spi->get_tx(davinci_spi);

			/* lower 16 bits of SPIDAT1 carry the data word */
			data1_reg_val &= ~(0xFFFF);
			data1_reg_val |= (0xFFFF & tx_data);

			/* only queue the word if the TX buffer has room */
			buf_val = ioread32(davinci_spi->base + SPIBUF);
			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				count--;
			}
			/* spin until a received word is available */
			while (ioread32(davinci_spi->base + SPIBUF)
					& SPIBUF_RXEMPTY_MASK)
				cpu_relax();

			/* getting the returned byte */
			if (t->rx_buf) {
				buf_val = ioread32(davinci_spi->base + SPIBUF);
				davinci_spi->get_rx(buf_val, davinci_spi);
			}
			if (count <= 0)
				break;
		}
	} else {
		if (pdata->poll_mode) {
			/* RX-only, polled: re-send the same SPIDAT1 word */
			while (1) {
				/* keeps the serial clock going */
				if ((ioread32(davinci_spi->base + SPIBUF)
						& SPIBUF_TXFULL_MASK) == 0)
					iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				while (ioread32(davinci_spi->base + SPIBUF) &
						SPIBUF_RXEMPTY_MASK)
					cpu_relax();

				/* SPIFLG read clears latched status bits(?)
				 * NOTE(review): flg_val is never used —
				 * presumably a status-clearing read; confirm */
				flg_val = ioread32(davinci_spi->base + SPIFLG);
				buf_val = ioread32(davinci_spi->base + SPIBUF);

				davinci_spi->get_rx(buf_val, davinci_spi);

				count--;
				if (count <= 0)
					break;
			}
		} else {	/* Receive in Interrupt mode */
			int i;

			for (i = 0; i < davinci_spi->count; i++) {
				/* enable RX + error interrupts per word */
				set_io_bits(davinci_spi->base + SPIINT,
						SPIINT_BITERR_INTR
						| SPIINT_OVRRUN_INTR
						| SPIINT_RX_INTR);

				iowrite32(data1_reg_val,
						davinci_spi->base + SPIDAT1);

				/* wait for the IRQ handler to clear RX_INTR */
				while (ioread32(davinci_spi->base + SPIINT) &
						SPIINT_RX_INTR)
					cpu_relax();
			}
			/* drop CSHOLD and CS defaults when done */
			iowrite32((data1_reg_val & 0x0ffcffff),
					davinci_spi->base + SPIDAT1);
		}
	}

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
738
739#define DAVINCI_DMA_DATA_TYPE_S8 0x01
740#define DAVINCI_DMA_DATA_TYPE_S16 0x02
741#define DAVINCI_DMA_DATA_TYPE_S32 0x04
742
/*
 * Transfer one spi_transfer using the EDMA channels reserved for this
 * chipselect.  A TX DMA is always set up (using tmp_buf for RX-only
 * transfers, since TX clocking is needed to receive), then the RX DMA
 * if a receive buffer was supplied.  Blocks until the DMA completion
 * callbacks fire.  Returns t->len on success or a negative errno.
 */
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count, temp_count;
	u8 conv = 1;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	sdev = davinci_spi->bitbang.master->dev.parent;

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	/* EDMA needs the *physical* addresses of the data registers */
	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	/* preserve the CS/CSHOLD bits already programmed into SPIDAT1 */
	data1_reg_val = ioread32(davinci_spi->base + SPIDAT1);

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	/* EDMA element size must match the SPI word width */
	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -EINVAL;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	count = davinci_spi->count;	/* the number of elements */

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	if (t->tx_buf) {
		/*
		 * NOTE(review): count is in *words* here but dma_map_single
		 * takes bytes — for 16-bit words this maps only half the
		 * buffer.  Looks like a latent bug; confirm against a
		 * bits_per_word > 8 DMA transfer.
		 */
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count;
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Unable to DMA map a %d bytes"
				" TX tmp buffer\n", count);
			return -ENOMEM;
		}
		temp_count = count + 1;
	}

	/* TX: one element per SPI word, destination fixed at SPIDAT1 */
	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
			data_type, temp_count, 1, 0, ASYNC);
	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);

	if (t->rx_buf) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			/*
			 * NOTE(review): dma_unmap_single with a NULL device
			 * (here and below) mismatches the &spi->dev used for
			 * mapping — verify against the DMA API contract.
			 */
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						 count, DMA_TO_DEVICE);
			return -ENOMEM;
		}
		/* RX: source fixed at SPIBUF, destination walks the buffer */
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
				data_type, count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
				data_type, 0);
	}

	if ((t->tx_buf) || (t->rx_buf))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf)
		edma_start(davinci_spi_dma->dma_rx_channel);

	/* let the controller raise DMA requests only once channels are armed */
	if ((t->rx_buf) || (t->tx_buf))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);

	if (t->rx_buf)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error,parity error,timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
892
893/**
894 * davinci_spi_irq - IRQ handler for DaVinci SPI
895 * @irq: IRQ number for this SPI Master
896 * @context_data: structure for SPI Master controller davinci_spi
897 */
898static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
899{
900 struct davinci_spi *davinci_spi = context_data;
901 u32 int_status, rx_data = 0;
902 irqreturn_t ret = IRQ_NONE;
903
904 int_status = ioread32(davinci_spi->base + SPIFLG);
905
906 while ((int_status & SPIFLG_RX_INTR_MASK)) {
907 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
908 ret = IRQ_HANDLED;
909
910 rx_data = ioread32(davinci_spi->base + SPIBUF);
911 davinci_spi->get_rx(rx_data, davinci_spi);
912
913 /* Disable Receive Interrupt */
914 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
915 davinci_spi->base + SPIINT);
916 } else
917 (void)davinci_spi_check_error(davinci_spi, int_status);
918
919 int_status = ioread32(davinci_spi->base + SPIFLG);
920 }
921
922 return ret;
923}
924
925/**
926 * davinci_spi_probe - probe function for SPI Master Controller
927 * @pdev: platform_device structure which contains plateform specific data
928 */
929static int davinci_spi_probe(struct platform_device *pdev)
930{
931 struct spi_master *master;
932 struct davinci_spi *davinci_spi;
933 struct davinci_spi_platform_data *pdata;
934 struct resource *r, *mem;
935 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
936 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
937 resource_size_t dma_eventq = SPI_NO_RESOURCE;
938 int i = 0, ret = 0;
939
940 pdata = pdev->dev.platform_data;
941 if (pdata == NULL) {
942 ret = -ENODEV;
943 goto err;
944 }
945
946 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
947 if (master == NULL) {
948 ret = -ENOMEM;
949 goto err;
950 }
951
952 dev_set_drvdata(&pdev->dev, master);
953
954 davinci_spi = spi_master_get_devdata(master);
955 if (davinci_spi == NULL) {
956 ret = -ENOENT;
957 goto free_master;
958 }
959
960 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
961 if (r == NULL) {
962 ret = -ENOENT;
963 goto free_master;
964 }
965
966 davinci_spi->pbase = r->start;
967 davinci_spi->region_size = resource_size(r);
968 davinci_spi->pdata = pdata;
969
970 mem = request_mem_region(r->start, davinci_spi->region_size,
971 pdev->name);
972 if (mem == NULL) {
973 ret = -EBUSY;
974 goto free_master;
975 }
976
Sekhar Nori50356dd2010-10-08 15:27:26 +0530977 davinci_spi->base = ioremap(r->start, davinci_spi->region_size);
Sandeep Paulraj358934a2009-12-16 22:02:18 +0000978 if (davinci_spi->base == NULL) {
979 ret = -ENOMEM;
980 goto release_region;
981 }
982
983 davinci_spi->irq = platform_get_irq(pdev, 0);
984 if (davinci_spi->irq <= 0) {
985 ret = -EINVAL;
986 goto unmap_io;
987 }
988
989 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
990 dev_name(&pdev->dev), davinci_spi);
991 if (ret)
992 goto unmap_io;
993
994 /* Allocate tmp_buf for tx_buf */
995 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
996 if (davinci_spi->tmp_buf == NULL) {
997 ret = -ENOMEM;
998 goto irq_free;
999 }
1000
1001 davinci_spi->bitbang.master = spi_master_get(master);
1002 if (davinci_spi->bitbang.master == NULL) {
1003 ret = -ENODEV;
1004 goto free_tmp_buf;
1005 }
1006
1007 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1008 if (IS_ERR(davinci_spi->clk)) {
1009 ret = -ENODEV;
1010 goto put_master;
1011 }
1012 clk_enable(davinci_spi->clk);
1013
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001014 master->bus_num = pdev->id;
1015 master->num_chipselect = pdata->num_chipselect;
1016 master->setup = davinci_spi_setup;
1017 master->cleanup = davinci_spi_cleanup;
1018
1019 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1020 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1021
1022 davinci_spi->version = pdata->version;
1023 use_dma = pdata->use_dma;
1024
1025 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1026 if (davinci_spi->version == SPI_VERSION_2)
1027 davinci_spi->bitbang.flags |= SPI_READY;
1028
1029 if (use_dma) {
Brian Niebuhr778e2612010-09-03 15:15:06 +05301030 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1031 if (r)
1032 dma_rx_chan = r->start;
1033 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1034 if (r)
1035 dma_tx_chan = r->start;
1036 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1037 if (r)
1038 dma_eventq = r->start;
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001039 }
1040
1041 if (!use_dma ||
1042 dma_rx_chan == SPI_NO_RESOURCE ||
1043 dma_tx_chan == SPI_NO_RESOURCE ||
1044 dma_eventq == SPI_NO_RESOURCE) {
1045 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1046 use_dma = 0;
1047 } else {
1048 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1049 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1050 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1051 if (davinci_spi->dma_channels == NULL) {
1052 ret = -ENOMEM;
1053 goto free_clk;
1054 }
1055
1056 for (i = 0; i < master->num_chipselect; i++) {
1057 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1058 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1059 dma_rx_chan;
1060 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1061 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1062 dma_tx_chan;
1063 davinci_spi->dma_channels[i].eventq = dma_eventq;
1064 }
1065 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1066 "Using RX channel = %d , TX channel = %d and "
1067 "event queue = %d", dma_rx_chan, dma_tx_chan,
1068 dma_eventq);
1069 }
1070
1071 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1072 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1073
1074 init_completion(&davinci_spi->done);
1075
1076 /* Reset In/OUT SPI module */
1077 iowrite32(0, davinci_spi->base + SPIGCR0);
1078 udelay(100);
1079 iowrite32(1, davinci_spi->base + SPIGCR0);
1080
Brian Niebuhr23853972010-08-13 10:57:44 +05301081 /* initialize chip selects */
1082 if (pdata->chip_sel) {
1083 for (i = 0; i < pdata->num_chipselect; i++) {
1084 if (pdata->chip_sel[i] != SPI_INTERN_CS)
1085 gpio_direction_output(pdata->chip_sel[i], 1);
1086 }
1087 }
1088
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001089 /* Clock internal */
1090 if (davinci_spi->pdata->clk_internal)
1091 set_io_bits(davinci_spi->base + SPIGCR1,
1092 SPIGCR1_CLKMOD_MASK);
1093 else
1094 clear_io_bits(davinci_spi->base + SPIGCR1,
1095 SPIGCR1_CLKMOD_MASK);
1096
Brian Niebuhr843a7132010-08-12 12:49:05 +05301097 iowrite32(CS_DEFAULT, davinci_spi->base + SPIDEF);
1098
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001099 /* master mode default */
1100 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1101
1102 if (davinci_spi->pdata->intr_level)
1103 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1104 else
1105 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1106
1107 ret = spi_bitbang_start(&davinci_spi->bitbang);
1108 if (ret)
1109 goto free_clk;
1110
Brian Niebuhr3b740b12010-09-03 14:50:07 +05301111 dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base);
Sandeep Paulraj358934a2009-12-16 22:02:18 +00001112
1113 if (!pdata->poll_mode)
1114 dev_info(&pdev->dev, "Operating in interrupt mode"
1115 " using IRQ %d\n", davinci_spi->irq);
1116
1117 return ret;
1118
1119free_clk:
1120 clk_disable(davinci_spi->clk);
1121 clk_put(davinci_spi->clk);
1122put_master:
1123 spi_master_put(master);
1124free_tmp_buf:
1125 kfree(davinci_spi->tmp_buf);
1126irq_free:
1127 free_irq(davinci_spi->irq, davinci_spi);
1128unmap_io:
1129 iounmap(davinci_spi->base);
1130release_region:
1131 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1132free_master:
1133 kfree(master);
1134err:
1135 return ret;
1136}
1137
1138/**
1139 * davinci_spi_remove - remove function for SPI Master Controller
1140 * @pdev: platform_device structure which contains plateform specific data
1141 *
1142 * This function will do the reverse action of davinci_spi_probe function
1143 * It will free the IRQ and SPI controller's memory region.
1144 * It will also call spi_bitbang_stop to destroy the work queue which was
1145 * created by spi_bitbang_start.
1146 */
1147static int __exit davinci_spi_remove(struct platform_device *pdev)
1148{
1149 struct davinci_spi *davinci_spi;
1150 struct spi_master *master;
1151
1152 master = dev_get_drvdata(&pdev->dev);
1153 davinci_spi = spi_master_get_devdata(master);
1154
1155 spi_bitbang_stop(&davinci_spi->bitbang);
1156
1157 clk_disable(davinci_spi->clk);
1158 clk_put(davinci_spi->clk);
1159 spi_master_put(master);
1160 kfree(davinci_spi->tmp_buf);
1161 free_irq(davinci_spi->irq, davinci_spi);
1162 iounmap(davinci_spi->base);
1163 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1164
1165 return 0;
1166}
1167
1168static struct platform_driver davinci_spi_driver = {
1169 .driver.name = "spi_davinci",
1170 .remove = __exit_p(davinci_spi_remove),
1171};
1172
/*
 * Module entry point: register the driver and bind any matching devices
 * that already exist, via platform_driver_probe().
 */
static int __init davinci_spi_init(void)
{
	return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
}
module_init(davinci_spi_init);
1178
/* Module exit point: unregister the platform driver. */
static void __exit davinci_spi_exit(void)
{
	platform_driver_unregister(&davinci_spi_driver);
}
module_exit(davinci_spi_exit);
1184
1185MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1186MODULE_LICENSE("GPL");