/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#include <mach/dma.h>
#include <plat/s3c64xx-spi.h>

34/* Registers and bit-fields */
35
36#define S3C64XX_SPI_CH_CFG 0x00
37#define S3C64XX_SPI_CLK_CFG 0x04
38#define S3C64XX_SPI_MODE_CFG 0x08
39#define S3C64XX_SPI_SLAVE_SEL 0x0C
40#define S3C64XX_SPI_INT_EN 0x10
41#define S3C64XX_SPI_STATUS 0x14
42#define S3C64XX_SPI_TX_DATA 0x18
43#define S3C64XX_SPI_RX_DATA 0x1C
44#define S3C64XX_SPI_PACKET_CNT 0x20
45#define S3C64XX_SPI_PENDING_CLR 0x24
46#define S3C64XX_SPI_SWAP_CFG 0x28
47#define S3C64XX_SPI_FB_CLK 0x2C
48
49#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
50#define S3C64XX_SPI_CH_SW_RST (1<<5)
51#define S3C64XX_SPI_CH_SLAVE (1<<4)
52#define S3C64XX_SPI_CPOL_L (1<<3)
53#define S3C64XX_SPI_CPHA_B (1<<2)
54#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
55#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
56
57#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
58#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
59#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
60#define S3C64XX_SPI_PSR_MASK 0xff
61
62#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
65#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
66#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
69#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
70#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
71#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
72#define S3C64XX_SPI_MODE_4BURST (1<<0)
73
74#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
75#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
76
77#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
78
79#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
80 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
81
82#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
83#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
84#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
85#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
86#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
87#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
88#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
89
90#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
91#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
92#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
93#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
94#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
95#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
96
97#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
98
99#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
100#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
101#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
102#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
103#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
104
105#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
106#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
107#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
108#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
109#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
110#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
111#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
112#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
113
114#define S3C64XX_SPI_FBCLK_MSK (3<<0)
115
Padmavathi Venna30757412011-07-05 17:14:02 +0900116#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
Jassi Brar230d42d2009-11-30 07:39:42 +0000117#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
118#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
119
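/*
 * Illustrative note (not from the original source): the STATUS decode above
 * depends entirely on per-SoC platform data. Assuming, for example,
 * fifo_lvl_mask = 0x7f and rx_lvl_offset = 13:
 *
 *	TX_FIFO_LVL(status, sci) == (status >> 6)  & 0x7f
 *	RX_FIFO_LVL(status, sci) == (status >> 13) & 0x7f
 *
 * so a STATUS value of 0x2040 decodes to one word pending in each FIFO.
 * The same mask also sets the DMA-vs-polling threshold used later,
 * (0x7f >> 1) + 1 = 64 bytes, i.e. transfers that fit in the FIFO are
 * polled and larger ones go through DMA.
 */
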
120#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
121#define S3C64XX_SPI_TRAILCNT_OFF 19
122
123#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
124
125#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
126
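/*
 * Illustrative note (not from the original source): msecs_to_loops() scales
 * the BogoMIPS calibration into busy-wait iterations. With assumed figures
 * loops_per_jiffy = 49152 and HZ = 200, msecs_to_loops(1) evaluates (with
 * integer division) to 49152 / 1000 * 200 * 1 = 9800 polling iterations for
 * roughly one millisecond.
 */
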
Jassi Brar230d42d2009-11-30 07:39:42 +0000127#define RXBUSY (1<<2)
128#define TXBUSY (1<<3)
129
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900130struct s3c64xx_spi_dma_data {
131 unsigned ch;
132 enum dma_data_direction direction;
133 enum dma_ch dmach;
134};
135
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Pointer to the platform device that owns this controller.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @queue: To log SPI xfer requests.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: DMA channel info used for receive.
 * @tx_dma: DMA channel info used for transmit.
 * @ops: DMA operations provided by the Samsung DMA wrapper.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info         *cntrlr_info;
	struct spi_device               *tgl_spi;
	struct list_head                queue;
	spinlock_t                      lock;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct samsung_dma_ops		*ops;
};
175
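/*
 * Illustrative sketch (not part of the original driver): the platform data
 * consumed above (struct s3c64xx_spi_info as @cntrlr_info, and
 * struct s3c64xx_spi_csinfo via spi->controller_data) is normally wired up
 * in a board file. Every name and value below is an assumption made for
 * illustration only:
 *
 *	static int my_cfg_gpio(struct platform_device *pdev)
 *	{
 *		return 0;	// pin-mux setup would go here
 *	}
 *
 *	static struct s3c64xx_spi_info my_spi0_pdata = {
 *		.fifo_lvl_mask	= 0x7f,		// assumed per-SoC values
 *		.rx_lvl_offset	= 13,
 *		.num_cs		= 1,
 *		.cfg_gpio	= my_cfg_gpio,
 *	};
 *
 *	static void my_cs_set_level(unsigned line, int lvl)
 *	{
 *		gpio_set_value(line, lvl);
 *	}
 *
 *	static struct s3c64xx_spi_csinfo my_cs = {
 *		.line		= S3C64XX_GPC(3),	// assumed CS GPIO
 *		.set_level	= my_cs_set_level,
 *		.fb_delay	= 0x2,
 *	};
 *
 *	static struct spi_board_info my_board_info[] __initdata = {
 *		{
 *			.modalias	 = "spidev",
 *			.max_speed_hz	 = 10000000,
 *			.bus_num	 = 0,
 *			.chip_select	 = 0,
 *			.mode		 = SPI_MODE_0,
 *			.controller_data = &my_cs,
 *		},
 *	};
 *
 *	spi_register_board_info(my_board_info, ARRAY_SIZE(my_board_info));
 */
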
176static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
177 .name = "samsung-spi-dma",
178};
179
180static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
181{
Jassi Brarad7de722010-01-20 13:49:44 -0700182 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000183 void __iomem *regs = sdd->regs;
184 unsigned long loops;
185 u32 val;
186
187 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
188
189 val = readl(regs + S3C64XX_SPI_CH_CFG);
190 val |= S3C64XX_SPI_CH_SW_RST;
191 val &= ~S3C64XX_SPI_CH_HS_EN;
192 writel(val, regs + S3C64XX_SPI_CH_CFG);
193
194 /* Flush TxFIFO*/
195 loops = msecs_to_loops(1);
196 do {
197 val = readl(regs + S3C64XX_SPI_STATUS);
198 } while (TX_FIFO_LVL(val, sci) && loops--);
199
Mark Brownbe7852a2010-08-23 17:40:56 +0100200 if (loops == 0)
201 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
202
Jassi Brar230d42d2009-11-30 07:39:42 +0000203 /* Flush RxFIFO*/
204 loops = msecs_to_loops(1);
205 do {
206 val = readl(regs + S3C64XX_SPI_STATUS);
207 if (RX_FIFO_LVL(val, sci))
208 readl(regs + S3C64XX_SPI_RX_DATA);
209 else
210 break;
211 } while (loops--);
212
Mark Brownbe7852a2010-08-23 17:40:56 +0100213 if (loops == 0)
214 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
215
Jassi Brar230d42d2009-11-30 07:39:42 +0000216 val = readl(regs + S3C64XX_SPI_CH_CFG);
217 val &= ~S3C64XX_SPI_CH_SW_RST;
218 writel(val, regs + S3C64XX_SPI_CH_CFG);
219
220 val = readl(regs + S3C64XX_SPI_MODE_CFG);
221 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
222 writel(val, regs + S3C64XX_SPI_MODE_CFG);
223
224 val = readl(regs + S3C64XX_SPI_CH_CFG);
225 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
226 writel(val, regs + S3C64XX_SPI_CH_CFG);
227}
228
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900229static void s3c64xx_spi_dmacb(void *data)
Boojin Kim39d3e802011-09-02 09:44:41 +0900230{
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900231 struct s3c64xx_spi_driver_data *sdd;
232 struct s3c64xx_spi_dma_data *dma = data;
Boojin Kim39d3e802011-09-02 09:44:41 +0900233 unsigned long flags;
234
Kyoungil Kim054ebcc2012-03-10 09:48:46 +0900235 if (dma->direction == DMA_DEV_TO_MEM)
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900236 sdd = container_of(data,
237 struct s3c64xx_spi_driver_data, rx_dma);
238 else
239 sdd = container_of(data,
240 struct s3c64xx_spi_driver_data, tx_dma);
241
Boojin Kim39d3e802011-09-02 09:44:41 +0900242 spin_lock_irqsave(&sdd->lock, flags);
243
Kyoungil Kim054ebcc2012-03-10 09:48:46 +0900244 if (dma->direction == DMA_DEV_TO_MEM) {
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900245 sdd->state &= ~RXBUSY;
246 if (!(sdd->state & TXBUSY))
247 complete(&sdd->xfer_completion);
248 } else {
249 sdd->state &= ~TXBUSY;
250 if (!(sdd->state & RXBUSY))
251 complete(&sdd->xfer_completion);
252 }
Boojin Kim39d3e802011-09-02 09:44:41 +0900253
254 spin_unlock_irqrestore(&sdd->lock, flags);
255}
256
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900257static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
258 unsigned len, dma_addr_t buf)
Boojin Kim39d3e802011-09-02 09:44:41 +0900259{
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900260 struct s3c64xx_spi_driver_data *sdd;
Boojin Kim4969c322012-06-19 13:27:03 +0900261 struct samsung_dma_prep info;
262 struct samsung_dma_config config;
Boojin Kim39d3e802011-09-02 09:44:41 +0900263
Boojin Kim4969c322012-06-19 13:27:03 +0900264 if (dma->direction == DMA_DEV_TO_MEM) {
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900265 sdd = container_of((void *)dma,
266 struct s3c64xx_spi_driver_data, rx_dma);
Boojin Kim4969c322012-06-19 13:27:03 +0900267 config.direction = sdd->rx_dma.direction;
268 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
269 config.width = sdd->cur_bpw / 8;
270 sdd->ops->config(sdd->rx_dma.ch, &config);
271 } else {
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900272 sdd = container_of((void *)dma,
273 struct s3c64xx_spi_driver_data, tx_dma);
Boojin Kim4969c322012-06-19 13:27:03 +0900274 config.direction = sdd->tx_dma.direction;
275 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
276 config.width = sdd->cur_bpw / 8;
277 sdd->ops->config(sdd->tx_dma.ch, &config);
278 }
Boojin Kim39d3e802011-09-02 09:44:41 +0900279
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900280 info.cap = DMA_SLAVE;
281 info.len = len;
282 info.fp = s3c64xx_spi_dmacb;
283 info.fp_param = dma;
284 info.direction = dma->direction;
285 info.buf = buf;
Boojin Kim39d3e802011-09-02 09:44:41 +0900286
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900287 sdd->ops->prepare(dma->ch, &info);
288 sdd->ops->trigger(dma->ch);
289}
290
291static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
292{
Boojin Kim4969c322012-06-19 13:27:03 +0900293 struct samsung_dma_req req;
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900294
295 sdd->ops = samsung_dma_get_ops();
296
Boojin Kim4969c322012-06-19 13:27:03 +0900297 req.cap = DMA_SLAVE;
298 req.client = &s3c64xx_spi_dma_client;
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900299
Boojin Kim4969c322012-06-19 13:27:03 +0900300 sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req);
301 sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900302
303 return 1;
Boojin Kim39d3e802011-09-02 09:44:41 +0900304}
305
Jassi Brar230d42d2009-11-30 07:39:42 +0000306static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
307 struct spi_device *spi,
308 struct spi_transfer *xfer, int dma_mode)
309{
Jassi Brarad7de722010-01-20 13:49:44 -0700310 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000311 void __iomem *regs = sdd->regs;
312 u32 modecfg, chcfg;
313
314 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
315 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
316
317 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
318 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
319
320 if (dma_mode) {
321 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
322 } else {
		/* Always shift data into the FIFO, even if the xfer is
		 * Tx only; this helps set the PCKT_CNT value so that
		 * exactly the required number of clocks is generated.
		 */
327 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
328 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
329 | S3C64XX_SPI_PACKET_CNT_EN,
330 regs + S3C64XX_SPI_PACKET_CNT);
331 }
332
333 if (xfer->tx_buf != NULL) {
334 sdd->state |= TXBUSY;
335 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
336 if (dma_mode) {
337 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900338 prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
Jassi Brar230d42d2009-11-30 07:39:42 +0000339 } else {
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900340 switch (sdd->cur_bpw) {
341 case 32:
342 iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
343 xfer->tx_buf, xfer->len / 4);
344 break;
345 case 16:
346 iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
347 xfer->tx_buf, xfer->len / 2);
348 break;
349 default:
350 iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
351 xfer->tx_buf, xfer->len);
352 break;
353 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000354 }
355 }
356
357 if (xfer->rx_buf != NULL) {
358 sdd->state |= RXBUSY;
359
360 if (sci->high_speed && sdd->cur_speed >= 30000000UL
361 && !(sdd->cur_mode & SPI_CPHA))
362 chcfg |= S3C64XX_SPI_CH_HS_EN;
363
364 if (dma_mode) {
365 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
366 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
367 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
368 | S3C64XX_SPI_PACKET_CNT_EN,
369 regs + S3C64XX_SPI_PACKET_CNT);
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900370 prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
Jassi Brar230d42d2009-11-30 07:39:42 +0000371 }
372 }
373
374 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
375 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
376}
377
378static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
379 struct spi_device *spi)
380{
381 struct s3c64xx_spi_csinfo *cs;
382
383 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
384 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
385 /* Deselect the last toggled device */
386 cs = sdd->tgl_spi->controller_data;
Jassi Brarfa0fcde2010-01-20 13:49:45 -0700387 cs->set_level(cs->line,
388 spi->mode & SPI_CS_HIGH ? 0 : 1);
Jassi Brar230d42d2009-11-30 07:39:42 +0000389 }
390 sdd->tgl_spi = NULL;
391 }
392
393 cs = spi->controller_data;
Jassi Brarfa0fcde2010-01-20 13:49:45 -0700394 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
Jassi Brar230d42d2009-11-30 07:39:42 +0000395}
396
397static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
398 struct spi_transfer *xfer, int dma_mode)
399{
Jassi Brarad7de722010-01-20 13:49:44 -0700400 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000401 void __iomem *regs = sdd->regs;
402 unsigned long val;
403 int ms;
404
405 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
406 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
Mark Brown9d8f86b2010-09-07 16:37:52 +0100407 ms += 10; /* some tolerance */
Jassi Brar230d42d2009-11-30 07:39:42 +0000408
409 if (dma_mode) {
410 val = msecs_to_jiffies(ms) + 10;
411 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
412 } else {
Jassi Brarc3f139b2010-09-03 10:36:46 +0900413 u32 status;
Jassi Brar230d42d2009-11-30 07:39:42 +0000414 val = msecs_to_loops(ms);
415 do {
Jassi Brarc3f139b2010-09-03 10:36:46 +0900416 status = readl(regs + S3C64XX_SPI_STATUS);
417 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
Jassi Brar230d42d2009-11-30 07:39:42 +0000418 }
419
420 if (!val)
421 return -EIO;
422
423 if (dma_mode) {
424 u32 status;
425
		/*
		 * The Tx DMA completes as soon as the data has been written
		 * into the FIFO, without waiting for the actual transmission
		 * on the bus to finish. The Rx DMA completes only after data
		 * has been read back out of the FIFO, which requires the bus
		 * transmission to finish, so no extra wait is needed when the
		 * xfer involves Rx (with or without Tx).
		 */
433 if (xfer->rx_buf == NULL) {
434 val = msecs_to_loops(10);
435 status = readl(regs + S3C64XX_SPI_STATUS);
436 while ((TX_FIFO_LVL(status, sci)
437 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
438 && --val) {
439 cpu_relax();
440 status = readl(regs + S3C64XX_SPI_STATUS);
441 }
442
443 if (!val)
444 return -EIO;
445 }
446 } else {
Jassi Brar230d42d2009-11-30 07:39:42 +0000447 /* If it was only Tx */
448 if (xfer->rx_buf == NULL) {
449 sdd->state &= ~TXBUSY;
450 return 0;
451 }
452
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900453 switch (sdd->cur_bpw) {
454 case 32:
455 ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
456 xfer->rx_buf, xfer->len / 4);
457 break;
458 case 16:
459 ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
460 xfer->rx_buf, xfer->len / 2);
461 break;
462 default:
463 ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
464 xfer->rx_buf, xfer->len);
465 break;
466 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000467 sdd->state &= ~RXBUSY;
468 }
469
470 return 0;
471}
472
473static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
474 struct spi_device *spi)
475{
476 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
477
478 if (sdd->tgl_spi == spi)
479 sdd->tgl_spi = NULL;
480
Jassi Brarfa0fcde2010-01-20 13:49:45 -0700481 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
Jassi Brar230d42d2009-11-30 07:39:42 +0000482}
483
484static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
485{
Jassi Brarb42a81c2010-09-29 17:31:33 +0900486 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000487 void __iomem *regs = sdd->regs;
488 u32 val;
489
490 /* Disable Clock */
Jassi Brarb42a81c2010-09-29 17:31:33 +0900491 if (sci->clk_from_cmu) {
492 clk_disable(sdd->src_clk);
493 } else {
494 val = readl(regs + S3C64XX_SPI_CLK_CFG);
495 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
496 writel(val, regs + S3C64XX_SPI_CLK_CFG);
497 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000498
499 /* Set Polarity and Phase */
500 val = readl(regs + S3C64XX_SPI_CH_CFG);
501 val &= ~(S3C64XX_SPI_CH_SLAVE |
502 S3C64XX_SPI_CPOL_L |
503 S3C64XX_SPI_CPHA_B);
504
505 if (sdd->cur_mode & SPI_CPOL)
506 val |= S3C64XX_SPI_CPOL_L;
507
508 if (sdd->cur_mode & SPI_CPHA)
509 val |= S3C64XX_SPI_CPHA_B;
510
511 writel(val, regs + S3C64XX_SPI_CH_CFG);
512
513 /* Set Channel & DMA Mode */
514 val = readl(regs + S3C64XX_SPI_MODE_CFG);
515 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
516 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
517
518 switch (sdd->cur_bpw) {
519 case 32:
520 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900521 val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
Jassi Brar230d42d2009-11-30 07:39:42 +0000522 break;
523 case 16:
524 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900525 val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
Jassi Brar230d42d2009-11-30 07:39:42 +0000526 break;
527 default:
528 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900529 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
Jassi Brar230d42d2009-11-30 07:39:42 +0000530 break;
531 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000532
533 writel(val, regs + S3C64XX_SPI_MODE_CFG);
534
Jassi Brarb42a81c2010-09-29 17:31:33 +0900535 if (sci->clk_from_cmu) {
536 /* Configure Clock */
		/* The SPI bit clock is the source clock divided by two */
538 clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
539 /* Enable Clock */
540 clk_enable(sdd->src_clk);
541 } else {
542 /* Configure Clock */
543 val = readl(regs + S3C64XX_SPI_CLK_CFG);
544 val &= ~S3C64XX_SPI_PSR_MASK;
545 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
546 & S3C64XX_SPI_PSR_MASK);
547 writel(val, regs + S3C64XX_SPI_CLK_CFG);
Jassi Brar230d42d2009-11-30 07:39:42 +0000548
Jassi Brarb42a81c2010-09-29 17:31:33 +0900549 /* Enable Clock */
550 val = readl(regs + S3C64XX_SPI_CLK_CFG);
551 val |= S3C64XX_SPI_ENCLK_ENABLE;
552 writel(val, regs + S3C64XX_SPI_CLK_CFG);
553 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000554}
555
Jassi Brar230d42d2009-11-30 07:39:42 +0000556#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
557
558static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
559 struct spi_message *msg)
560{
Jassi Brare02ddd42010-09-29 17:31:31 +0900561 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000562 struct device *dev = &sdd->pdev->dev;
563 struct spi_transfer *xfer;
564
565 if (msg->is_dma_mapped)
566 return 0;
567
568 /* First mark all xfer unmapped */
569 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
570 xfer->rx_dma = XFER_DMAADDR_INVALID;
571 xfer->tx_dma = XFER_DMAADDR_INVALID;
572 }
573
574 /* Map until end or first fail */
575 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
576
Jassi Brare02ddd42010-09-29 17:31:31 +0900577 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
578 continue;
579
Jassi Brar230d42d2009-11-30 07:39:42 +0000580 if (xfer->tx_buf != NULL) {
Jassi Brar251ee472010-09-03 10:36:26 +0900581 xfer->tx_dma = dma_map_single(dev,
582 (void *)xfer->tx_buf, xfer->len,
583 DMA_TO_DEVICE);
Jassi Brar230d42d2009-11-30 07:39:42 +0000584 if (dma_mapping_error(dev, xfer->tx_dma)) {
585 dev_err(dev, "dma_map_single Tx failed\n");
586 xfer->tx_dma = XFER_DMAADDR_INVALID;
587 return -ENOMEM;
588 }
589 }
590
591 if (xfer->rx_buf != NULL) {
592 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
593 xfer->len, DMA_FROM_DEVICE);
594 if (dma_mapping_error(dev, xfer->rx_dma)) {
595 dev_err(dev, "dma_map_single Rx failed\n");
596 dma_unmap_single(dev, xfer->tx_dma,
597 xfer->len, DMA_TO_DEVICE);
598 xfer->tx_dma = XFER_DMAADDR_INVALID;
599 xfer->rx_dma = XFER_DMAADDR_INVALID;
600 return -ENOMEM;
601 }
602 }
603 }
604
605 return 0;
606}
607
608static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
609 struct spi_message *msg)
610{
Jassi Brare02ddd42010-09-29 17:31:31 +0900611 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000612 struct device *dev = &sdd->pdev->dev;
613 struct spi_transfer *xfer;
614
615 if (msg->is_dma_mapped)
616 return;
617
618 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
619
Jassi Brare02ddd42010-09-29 17:31:31 +0900620 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
621 continue;
622
Jassi Brar230d42d2009-11-30 07:39:42 +0000623 if (xfer->rx_buf != NULL
624 && xfer->rx_dma != XFER_DMAADDR_INVALID)
625 dma_unmap_single(dev, xfer->rx_dma,
626 xfer->len, DMA_FROM_DEVICE);
627
628 if (xfer->tx_buf != NULL
629 && xfer->tx_dma != XFER_DMAADDR_INVALID)
630 dma_unmap_single(dev, xfer->tx_dma,
631 xfer->len, DMA_TO_DEVICE);
632 }
633}
634
Mark Brownad2a99a2012-02-15 14:48:32 -0800635static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
636 struct spi_message *msg)
Jassi Brar230d42d2009-11-30 07:39:42 +0000637{
Mark Brownad2a99a2012-02-15 14:48:32 -0800638 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
Jassi Brarad7de722010-01-20 13:49:44 -0700639 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000640 struct spi_device *spi = msg->spi;
641 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
642 struct spi_transfer *xfer;
643 int status = 0, cs_toggle = 0;
644 u32 speed;
645 u8 bpw;
646
	/* If the master (controller) state differs from what the slave needs */
648 if (sdd->cur_speed != spi->max_speed_hz
649 || sdd->cur_mode != spi->mode
650 || sdd->cur_bpw != spi->bits_per_word) {
651 sdd->cur_bpw = spi->bits_per_word;
652 sdd->cur_speed = spi->max_speed_hz;
653 sdd->cur_mode = spi->mode;
654 s3c64xx_spi_config(sdd);
655 }
656
657 /* Map all the transfers if needed */
658 if (s3c64xx_spi_map_mssg(sdd, msg)) {
659 dev_err(&spi->dev,
660 "Xfer: Unable to map message buffers!\n");
661 status = -ENOMEM;
662 goto out;
663 }
664
665 /* Configure feedback delay */
666 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
667
668 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
669
670 unsigned long flags;
671 int use_dma;
672
673 INIT_COMPLETION(sdd->xfer_completion);
674
675 /* Only BPW and Speed may change across transfers */
676 bpw = xfer->bits_per_word ? : spi->bits_per_word;
677 speed = xfer->speed_hz ? : spi->max_speed_hz;
678
Jassi Brar0c92ecf2010-09-29 17:31:33 +0900679 if (xfer->len % (bpw / 8)) {
680 dev_err(&spi->dev,
681 "Xfer length(%u) not a multiple of word size(%u)\n",
682 xfer->len, bpw / 8);
683 status = -EIO;
684 goto out;
685 }
686
Jassi Brar230d42d2009-11-30 07:39:42 +0000687 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
688 sdd->cur_bpw = bpw;
689 sdd->cur_speed = speed;
690 s3c64xx_spi_config(sdd);
691 }
692
693 /* Polling method for xfers not bigger than FIFO capacity */
694 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
695 use_dma = 0;
696 else
697 use_dma = 1;
698
699 spin_lock_irqsave(&sdd->lock, flags);
700
		/* Mark as pending only the work that is to be done */
702 sdd->state &= ~RXBUSY;
703 sdd->state &= ~TXBUSY;
704
705 enable_datapath(sdd, spi, xfer, use_dma);
706
707 /* Slave Select */
708 enable_cs(sdd, spi);
709
710 /* Start the signals */
711 S3C64XX_SPI_ACT(sdd);
712
713 spin_unlock_irqrestore(&sdd->lock, flags);
714
715 status = wait_for_xfer(sdd, xfer, use_dma);
716
		/* Quiesce the signals */
718 S3C64XX_SPI_DEACT(sdd);
719
720 if (status) {
Joe Perches8a349d42010-02-02 07:22:13 +0000721 dev_err(&spi->dev, "I/O Error: "
722 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
Jassi Brar230d42d2009-11-30 07:39:42 +0000723 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
724 (sdd->state & RXBUSY) ? 'f' : 'p',
725 (sdd->state & TXBUSY) ? 'f' : 'p',
726 xfer->len);
727
728 if (use_dma) {
729 if (xfer->tx_buf != NULL
730 && (sdd->state & TXBUSY))
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900731 sdd->ops->stop(sdd->tx_dma.ch);
Jassi Brar230d42d2009-11-30 07:39:42 +0000732 if (xfer->rx_buf != NULL
733 && (sdd->state & RXBUSY))
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900734 sdd->ops->stop(sdd->rx_dma.ch);
Jassi Brar230d42d2009-11-30 07:39:42 +0000735 }
736
737 goto out;
738 }
739
740 if (xfer->delay_usecs)
741 udelay(xfer->delay_usecs);
742
743 if (xfer->cs_change) {
			/* Hint that the next mssg is going to be
			   for the same device */
746 if (list_is_last(&xfer->transfer_list,
747 &msg->transfers))
748 cs_toggle = 1;
749 else
750 disable_cs(sdd, spi);
751 }
752
753 msg->actual_length += xfer->len;
754
755 flush_fifo(sdd);
756 }
757
758out:
759 if (!cs_toggle || status)
760 disable_cs(sdd, spi);
761 else
762 sdd->tgl_spi = spi;
763
764 s3c64xx_spi_unmap_mssg(sdd, msg);
765
766 msg->status = status;
767
Mark Brownad2a99a2012-02-15 14:48:32 -0800768 spi_finalize_current_message(master);
769
770 return 0;
Jassi Brar230d42d2009-11-30 07:39:42 +0000771}
772
Mark Brownad2a99a2012-02-15 14:48:32 -0800773static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
Jassi Brar230d42d2009-11-30 07:39:42 +0000774{
Mark Brownad2a99a2012-02-15 14:48:32 -0800775 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
Jassi Brar230d42d2009-11-30 07:39:42 +0000776
777 /* Acquire DMA channels */
778 while (!acquire_dma(sdd))
779 msleep(10);
780
Mark Brownb97b6622011-12-04 00:58:06 +0000781 pm_runtime_get_sync(&sdd->pdev->dev);
782
Mark Brownad2a99a2012-02-15 14:48:32 -0800783 return 0;
784}
Jassi Brar230d42d2009-11-30 07:39:42 +0000785
Mark Brownad2a99a2012-02-15 14:48:32 -0800786static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
787{
788 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
Jassi Brar230d42d2009-11-30 07:39:42 +0000789
790 /* Free DMA channels */
Boojin Kim82ab8cd2011-09-02 09:44:42 +0900791 sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
792 sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
Mark Brownb97b6622011-12-04 00:58:06 +0000793
794 pm_runtime_put(&sdd->pdev->dev);
Jassi Brar230d42d2009-11-30 07:39:42 +0000795
796 return 0;
797}
798
799/*
800 * Here we only check the validity of requested configuration
801 * and save the configuration in a local data-structure.
802 * The controller is actually configured only just before we
803 * get a message to transfer.
804 */
805static int s3c64xx_spi_setup(struct spi_device *spi)
806{
807 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
808 struct s3c64xx_spi_driver_data *sdd;
Jassi Brarad7de722010-01-20 13:49:44 -0700809 struct s3c64xx_spi_info *sci;
Jassi Brar230d42d2009-11-30 07:39:42 +0000810 struct spi_message *msg;
Jassi Brar230d42d2009-11-30 07:39:42 +0000811 unsigned long flags;
812 int err = 0;
813
814 if (cs == NULL || cs->set_level == NULL) {
815 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
816 return -ENODEV;
817 }
818
819 sdd = spi_master_get_devdata(spi->master);
820 sci = sdd->cntrlr_info;
821
822 spin_lock_irqsave(&sdd->lock, flags);
823
824 list_for_each_entry(msg, &sdd->queue, queue) {
		/* Is some mssg already queued for this device? */
826 if (msg->spi == spi) {
827 dev_err(&spi->dev,
828 "setup: attempt while mssg in queue!\n");
829 spin_unlock_irqrestore(&sdd->lock, flags);
830 return -EBUSY;
831 }
832 }
833
Jassi Brar230d42d2009-11-30 07:39:42 +0000834 spin_unlock_irqrestore(&sdd->lock, flags);
835
836 if (spi->bits_per_word != 8
837 && spi->bits_per_word != 16
838 && spi->bits_per_word != 32) {
839 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
840 spi->bits_per_word);
841 err = -EINVAL;
842 goto setup_exit;
843 }
844
Mark Brownb97b6622011-12-04 00:58:06 +0000845 pm_runtime_get_sync(&sdd->pdev->dev);
846
Jassi Brar230d42d2009-11-30 07:39:42 +0000847 /* Check if we can provide the requested rate */
Jassi Brarb42a81c2010-09-29 17:31:33 +0900848 if (!sci->clk_from_cmu) {
849 u32 psr, speed;
Jassi Brar230d42d2009-11-30 07:39:42 +0000850
Jassi Brarb42a81c2010-09-29 17:31:33 +0900851 /* Max possible */
852 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
Jassi Brar230d42d2009-11-30 07:39:42 +0000853
Jassi Brarb42a81c2010-09-29 17:31:33 +0900854 if (spi->max_speed_hz > speed)
855 spi->max_speed_hz = speed;
Jassi Brar230d42d2009-11-30 07:39:42 +0000856
Jassi Brarb42a81c2010-09-29 17:31:33 +0900857 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
858 psr &= S3C64XX_SPI_PSR_MASK;
859 if (psr == S3C64XX_SPI_PSR_MASK)
860 psr--;
861
862 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
863 if (spi->max_speed_hz < speed) {
864 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
865 psr++;
866 } else {
867 err = -EINVAL;
868 goto setup_exit;
869 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000870 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000871
Jassi Brarb42a81c2010-09-29 17:31:33 +0900872 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
873 if (spi->max_speed_hz >= speed)
874 spi->max_speed_hz = speed;
875 else
876 err = -EINVAL;
877 }
Jassi Brar230d42d2009-11-30 07:39:42 +0000878
Mark Brownb97b6622011-12-04 00:58:06 +0000879 pm_runtime_put(&sdd->pdev->dev);
880
Jassi Brar230d42d2009-11-30 07:39:42 +0000881setup_exit:
882
883 /* setup() returns with device de-selected */
884 disable_cs(sdd, spi);
885
886 return err;
887}
888
Mark Brownc2573122011-11-10 10:57:32 +0000889static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
890{
891 struct s3c64xx_spi_driver_data *sdd = data;
892 struct spi_master *spi = sdd->master;
893 unsigned int val;
894
895 val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
896
897 val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
898 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
899 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
900 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
901
902 writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
903
904 if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
905 dev_err(&spi->dev, "RX overrun\n");
906 if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
907 dev_err(&spi->dev, "RX underrun\n");
908 if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
909 dev_err(&spi->dev, "TX overrun\n");
910 if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
911 dev_err(&spi->dev, "TX underrun\n");
912
913 return IRQ_HANDLED;
914}
915
Jassi Brar230d42d2009-11-30 07:39:42 +0000916static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
917{
Jassi Brarad7de722010-01-20 13:49:44 -0700918 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +0000919 void __iomem *regs = sdd->regs;
920 unsigned int val;
921
922 sdd->cur_speed = 0;
923
924 S3C64XX_SPI_DEACT(sdd);
925
926 /* Disable Interrupts - we use Polling if not DMA mode */
927 writel(0, regs + S3C64XX_SPI_INT_EN);
928
Jassi Brarb42a81c2010-09-29 17:31:33 +0900929 if (!sci->clk_from_cmu)
930 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
Jassi Brar230d42d2009-11-30 07:39:42 +0000931 regs + S3C64XX_SPI_CLK_CFG);
932 writel(0, regs + S3C64XX_SPI_MODE_CFG);
933 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
934
935 /* Clear any irq pending bits */
936 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
937 regs + S3C64XX_SPI_PENDING_CLR);
938
939 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
940
941 val = readl(regs + S3C64XX_SPI_MODE_CFG);
942 val &= ~S3C64XX_SPI_MODE_4BURST;
943 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
944 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
945 writel(val, regs + S3C64XX_SPI_MODE_CFG);
946
947 flush_fifo(sdd);
948}
949
950static int __init s3c64xx_spi_probe(struct platform_device *pdev)
951{
952 struct resource *mem_res, *dmatx_res, *dmarx_res;
953 struct s3c64xx_spi_driver_data *sdd;
Jassi Brarad7de722010-01-20 13:49:44 -0700954 struct s3c64xx_spi_info *sci;
Jassi Brar230d42d2009-11-30 07:39:42 +0000955 struct spi_master *master;
Mark Brownc2573122011-11-10 10:57:32 +0000956 int ret, irq;
Padmavathi Vennaa24d8502011-11-02 20:04:19 +0900957 char clk_name[16];
Jassi Brar230d42d2009-11-30 07:39:42 +0000958
959 if (pdev->id < 0) {
960 dev_err(&pdev->dev,
961 "Invalid platform device id-%d\n", pdev->id);
962 return -ENODEV;
963 }
964
965 if (pdev->dev.platform_data == NULL) {
966 dev_err(&pdev->dev, "platform_data missing!\n");
967 return -ENODEV;
968 }
969
Mark Browncc0fc0b2010-09-01 08:55:22 -0600970 sci = pdev->dev.platform_data;
Mark Browncc0fc0b2010-09-01 08:55:22 -0600971
	/* Check for availability of necessary resources */
973
974 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
975 if (dmatx_res == NULL) {
976 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
977 return -ENXIO;
978 }
979
980 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
981 if (dmarx_res == NULL) {
982 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
983 return -ENXIO;
984 }
985
986 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 if (mem_res == NULL) {
988 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
989 return -ENXIO;
990 }
991
Mark Brownc2573122011-11-10 10:57:32 +0000992 irq = platform_get_irq(pdev, 0);
993 if (irq < 0) {
994 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
995 return irq;
996 }
997
Jassi Brar230d42d2009-11-30 07:39:42 +0000998 master = spi_alloc_master(&pdev->dev,
999 sizeof(struct s3c64xx_spi_driver_data));
1000 if (master == NULL) {
1001 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1002 return -ENOMEM;
1003 }
1004
Jassi Brar230d42d2009-11-30 07:39:42 +00001005 platform_set_drvdata(pdev, master);
1006
1007 sdd = spi_master_get_devdata(master);
1008 sdd->master = master;
1009 sdd->cntrlr_info = sci;
1010 sdd->pdev = pdev;
1011 sdd->sfr_start = mem_res->start;
Boojin Kim82ab8cd2011-09-02 09:44:42 +09001012 sdd->tx_dma.dmach = dmatx_res->start;
Kyoungil Kim054ebcc2012-03-10 09:48:46 +09001013 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
Boojin Kim82ab8cd2011-09-02 09:44:42 +09001014 sdd->rx_dma.dmach = dmarx_res->start;
Kyoungil Kim054ebcc2012-03-10 09:48:46 +09001015 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
Jassi Brar230d42d2009-11-30 07:39:42 +00001016
1017 sdd->cur_bpw = 8;
1018
1019 master->bus_num = pdev->id;
1020 master->setup = s3c64xx_spi_setup;
Mark Brownad2a99a2012-02-15 14:48:32 -08001021 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1022 master->transfer_one_message = s3c64xx_spi_transfer_one_message;
1023 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
Jassi Brar230d42d2009-11-30 07:39:42 +00001024 master->num_chipselect = sci->num_cs;
1025 master->dma_alignment = 8;
1026 /* the spi->mode bits understood by this driver: */
1027 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1028
1029 if (request_mem_region(mem_res->start,
1030 resource_size(mem_res), pdev->name) == NULL) {
1031 dev_err(&pdev->dev, "Req mem region failed\n");
1032 ret = -ENXIO;
1033 goto err0;
1034 }
1035
1036 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
1037 if (sdd->regs == NULL) {
1038 dev_err(&pdev->dev, "Unable to remap IO\n");
1039 ret = -ENXIO;
1040 goto err1;
1041 }
1042
1043 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
1044 dev_err(&pdev->dev, "Unable to config gpio\n");
1045 ret = -EBUSY;
1046 goto err2;
1047 }
1048
1049 /* Setup clocks */
1050 sdd->clk = clk_get(&pdev->dev, "spi");
1051 if (IS_ERR(sdd->clk)) {
1052 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1053 ret = PTR_ERR(sdd->clk);
1054 goto err3;
1055 }
1056
1057 if (clk_enable(sdd->clk)) {
1058 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1059 ret = -EBUSY;
1060 goto err4;
1061 }
1062
Padmavathi Vennaa24d8502011-11-02 20:04:19 +09001063 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1064 sdd->src_clk = clk_get(&pdev->dev, clk_name);
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001065 if (IS_ERR(sdd->src_clk)) {
Jassi Brar230d42d2009-11-30 07:39:42 +00001066 dev_err(&pdev->dev,
Padmavathi Vennaa24d8502011-11-02 20:04:19 +09001067 "Unable to acquire clock '%s'\n", clk_name);
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001068 ret = PTR_ERR(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001069 goto err5;
1070 }
1071
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001072 if (clk_enable(sdd->src_clk)) {
Padmavathi Vennaa24d8502011-11-02 20:04:19 +09001073 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
Jassi Brar230d42d2009-11-30 07:39:42 +00001074 ret = -EBUSY;
1075 goto err6;
1076 }
1077
	/* Setup Default Mode */
1079 s3c64xx_spi_hwinit(sdd, pdev->id);
1080
1081 spin_lock_init(&sdd->lock);
1082 init_completion(&sdd->xfer_completion);
Jassi Brar230d42d2009-11-30 07:39:42 +00001083 INIT_LIST_HEAD(&sdd->queue);
1084
Mark Brownc2573122011-11-10 10:57:32 +00001085 ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
1086 if (ret != 0) {
1087 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1088 irq, ret);
Mark Brownad2a99a2012-02-15 14:48:32 -08001089 goto err7;
Mark Brownc2573122011-11-10 10:57:32 +00001090 }
1091
1092 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1093 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1094 sdd->regs + S3C64XX_SPI_INT_EN);
1095
Jassi Brar230d42d2009-11-30 07:39:42 +00001096 if (spi_register_master(master)) {
1097 dev_err(&pdev->dev, "cannot register SPI master\n");
1098 ret = -EBUSY;
Mark Brownad2a99a2012-02-15 14:48:32 -08001099 goto err8;
Jassi Brar230d42d2009-11-30 07:39:42 +00001100 }
1101
Joe Perches8a349d42010-02-02 07:22:13 +00001102 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
1103 "with %d Slaves attached\n",
Jassi Brar230d42d2009-11-30 07:39:42 +00001104 pdev->id, master->num_chipselect);
Joe Perches8a349d42010-02-02 07:22:13 +00001105 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
		mem_res->start, mem_res->end,
Boojin Kim82ab8cd2011-09-02 09:44:42 +09001107 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
Jassi Brar230d42d2009-11-30 07:39:42 +00001108
Mark Brownb97b6622011-12-04 00:58:06 +00001109 pm_runtime_enable(&pdev->dev);
1110
Jassi Brar230d42d2009-11-30 07:39:42 +00001111 return 0;
1112
1113err8:
Mark Brownad2a99a2012-02-15 14:48:32 -08001114 free_irq(irq, sdd);
Jassi Brar230d42d2009-11-30 07:39:42 +00001115err7:
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001116 clk_disable(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001117err6:
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001118 clk_put(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001119err5:
1120 clk_disable(sdd->clk);
1121err4:
1122 clk_put(sdd->clk);
1123err3:
1124err2:
1125 iounmap((void *) sdd->regs);
1126err1:
1127 release_mem_region(mem_res->start, resource_size(mem_res));
1128err0:
1129 platform_set_drvdata(pdev, NULL);
1130 spi_master_put(master);
1131
1132 return ret;
1133}
1134
1135static int s3c64xx_spi_remove(struct platform_device *pdev)
1136{
1137 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1138 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
Jassi Brar230d42d2009-11-30 07:39:42 +00001139 struct resource *mem_res;
Jassi Brar230d42d2009-11-30 07:39:42 +00001140
Mark Brownb97b6622011-12-04 00:58:06 +00001141 pm_runtime_disable(&pdev->dev);
1142
Jassi Brar230d42d2009-11-30 07:39:42 +00001143 spi_unregister_master(master);
1144
Mark Brownc2573122011-11-10 10:57:32 +00001145 writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1146
1147 free_irq(platform_get_irq(pdev, 0), sdd);
1148
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001149 clk_disable(sdd->src_clk);
1150 clk_put(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001151
1152 clk_disable(sdd->clk);
1153 clk_put(sdd->clk);
1154
1155 iounmap((void *) sdd->regs);
1156
1157 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Jassi Braref6c6802010-01-20 13:49:44 -07001158 if (mem_res != NULL)
1159 release_mem_region(mem_res->start, resource_size(mem_res));
Jassi Brar230d42d2009-11-30 07:39:42 +00001160
1161 platform_set_drvdata(pdev, NULL);
1162 spi_master_put(master);
1163
1164 return 0;
1165}
1166
1167#ifdef CONFIG_PM
Mark Browne25d0bf2011-12-04 00:36:18 +00001168static int s3c64xx_spi_suspend(struct device *dev)
Jassi Brar230d42d2009-11-30 07:39:42 +00001169{
Mark Browne25d0bf2011-12-04 00:36:18 +00001170 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
Jassi Brar230d42d2009-11-30 07:39:42 +00001171 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
Jassi Brar230d42d2009-11-30 07:39:42 +00001172
Mark Brownad2a99a2012-02-15 14:48:32 -08001173 spi_master_suspend(master);
Jassi Brar230d42d2009-11-30 07:39:42 +00001174
1175 /* Disable the clock */
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001176 clk_disable(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001177 clk_disable(sdd->clk);
1178
1179 sdd->cur_speed = 0; /* Output Clock is stopped */
1180
1181 return 0;
1182}
1183
Mark Browne25d0bf2011-12-04 00:36:18 +00001184static int s3c64xx_spi_resume(struct device *dev)
Jassi Brar230d42d2009-11-30 07:39:42 +00001185{
Mark Browne25d0bf2011-12-04 00:36:18 +00001186 struct platform_device *pdev = to_platform_device(dev);
1187 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
Jassi Brar230d42d2009-11-30 07:39:42 +00001188 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
Jassi Brarad7de722010-01-20 13:49:44 -07001189 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
Jassi Brar230d42d2009-11-30 07:39:42 +00001190
1191 sci->cfg_gpio(pdev);
1192
1193 /* Enable the clock */
Jassi Brarb0d5d6e2010-01-20 13:49:44 -07001194 clk_enable(sdd->src_clk);
Jassi Brar230d42d2009-11-30 07:39:42 +00001195 clk_enable(sdd->clk);
1196
1197 s3c64xx_spi_hwinit(sdd, pdev->id);
1198
Mark Brownad2a99a2012-02-15 14:48:32 -08001199 spi_master_resume(master);
Jassi Brar230d42d2009-11-30 07:39:42 +00001200
1201 return 0;
1202}
Jassi Brar230d42d2009-11-30 07:39:42 +00001203#endif /* CONFIG_PM */
1204
Mark Brownb97b6622011-12-04 00:58:06 +00001205#ifdef CONFIG_PM_RUNTIME
1206static int s3c64xx_spi_runtime_suspend(struct device *dev)
1207{
1208 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1209 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1210
1211 clk_disable(sdd->clk);
1212 clk_disable(sdd->src_clk);
1213
1214 return 0;
1215}
1216
1217static int s3c64xx_spi_runtime_resume(struct device *dev)
1218{
1219 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1220 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1221
1222 clk_enable(sdd->src_clk);
1223 clk_enable(sdd->clk);
1224
1225 return 0;
1226}
1227#endif /* CONFIG_PM_RUNTIME */
1228
Mark Browne25d0bf2011-12-04 00:36:18 +00001229static const struct dev_pm_ops s3c64xx_spi_pm = {
1230 SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
Mark Brownb97b6622011-12-04 00:58:06 +00001231 SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1232 s3c64xx_spi_runtime_resume, NULL)
Mark Browne25d0bf2011-12-04 00:36:18 +00001233};
1234
Jassi Brar230d42d2009-11-30 07:39:42 +00001235static struct platform_driver s3c64xx_spi_driver = {
1236 .driver = {
1237 .name = "s3c64xx-spi",
1238 .owner = THIS_MODULE,
Mark Browne25d0bf2011-12-04 00:36:18 +00001239 .pm = &s3c64xx_spi_pm,
Jassi Brar230d42d2009-11-30 07:39:42 +00001240 },
1241 .remove = s3c64xx_spi_remove,
Jassi Brar230d42d2009-11-30 07:39:42 +00001242};
1243MODULE_ALIAS("platform:s3c64xx-spi");
1244
1245static int __init s3c64xx_spi_init(void)
1246{
1247 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1248}
Mark Brownd2a787f2010-09-07 11:29:17 +01001249subsys_initcall(s3c64xx_spi_init);
Jassi Brar230d42d2009-11-30 07:39:42 +00001250
1251static void __exit s3c64xx_spi_exit(void)
1252{
1253 platform_driver_unregister(&s3c64xx_spi_driver);
1254}
1255module_exit(s3c64xx_spi_exit);
1256
1257MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1258MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1259MODULE_LICENSE("GPL");