/*
 * Copyright (C) 2009 Samsung Electronics Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>

#ifdef CONFIG_S3C_DMA
#include <mach/dma.h>
#endif

#define MAX_SPI_PORTS		3
#define S3C64XX_SPI_QUIRK_POLL	(1 << 0)

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_SLAVE_SEL		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR		0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_SLAVE_AUTO			(1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT		(1<<0)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK			(3<<0)

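/*
 * FIFO level helpers: the {TX,RX}_FIFO_LVL fields share the SPI_STATUS
 * register, but their width and the RX position differ per SoC, so the
 * mask and the RX shift come from the per-port config while TX_FIFO_LVL
 * always starts at bit 6. As a hedged example, for a hypothetical port
 * with fifo_lvl_mask = 0x7f and rx_lvl_offset = 13, TX_FIFO_LVL() would
 * return bits [12:6] and RX_FIFO_LVL() bits [19:13] of the status value.
 */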
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

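/*
 * Driver-private transfer state bits: enable_datapath() sets RXBUSY/TXBUSY
 * for the directions that take part in a transfer, and they are cleared by
 * the DMA completion callback (or by the PIO wait path), so the transfer
 * completion is only signalled once both directions are done.
 */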
#define RXBUSY				(1<<2)
#define TXBUSY				(1<<3)

struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of known quirks (S3C64XX_SPI_QUIRK_POLL).
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 *
 * The Samsung s3c64xx SPI controller is used on various Samsung SoCs, but
 * the instances differ in some aspects such as the size of the FIFO and
 * the SPI bus clock setup. Such differences are specified to the driver
 * using this structure, which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
};
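
/*
 * A minimal sketch of how a SoC would describe itself to this driver; the
 * values below are purely illustrative, the real per-SoC tables are passed
 * in through the platform device ID table or the OF match data consumed by
 * s3c64xx_spi_get_port_config() further down:
 *
 *	static struct s3c64xx_spi_port_config example_port_config = {
 *		.fifo_lvl_mask	= { 0x7f, 0x7f, 0x7f },
 *		.rx_lvl_offset	= 13,
 *		.tx_st_done	= 21,
 *		.high_speed	= true,
 *	};
 */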

/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS to indicate status.
 * @rx_dma: Local DMA state for the Rx channel.
 * @tx_dma: Local DMA state for the Tx channel.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 */
struct s3c64xx_spi_driver_data {
	void __iomem			*regs;
	struct clk			*clk;
	struct clk			*src_clk;
	struct platform_device		*pdev;
	struct spi_master		*master;
	struct s3c64xx_spi_info		*cntrlr_info;
	struct spi_device		*tgl_spi;
	spinlock_t			lock;
	unsigned long			sfr_start;
	struct completion		xfer_completion;
	unsigned			state;
	unsigned			cur_mode, cur_bpw;
	unsigned			cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
#ifdef CONFIG_S3C_DMA
	struct samsung_dma_ops		*ops;
#endif
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
	bool				cs_gpio;
};

static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}

static void s3c64xx_spi_dmacb(void *data)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
	unsigned long flags;

	if (dma->direction == DMA_DEV_TO_MEM)
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

	spin_lock_irqsave(&sdd->lock, flags);

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}

	spin_unlock_irqrestore(&sdd->lock, flags);
}

#ifdef CONFIG_S3C_DMA
/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */

static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
	.name = "samsung-spi-dma",
};

static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
			unsigned len, dma_addr_t buf)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct samsung_dma_prep info;
	struct samsung_dma_config config;

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = sdd->rx_dma.direction;
		config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.width = sdd->cur_bpw / 8;
		sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = sdd->tx_dma.direction;
		config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.width = sdd->cur_bpw / 8;
		sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
	}

	info.cap = DMA_SLAVE;
	info.len = len;
	info.fp = s3c64xx_spi_dmacb;
	info.fp_param = dma;
	info.direction = dma->direction;
	info.buf = buf;

	sdd->ops->prepare((enum dma_ch)dma->ch, &info);
	sdd->ops->trigger((enum dma_ch)dma->ch);
}

static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
	struct samsung_dma_req req;
	struct device *dev = &sdd->pdev->dev;

	sdd->ops = samsung_dma_get_ops();

	req.cap = DMA_SLAVE;
	req.client = &s3c64xx_spi_dma_client;

	sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
					sdd->rx_dma.dmach, &req, dev, "rx");
	sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
					sdd->tx_dma.dmach, &req, dev, "tx");

	return 1;
}

static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/*
	 * If no DMA resources were available at probe time, there is
	 * nothing to request here; otherwise keep trying to acquire
	 * the DMA channels.
	 */
	while (!is_polling(sdd) && !acquire_dma(sdd))
		usleep_range(10000, 11000);

	return 0;
}

static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
						&s3c64xx_spi_dma_client);
		sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
						&s3c64xx_spi_dma_client);
	}

	return 0;
}

static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
	sdd->ops->stop((enum dma_ch)dma->ch);
}
#else

static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
			struct sg_table *sgt)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc;

	memset(&config, 0, sizeof(config));

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	}

	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
				       dma->direction, DMA_PREP_INTERRUPT);

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(dma->ch);
}

static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
	dma_filter_fn filter = sdd->cntrlr_info->filter;
	struct device *dev = &sdd->pdev->dev;
	dma_cap_mask_t mask;
	int ret;

	if (!is_polling(sdd)) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->rx_dma.dmach, dev, "rx");
		if (!sdd->rx_dma.ch) {
			dev_err(dev, "Failed to get RX DMA channel\n");
			ret = -EBUSY;
			goto out;
		}

		sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
				   (void *)sdd->tx_dma.dmach, dev, "tx");
		if (!sdd->tx_dma.ch) {
			dev_err(dev, "Failed to get TX DMA channel\n");
			ret = -EBUSY;
			goto out_rx;
		}
	}

	ret = pm_runtime_get_sync(&sdd->pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device: %d\n", ret);
		goto out_tx;
	}

	return 0;

out_tx:
	dma_release_channel(sdd->tx_dma.ch);
out_rx:
	dma_release_channel(sdd->rx_dma.ch);
out:
	return ret;
}

static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	/* Free DMA channels */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	pm_runtime_put(&sdd->pdev->dev);
	return 0;
}

static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
				 struct s3c64xx_spi_dma_data *dma)
{
	dmaengine_terminate_all(dma->ch);
}
#endif

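/*
 * enable_datapath() programs SPI_PACKET_CNT with the transfer length
 * expressed in words of the current width: xfer->len * 8 / cur_bpw.
 * For example, a 32-byte transfer at 8 bits per word is 32 packets,
 * while the same buffer at 32 bits per word is 8 packets; the packet
 * counter lets the controller generate exactly that many word clocks.
 */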
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_device *spi,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift data into the FIFO, even if the xfer is
		 * Tx only; this helps set the PCKT_CNT value to generate
		 * exactly the number of clocks needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
#ifndef CONFIG_S3C_DMA
			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
#else
			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
#endif
		} else {
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
#ifndef CONFIG_S3C_DMA
			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
#else
			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
#endif
		}
	}

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}

static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}

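/*
 * Both wait paths below size their timeout from the transfer itself:
 * ms = len * 8 * 1000 / cur_speed is the time needed to clock 'len'
 * bytes at the current bus speed, plus 10 ms of tolerance. For example,
 * a 256-byte transfer at 1 MHz needs about 2 ms on the wire, so the
 * code waits roughly 12 ms before declaring the transfer failed.
 */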
static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within the timeout, proceed
	 * further; else return -EIO.
	 * DmaTx returns after simply writing data into the FIFO, without
	 * waiting for the real transmission on the bus to finish.
	 * DmaRx returns only after the DMA has read data from the FIFO,
	 * which needs the bus transmission to finish, so we don't worry
	 * if the xfer involved Rx (with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}

static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
			struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed at least once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}

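/*
 * When the clock does not come from the CMU, the bus rate is derived from
 * src_clk through the internal prescaler: rate = clk_get_rate(src_clk) / 2
 * / (PSR + 1). As a worked example with an assumed 66 MHz source clock, a
 * request for 10 MHz gives PSR = 2 (11 MHz, too fast), which the rounding
 * in s3c64xx_spi_setup() bumps to PSR = 3 for an effective 8.25 MHz.
 */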
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	u32 val;

	/* Disable Clock */
	if (sdd->port_conf->clk_from_cmu) {
		clk_disable_unprepare(sdd->src_clk);
	} else {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* Configure Clock */
		/* There is a half-multiplier before the SPI */
		clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		/* Enable Clock */
		clk_prepare_enable(sdd->src_clk);
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}
}

#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (is_polling(sdd) || msg->is_dma_mapped)
		return 0;

	/* First mark all xfer unmapped */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		xfer->rx_dma = XFER_DMAADDR_INVALID;
		xfer->tx_dma = XFER_DMAADDR_INVALID;
	}

	/* Map until end or first fail */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
			continue;

		if (xfer->tx_buf != NULL) {
			xfer->tx_dma = dma_map_single(dev,
					(void *)xfer->tx_buf, xfer->len,
					DMA_TO_DEVICE);
			if (dma_mapping_error(dev, xfer->tx_dma)) {
				dev_err(dev, "dma_map_single Tx failed\n");
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}

		if (xfer->rx_buf != NULL) {
			xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
						xfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, xfer->rx_dma)) {
				dev_err(dev, "dma_map_single Rx failed\n");
				dma_unmap_single(dev, xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
				xfer->tx_dma = XFER_DMAADDR_INVALID;
				xfer->rx_dma = XFER_DMAADDR_INVALID;
				return -ENOMEM;
			}
		}
	}

	return 0;
}

static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
						struct spi_message *msg)
{
	struct device *dev = &sdd->pdev->dev;
	struct spi_transfer *xfer;

	if (is_polling(sdd) || msg->is_dma_mapped)
		return;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {

		if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
			continue;

		if (xfer->rx_buf != NULL
				&& xfer->rx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->rx_dma,
					xfer->len, DMA_FROM_DEVICE);

		if (xfer->tx_buf != NULL
				&& xfer->tx_dma != XFER_DMAADDR_INVALID)
			dma_unmap_single(dev, xfer->tx_dma,
					xfer->len, DMA_TO_DEVICE);
	}
}

static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* Reconfigure if the master (controller) state differs from what the slave needs */
	if (sdd->cur_speed != spi->max_speed_hz
			|| sdd->cur_mode != spi->mode
			|| sdd->cur_bpw != spi->bits_per_word) {
		sdd->cur_bpw = spi->bits_per_word;
		sdd->cur_speed = spi->max_speed_hz;
		sdd->cur_mode = spi->mode;
		s3c64xx_spi_config(sdd);
	}

	/* Map all the transfers if needed */
	if (s3c64xx_spi_map_mssg(sdd, msg)) {
		dev_err(&spi->dev,
			"Xfer: Unable to map message buffers!\n");
		return -ENOMEM;
	}

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	return 0;
}

static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;
	int use_dma;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz ? : spi->max_speed_hz;

	if (xfer->len % (bpw / 8)) {
		dev_err(&spi->dev,
			"Xfer length(%u) not a multiple of word size(%u)\n",
			xfer->len, bpw / 8);
		return -EIO;
	}

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		s3c64xx_spi_config(sdd);
	}

	/* Polling method for xfers not bigger than FIFO capacity */
	use_dma = 0;
	if (!is_polling(sdd) &&
	    (sdd->rx_dma.ch && sdd->tx_dma.ch &&
	     (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
		use_dma = 1;

	spin_lock_irqsave(&sdd->lock, flags);

	/* Clear stale state; only what is to be done will be marked busy */
	sdd->state &= ~RXBUSY;
	sdd->state &= ~TXBUSY;

	enable_datapath(sdd, spi, xfer, use_dma);

	/* Start the signals */
	writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	spin_unlock_irqrestore(&sdd->lock, flags);

	if (use_dma)
		status = wait_for_dma(sdd, xfer);
	else
		status = wait_for_pio(sdd, xfer);

	if (status) {
		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
			xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
			(sdd->state & RXBUSY) ? 'f' : 'p',
			(sdd->state & TXBUSY) ? 'f' : 'p',
			xfer->len);

		if (use_dma) {
			if (xfer->tx_buf != NULL
			    && (sdd->state & TXBUSY))
				s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
			if (xfer->rx_buf != NULL
			    && (sdd->state & RXBUSY))
				s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
		}
	} else {
		flush_fifo(sdd);
	}

	return status;
}

static int s3c64xx_spi_unprepare_message(struct spi_master *master,
					 struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	s3c64xx_spi_unmap_mssg(sdd, msg);

	return 0;
}

static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	struct s3c64xx_spi_driver_data *sdd;
	u32 fb_delay = 0;

	sdd = spi_master_get_devdata(spi->master);
	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		dev_err(&spi->dev, "could not allocate memory for controller data\n");
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	/* The CS line is asserted/deasserted by the gpio pin */
	if (sdd->cs_gpio)
		cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);

	if (!gpio_is_valid(cs->line)) {
		dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
		kfree(cs);
		of_node_put(data_np);
		return ERR_PTR(-EINVAL);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}

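/*
 * A hedged sketch of the device tree layout consumed by the code above and
 * by s3c64xx_spi_parse_dt() further down. Node names, unit addresses and
 * the GPIO specifier are purely illustrative; only the property names are
 * taken from this driver (the controller-level "cs-gpio" is merely checked
 * for presence in probe()):
 *
 *	spi@13920000 {
 *		samsung,spi-src-clk = <0>;
 *		num-cs = <1>;
 *		cs-gpio;
 *
 *		some-slave@0 {
 *			controller-data {
 *				cs-gpio = <&gpb 1 0>;
 *				samsung,spi-feedback-delay = <2>;
 *			};
 *		};
 *	};
 */
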
/*
 * Here we only check the validity of the requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (!cs && spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi)) {
		/* Request gpio only if cs line is asserted by gpio pins */
		if (sdd->cs_gpio) {
			err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					cs->line, err);
				goto err_gpio_req;
			}

			spi->cs_gpio = cs->line;
		}

		spi_set_ctldata(spi, cs);
	}

	sci = sdd->cntrlr_info;

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_put(&sdd->pdev->dev);
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
	return 0;

setup_exit:
	pm_runtime_put(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	gpio_free(cs->line);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}

static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
	struct s3c64xx_spi_driver_data *sdd;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->cs_gpio) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
	}
	spi_set_ctldata(spi, NULL);
}

static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}

static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any pending irq bits: they must be set and then cleared */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	flush_fifo(sdd);
}

#ifdef CONFIG_OF
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci) {
		dev_err(dev, "memory allocation for spi_info failed\n");
		return ERR_PTR(-ENOMEM);
	}

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif

static const struct of_device_id s3c64xx_spi_dt_match[];

static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}

Grant Likely2deff8d2013-02-05 13:27:35 +00001240static int s3c64xx_spi_probe(struct platform_device *pdev)
Jassi Brar230d42d2009-11-30 07:39:42 +00001241{
Thomas Abraham2b908072012-07-13 07:15:15 +09001242 struct resource *mem_res;
Padmavathi Vennab5be04d2013-01-18 17:17:03 +05301243 struct resource *res;
Jassi Brar230d42d2009-11-30 07:39:42 +00001244 struct s3c64xx_spi_driver_data *sdd;
Jingoo Han8074cf02013-07-30 16:58:59 +09001245 struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
Jassi Brar230d42d2009-11-30 07:39:42 +00001246 struct spi_master *master;
Mark Brownc2573122011-11-10 10:57:32 +00001247 int ret, irq;
Padmavathi Vennaa24d8502011-11-02 20:04:19 +09001248 char clk_name[16];
Jassi Brar230d42d2009-11-30 07:39:42 +00001249
Thomas Abraham2b908072012-07-13 07:15:15 +09001250 if (!sci && pdev->dev.of_node) {
1251 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1252 if (IS_ERR(sci))
1253 return PTR_ERR(sci);
Jassi Brar230d42d2009-11-30 07:39:42 +00001254 }
1255
Thomas Abraham2b908072012-07-13 07:15:15 +09001256 if (!sci) {
Jassi Brar230d42d2009-11-30 07:39:42 +00001257 dev_err(&pdev->dev, "platform_data missing!\n");
1258 return -ENODEV;
1259 }
1260
Jassi Brar230d42d2009-11-30 07:39:42 +00001261 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1262 if (mem_res == NULL) {
1263 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1264 return -ENXIO;
1265 }
1266
Mark Brownc2573122011-11-10 10:57:32 +00001267 irq = platform_get_irq(pdev, 0);
1268 if (irq < 0) {
1269 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1270 return irq;
1271 }
1272
Jassi Brar230d42d2009-11-30 07:39:42 +00001273 master = spi_alloc_master(&pdev->dev,
1274 sizeof(struct s3c64xx_spi_driver_data));
1275 if (master == NULL) {
1276 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1277 return -ENOMEM;
1278 }
1279
Jassi Brar230d42d2009-11-30 07:39:42 +00001280 platform_set_drvdata(pdev, master);
1281
1282 sdd = spi_master_get_devdata(master);
Thomas Abrahama5238e32012-07-13 07:15:14 +09001283 sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
Jassi Brar230d42d2009-11-30 07:39:42 +00001284 sdd->master = master;
1285 sdd->cntrlr_info = sci;
1286 sdd->pdev = pdev;
1287 sdd->sfr_start = mem_res->start;
Girish K S3146bee2013-06-21 11:26:12 +05301288 sdd->cs_gpio = true;
Thomas Abraham2b908072012-07-13 07:15:15 +09001289 if (pdev->dev.of_node) {
Girish K S3146bee2013-06-21 11:26:12 +05301290 if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
1291 sdd->cs_gpio = false;
1292
Thomas Abraham2b908072012-07-13 07:15:15 +09001293 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1294 if (ret < 0) {
Jingoo Han75bf3362013-01-31 15:25:01 +09001295 dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1296 ret);
Thomas Abraham2b908072012-07-13 07:15:15 +09001297 goto err0;
1298 }
1299 sdd->port_id = ret;
1300 } else {
1301 sdd->port_id = pdev->id;
1302 }
Jassi Brar230d42d2009-11-30 07:39:42 +00001303
1304 sdd->cur_bpw = 8;
1305
Padmavathi Vennab5be04d2013-01-18 17:17:03 +05301306 if (!sdd->pdev->dev.of_node) {
1307 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1308 if (!res) {
Jingoo Handb0606e2013-07-15 15:11:57 +09001309 dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
Girish K S7e995552013-05-20 12:21:32 +05301310 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
1311 } else
1312 sdd->tx_dma.dmach = res->start;
Thomas Abraham2b908072012-07-13 07:15:15 +09001313
Padmavathi Vennab5be04d2013-01-18 17:17:03 +05301314 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1315 if (!res) {
Jingoo Handb0606e2013-07-15 15:11:57 +09001316 dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
Girish K S7e995552013-05-20 12:21:32 +05301317 sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
1318 } else
1319 sdd->rx_dma.dmach = res->start;
Padmavathi Vennab5be04d2013-01-18 17:17:03 +05301320 }
1321
1322 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1323 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
Thomas Abraham2b908072012-07-13 07:15:15 +09001324
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->unprepare_message = s3c64xx_spi_unprepare_message;
	master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;

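	/*
	 * Map the controller registers; devm_ioremap_resource() validates
	 * mem_res and reports failures itself.
	 */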
	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err0;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err0;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err0;
	}

	if (clk_prepare_enable(sdd->clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		ret = -EBUSY;
		goto err0;
	}

	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err2;
	}

	if (clk_prepare_enable(sdd->src_clk)) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		ret = -EBUSY;
		goto err2;
	}

	/* Set up the default mode */
	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err3;
	}

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

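	/*
	 * The controller clocks were enabled above, so mark the device
	 * active before enabling runtime PM.
	 */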
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err3;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
					mem_res,
					sdd->rx_dma.dmach, sdd->tx_dma.dmach);

	return 0;

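	/*
	 * Error unwinding: drop the clock enables and the reference taken
	 * by spi_alloc_master(); everything else acquired above is
	 * devm-managed.
	 */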
err3:
	clk_disable_unprepare(sdd->src_clk);
err2:
	clk_disable_unprepare(sdd->clk);
err0:
	spi_master_put(master);

	return ret;
}

static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	return 0;
}

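/*
 * System sleep: quiesce the SPI core first, then gate the clocks unless
 * runtime PM has already suspended the controller.
 */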
#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	int ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(sdd->clk);
		clk_disable_unprepare(sdd->src_clk);
	}

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	if (!pm_runtime_suspended(dev)) {
		clk_prepare_enable(sdd->src_clk);
		clk_prepare_enable(sdd->clk);
	}

	s3c64xx_spi_hwinit(sdd, sdd->port_id);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

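/* Runtime PM just gates and ungates the bus ("spi") and source clocks. */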
#ifdef CONFIG_PM_RUNTIME
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);

	return 0;
}

static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		return ret;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0) {
		clk_disable_unprepare(sdd->src_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};

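/*
 * Per-SoC port configuration: per-port FIFO level masks, status bit
 * positions, and feature flags (high-speed mode, CMU-provided clock,
 * polling-only quirk).
 */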
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
};

static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
};

static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_POLL,
};

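/*
 * Board-file probing matches on these platform device names; DT probing
 * uses the compatible strings below. Both resolve to a port config.
 */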
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	}, {
		.name		= "s5p64x0-spi",
		.driver_data	= (kernel_ulong_t)&s5p64x0_spi_port_config,
	}, {
		.name		= "s5pc100-spi",
		.driver_data	= (kernel_ulong_t)&s5pc100_spi_port_config,
	}, {
		.name		= "s5pv210-spi",
		.driver_data	= (kernel_ulong_t)&s5pv210_spi_port_config,
	}, {
		.name		= "exynos4210-spi",
		.driver_data	= (kernel_ulong_t)&exynos4_spi_port_config,
	},
	{ },
};

static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pc100-spi",
			.data = (void *)&s5pc100_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos5440-spi",
			.data = (void *)&exynos5440_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);

static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.owner	= THIS_MODULE,
		.pm	= &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");