/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	30

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32
#define MTK_SPI_PACKET_SIZE		1024

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	u32 pad_sel;
	struct clk *spi_clk, *parent_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mt6589_compat;
static const struct mtk_spi_compatible mt8135_compat;
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

/*
 * Default chip configuration, used unless the platform
 * supplies its own.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static void mtk_spi_config(struct mtk_spi *mdata,
			   struct mtk_chip_config *chip_config)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_CMD_REG);

	/* set the tx/rx bit order (MSB first or LSB first) */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	/* always enable the finish and pause interrupts */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
}

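/*
 * Called before each message: if the message's first transfer does not
 * request cs_change handling, bring the controller back to idle with a
 * software reset.
 */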
static int mtk_spi_prepare_hardware(struct spi_master *master)
{
	struct spi_transfer *trans;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_message *msg = master->cur_msg;

	trans = list_first_entry(&msg->transfers, struct spi_transfer,
				 transfer_list);
	if (trans->cs_change == 0) {
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}

	return 0;
}

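/*
 * Program the clock phase/polarity from the SPI mode bits and apply the
 * per-device chip configuration (falling back to the driver defaults).
 */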
static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u32 reg_val;
	u8 cpha, cpol;
	struct mtk_chip_config *chip_config;
	struct spi_device *spi = msg->spi;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	chip_config = spi->controller_data;
	if (!chip_config) {
		chip_config = (void *)&mtk_default_chip_info;
		spi->controller_data = chip_config;
	}
	mtk_spi_config(mdata, chip_config);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable)
		reg_val |= SPI_CMD_PAUSE_EN;
	else
		reg_val &= ~SPI_CMD_PAUSE_EN;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

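/*
 * Derive the SCK divider from the requested transfer speed and program the
 * SCK high/low, CS hold/setup and CS idle times (in source clock cycles)
 * into SPI_CFG0/SPI_CFG1.
 */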
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, high_time, low_time, holdtime,
	    setuptime, cs_idletime, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	high_time = (div + 1) / 2;
	low_time = (div + 1) / 2;
	holdtime = (div + 1) / 2 * 2;
	setuptime = (div + 1) / 2 * 2;
	cs_idletime = (div + 1) / 2 * 2;

	reg_val |= (((high_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
	reg_val |= (((low_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
	reg_val |= (((holdtime - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
	reg_val |= (((setuptime - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG0_REG);

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_idletime - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

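/*
 * The hardware moves data in packets of at most MTK_SPI_PACKET_SIZE bytes;
 * split the current chunk into packet_loop packets of packet_size bytes and
 * program both values into SPI_CFG1.
 */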
static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(unsigned, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

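/*
 * Kick off (or resume) a transfer: write ACT when the controller is idle,
 * RESUME when it was left in the paused state.
 */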
static void mtk_spi_enable_transfer(struct spi_master *master)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

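/*
 * Return the tail of a transfer that does not fit into whole
 * MTK_SPI_PACKET_SIZE packets; that remainder is carried over to a
 * later chunk.
 */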
static int mtk_spi_get_mult_delta(int xfer_len)
{
	int mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

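/*
 * Work out how many bytes the next DMA chunk should cover (xfer_len) and
 * how much of the current TX/RX scatterlist entries remains for later chunks.
 */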
static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

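/* Program the DMA source/destination addresses for the current chunk. */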
static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}

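/*
 * PIO path for short transfers: fill the TX FIFO with 32-bit words and start
 * the transfer. Returns 1 so the core waits for the completion raised from
 * the interrupt handler, which also drains the RX FIFO.
 */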
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = xfer->len;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	if (xfer->len % 4)
		cnt = xfer->len / 4 + 1;
	else
		cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

	mtk_spi_enable_transfer(master);

	return 1;
}

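/*
 * DMA path: enable TX/RX DMA in the command register, load the first
 * scatterlist entries, then program the packet registers and DMA addresses
 * for the first chunk and start it. Subsequent chunks are driven from the
 * interrupt handler.
 */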
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

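/* Dispatch a transfer to the DMA or FIFO path based on its length. */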
static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

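/* Use DMA only when the transfer does not fit into the 32-byte FIFO. */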
static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}

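/*
 * Transfer-complete interrupt. For FIFO transfers, drain the RX FIFO and
 * finalize. For DMA transfers, advance the TX/RX scatterlists; either set up
 * and start the next chunk, or disable DMA and finalize once both lists are
 * exhausted.
 */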
430static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
431{
Leilk Liu44f636d2015-08-20 17:19:06 +0800432 u32 cmd, reg_val, cnt;
Leilk Liua5682312015-08-07 15:19:50 +0800433 struct spi_master *master = dev_id;
434 struct mtk_spi *mdata = spi_master_get_devdata(master);
435 struct spi_transfer *trans = mdata->cur_transfer;
436
437 reg_val = readl(mdata->base + SPI_STATUS0_REG);
438 if (reg_val & 0x2)
439 mdata->state = MTK_SPI_PAUSED;
440 else
441 mdata->state = MTK_SPI_IDLE;
442
443 if (!master->can_dma(master, master->cur_msg->spi, trans)) {
Leilk Liua5682312015-08-07 15:19:50 +0800444 if (trans->rx_buf) {
Leilk Liu44f636d2015-08-20 17:19:06 +0800445 if (mdata->xfer_len % 4)
446 cnt = mdata->xfer_len / 4 + 1;
447 else
448 cnt = mdata->xfer_len / 4;
449 ioread32_rep(mdata->base + SPI_RX_DATA_REG,
450 trans->rx_buf, cnt);
Leilk Liua5682312015-08-07 15:19:50 +0800451 }
452 spi_finalize_current_transfer(master);
453 return IRQ_HANDLED;
454 }
455
456 if (mdata->tx_sgl)
457 trans->tx_dma += mdata->xfer_len;
458 if (mdata->rx_sgl)
459 trans->rx_dma += mdata->xfer_len;
460
461 if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
462 mdata->tx_sgl = sg_next(mdata->tx_sgl);
463 if (mdata->tx_sgl) {
464 trans->tx_dma = sg_dma_address(mdata->tx_sgl);
465 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
466 }
467 }
468 if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
469 mdata->rx_sgl = sg_next(mdata->rx_sgl);
470 if (mdata->rx_sgl) {
471 trans->rx_dma = sg_dma_address(mdata->rx_sgl);
472 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
473 }
474 }
475
476 if (!mdata->tx_sgl && !mdata->rx_sgl) {
477 /* spi disable dma */
478 cmd = readl(mdata->base + SPI_CMD_REG);
479 cmd &= ~SPI_CMD_TX_DMA;
480 cmd &= ~SPI_CMD_RX_DMA;
481 writel(cmd, mdata->base + SPI_CMD_REG);
482
483 spi_finalize_current_transfer(master);
484 return IRQ_HANDLED;
485 }
486
487 mtk_spi_update_mdata_len(master);
488 mtk_spi_setup_packet(master);
489 mtk_spi_setup_dma_addr(master, trans);
490 mtk_spi_enable_transfer(master);
491
492 return IRQ_HANDLED;
493}
494
static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		ret = of_property_read_u32(pdev->dev.of_node,
					   "mediatek,pad-select",
					   &mdata->pad_sel);
		if (ret) {
			dev_err(&pdev->dev, "failed to read pad select: %d\n",
				ret);
			goto err_put_master;
		}

		if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
			dev_err(&pdev->dev, "wrong pad-select: %u\n",
				mdata->pad_sel);
			ret = -EINVAL;
			goto err_put_master;
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		goto err_disable_clk;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		pm_runtime_disable(&pdev->dev);
		goto err_put_master;
	}

	return 0;

err_disable_clk:
	clk_disable_unprepare(mdata->spi_clk);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);
	clk_disable_unprepare(mdata->spi_clk);
	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0)
			return ret;
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	return clk_prepare_enable(mdata->spi_clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");