/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024

#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24

#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

#define SPI_CMD_ACT                       BIT(0)
#define SPI_CMD_RESUME                    BIT(1)
#define SPI_CMD_RST                       BIT(2)
#define SPI_CMD_PAUSE_EN                  BIT(4)
#define SPI_CMD_DEASSERT                  BIT(5)
#define SPI_CMD_CPHA                      BIT(8)
#define SPI_CMD_CPOL                      BIT(9)
#define SPI_CMD_RX_DMA                    BIT(10)
#define SPI_CMD_TX_DMA                    BIT(11)
#define SPI_CMD_TXMSBF                    BIT(12)
#define SPI_CMD_RXMSBF                    BIT(13)
#define SPI_CMD_RX_ENDIAN                 BIT(14)
#define SPI_CMD_TX_ENDIAN                 BIT(15)
#define SPI_CMD_FINISH_IE                 BIT(16)
#define SPI_CMD_PAUSE_IE                  BIT(17)

#define MT8173_SPI_MAX_PAD_SEL            3

#define MTK_SPI_IDLE                      0
#define MTK_SPI_PAUSED                    1

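/*
 * Transfers up to MTK_SPI_MAX_FIFO_SIZE bytes go through the PIO FIFO path
 * (see mtk_spi_can_dma()); longer transfers use DMA and are programmed as
 * packets of at most MTK_SPI_PACKET_SIZE bytes (see mtk_spi_setup_packet()).
 */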
#define MTK_SPI_MAX_FIFO_SIZE             32
#define MTK_SPI_PACKET_SIZE               1024

struct mtk_spi_compatible {
        bool need_pad_sel;
        /* Must explicitly send dummy Tx bytes to do Rx only transfer */
        bool must_tx;
};

struct mtk_spi {
        void __iomem *base;
        u32 state;
        u32 pad_sel;
        struct clk *spi_clk, *parent_clk;
        struct spi_transfer *cur_transfer;
        u32 xfer_len;
        struct scatterlist *tx_sgl, *rx_sgl;
        u32 tx_sgl_len, rx_sgl_len;
        const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mt6589_compat;
static const struct mtk_spi_compatible mt8135_compat;
static const struct mtk_spi_compatible mt8173_compat = {
        .need_pad_sel = true,
        .must_tx = true,
};

/* Default chip configuration, used unless the platform supplies its own. */
static const struct mtk_chip_config mtk_default_chip_info = {
        .rx_mlsb = 1,
        .tx_mlsb = 1,
};

static const struct of_device_id mtk_spi_of_match[] = {
        { .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
        { .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
        { .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
        {}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
        u32 reg_val;

        /* set the software reset bit in SPI_CMD_REG. */
        reg_val = readl(mdata->base + SPI_CMD_REG);
        reg_val |= SPI_CMD_RST;
        writel(reg_val, mdata->base + SPI_CMD_REG);

        reg_val = readl(mdata->base + SPI_CMD_REG);
        reg_val &= ~SPI_CMD_RST;
        writel(reg_val, mdata->base + SPI_CMD_REG);
}

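/*
 * Apply the per-chip options from struct mtk_chip_config: tx/rx bit order,
 * tx/rx endianness matching the CPU, the always-enabled finish/pause bits,
 * and, on SoCs that need it, the pad group chosen via "mediatek,pad-select".
 */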
static void mtk_spi_config(struct mtk_spi *mdata,
                           struct mtk_chip_config *chip_config)
{
        u32 reg_val;

        reg_val = readl(mdata->base + SPI_CMD_REG);

        /* set the tx/rx bit order: MSB first when tx_mlsb/rx_mlsb is set */
        if (chip_config->tx_mlsb)
                reg_val |= SPI_CMD_TXMSBF;
        else
                reg_val &= ~SPI_CMD_TXMSBF;
        if (chip_config->rx_mlsb)
                reg_val |= SPI_CMD_RXMSBF;
        else
                reg_val &= ~SPI_CMD_RXMSBF;

        /* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
        reg_val &= ~SPI_CMD_TX_ENDIAN;
        reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
        reg_val |= SPI_CMD_TX_ENDIAN;
        reg_val |= SPI_CMD_RX_ENDIAN;
#endif

        /* set finish and pause interrupt always enable */
        reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_EN;

        /* disable dma mode */
        reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

        /* disable deassert mode */
        reg_val &= ~SPI_CMD_DEASSERT;

        writel(reg_val, mdata->base + SPI_CMD_REG);

        /* pad select */
        if (mdata->dev_comp->need_pad_sel)
                writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
}

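/*
 * Runs before a message is transferred.  If the message's first transfer
 * does not request cs_change, the controller is presumably still paused
 * from the previous message, so drop back to the idle state and reset it.
 */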
static int mtk_spi_prepare_hardware(struct spi_master *master)
{
        struct spi_transfer *trans;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        struct spi_message *msg = master->cur_msg;

        trans = list_first_entry(&msg->transfers, struct spi_transfer,
                                 transfer_list);
        if (trans->cs_change == 0) {
                mdata->state = MTK_SPI_IDLE;
                mtk_spi_reset(mdata);
        }

        return 0;
}

static int mtk_spi_prepare_message(struct spi_master *master,
                                   struct spi_message *msg)
{
        u32 reg_val;
        u8 cpha, cpol;
        struct mtk_chip_config *chip_config;
        struct spi_device *spi = msg->spi;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        cpha = spi->mode & SPI_CPHA ? 1 : 0;
        cpol = spi->mode & SPI_CPOL ? 1 : 0;

        reg_val = readl(mdata->base + SPI_CMD_REG);
        if (cpha)
                reg_val |= SPI_CMD_CPHA;
        else
                reg_val &= ~SPI_CMD_CPHA;
        if (cpol)
                reg_val |= SPI_CMD_CPOL;
        else
                reg_val &= ~SPI_CMD_CPOL;
        writel(reg_val, mdata->base + SPI_CMD_REG);

        chip_config = spi->controller_data;
        if (!chip_config) {
                chip_config = (void *)&mtk_default_chip_info;
                spi->controller_data = chip_config;
        }
        mtk_spi_config(mdata, chip_config);

        return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
        u32 reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

        reg_val = readl(mdata->base + SPI_CMD_REG);
        if (!enable)
                reg_val |= SPI_CMD_PAUSE_EN;
        else
                reg_val &= ~SPI_CMD_PAUSE_EN;
        writel(reg_val, mdata->base + SPI_CMD_REG);
}

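/*
 * Program the timing registers from the transfer's clock rate.  The source
 * clock is divided by "div", with the SCK high and low phases each taking
 * (div + 1) / 2 source ticks; for example, a 100 MHz source clock and a
 * requested 25 MHz transfer give div = 4, i.e. 2 ticks high and 2 ticks
 * low.  CS hold, setup and idle times get roughly one full divider period.
 */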
static void mtk_spi_prepare_transfer(struct spi_master *master,
                                     struct spi_transfer *xfer)
{
        u32 spi_clk_hz, div, high_time, low_time, holdtime,
            setuptime, cs_idletime, reg_val = 0;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        spi_clk_hz = clk_get_rate(mdata->spi_clk);
        if (xfer->speed_hz < spi_clk_hz / 2)
                div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
        else
                div = 1;

        high_time = (div + 1) / 2;
        low_time = (div + 1) / 2;
        holdtime = (div + 1) / 2 * 2;
        setuptime = (div + 1) / 2 * 2;
        cs_idletime = (div + 1) / 2 * 2;

        reg_val |= (((high_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
        reg_val |= (((low_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
        reg_val |= (((holdtime - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
        reg_val |= (((setuptime - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
        writel(reg_val, mdata->base + SPI_CFG0_REG);

        reg_val = readl(mdata->base + SPI_CFG1_REG);
        reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
        reg_val |= (((cs_idletime - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
        writel(reg_val, mdata->base + SPI_CFG1_REG);
}

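/*
 * The controller expresses one transfer as packet_length * packet_loop
 * bytes, with packet_length capped at MTK_SPI_PACKET_SIZE.  Callers ensure
 * mdata->xfer_len is either at most one packet or a whole number of
 * packets; on the DMA path the remainder is deferred to a later round by
 * mtk_spi_update_mdata_len().
 */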
static void mtk_spi_setup_packet(struct spi_master *master)
{
        u32 packet_size, packet_loop, reg_val;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        packet_size = min_t(unsigned, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
        packet_loop = mdata->xfer_len / packet_size;

        reg_val = readl(mdata->base + SPI_CFG1_REG);
        reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
        reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
        reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
        writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
        int cmd;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        cmd = readl(mdata->base + SPI_CMD_REG);
        if (mdata->state == MTK_SPI_IDLE)
                cmd |= SPI_CMD_ACT;
        else
                cmd |= SPI_CMD_RESUME;
        writel(cmd, mdata->base + SPI_CMD_REG);
}

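/*
 * Helpers for splitting scatter-gather DMA into rounds the hardware can
 * take: mtk_spi_get_mult_delta() returns the tail that does not fit into
 * whole packets, and mtk_spi_update_mdata_len() picks how many bytes of
 * the current tx/rx segments to program next, keeping tx and rx in step.
 */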
static int mtk_spi_get_mult_delta(int xfer_len)
{
        int mult_delta;

        if (xfer_len > MTK_SPI_PACKET_SIZE)
                mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
        else
                mult_delta = 0;

        return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
        int mult_delta;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
                if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
                        mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
                        mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
                        mdata->rx_sgl_len = mult_delta;
                        mdata->tx_sgl_len -= mdata->xfer_len;
                } else {
                        mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
                        mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
                        mdata->tx_sgl_len = mult_delta;
                        mdata->rx_sgl_len -= mdata->xfer_len;
                }
        } else if (mdata->tx_sgl_len) {
                mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
                mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
                mdata->tx_sgl_len = mult_delta;
        } else if (mdata->rx_sgl_len) {
                mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
                mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
                mdata->rx_sgl_len = mult_delta;
        }
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
                                   struct spi_transfer *xfer)
{
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        if (mdata->tx_sgl)
                writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
        if (mdata->rx_sgl)
                writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}

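/*
 * PIO path: the whole tx buffer is pushed into the FIFO as 32-bit words
 * and the transfer is kicked off; any rx data is drained in the interrupt
 * handler.  Returning 1 tells the SPI core the transfer is still in flight
 * and will be finalized from the interrupt.
 */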
static int mtk_spi_fifo_transfer(struct spi_master *master,
                                 struct spi_device *spi,
                                 struct spi_transfer *xfer)
{
        int cnt;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        mdata->cur_transfer = xfer;
        mdata->xfer_len = xfer->len;
        mtk_spi_prepare_transfer(master, xfer);
        mtk_spi_setup_packet(master);

        if (xfer->len % 4)
                cnt = xfer->len / 4 + 1;
        else
                cnt = xfer->len / 4;
        iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

        mtk_spi_enable_transfer(master);

        return 1;
}

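/*
 * DMA path: program the first tx/rx scatterlist segments and start the
 * transfer; the interrupt handler walks the remaining segments and
 * re-arms the hardware until both lists are exhausted.
 */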
static int mtk_spi_dma_transfer(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        int cmd;
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        mdata->tx_sgl = NULL;
        mdata->rx_sgl = NULL;
        mdata->tx_sgl_len = 0;
        mdata->rx_sgl_len = 0;
        mdata->cur_transfer = xfer;

        mtk_spi_prepare_transfer(master, xfer);

        cmd = readl(mdata->base + SPI_CMD_REG);
        if (xfer->tx_buf)
                cmd |= SPI_CMD_TX_DMA;
        if (xfer->rx_buf)
                cmd |= SPI_CMD_RX_DMA;
        writel(cmd, mdata->base + SPI_CMD_REG);

        if (xfer->tx_buf)
                mdata->tx_sgl = xfer->tx_sg.sgl;
        if (xfer->rx_buf)
                mdata->rx_sgl = xfer->rx_sg.sgl;

        if (mdata->tx_sgl) {
                xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
                mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
        }
        if (mdata->rx_sgl) {
                xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
                mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
        }

        mtk_spi_update_mdata_len(master);
        mtk_spi_setup_packet(master);
        mtk_spi_setup_dma_addr(master, xfer);
        mtk_spi_enable_transfer(master);

        return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        if (master->can_dma(master, spi, xfer))
                return mtk_spi_dma_transfer(master, spi, xfer);
        else
                return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
                            struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}

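/*
 * Completion interrupt for both paths.  For FIFO transfers, read back any
 * rx data and finalize.  For DMA transfers, advance through the tx/rx
 * scatterlists, re-programming the DMA addresses and packet registers for
 * the next chunk, and finalize once both lists are finished.
 */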
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
        u32 cmd, reg_val, cnt;
        struct spi_master *master = dev_id;
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        struct spi_transfer *trans = mdata->cur_transfer;

        reg_val = readl(mdata->base + SPI_STATUS0_REG);
        if (reg_val & 0x2)
                mdata->state = MTK_SPI_PAUSED;
        else
                mdata->state = MTK_SPI_IDLE;

        if (!master->can_dma(master, master->cur_msg->spi, trans)) {
                if (trans->rx_buf) {
                        if (mdata->xfer_len % 4)
                                cnt = mdata->xfer_len / 4 + 1;
                        else
                                cnt = mdata->xfer_len / 4;
                        ioread32_rep(mdata->base + SPI_RX_DATA_REG,
                                     trans->rx_buf, cnt);
                }
                spi_finalize_current_transfer(master);
                return IRQ_HANDLED;
        }

        if (mdata->tx_sgl)
                trans->tx_dma += mdata->xfer_len;
        if (mdata->rx_sgl)
                trans->rx_dma += mdata->xfer_len;

        if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
                mdata->tx_sgl = sg_next(mdata->tx_sgl);
                if (mdata->tx_sgl) {
                        trans->tx_dma = sg_dma_address(mdata->tx_sgl);
                        mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
                }
        }
        if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
                mdata->rx_sgl = sg_next(mdata->rx_sgl);
                if (mdata->rx_sgl) {
                        trans->rx_dma = sg_dma_address(mdata->rx_sgl);
                        mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
                }
        }

        if (!mdata->tx_sgl && !mdata->rx_sgl) {
                /* spi disable dma */
                cmd = readl(mdata->base + SPI_CMD_REG);
                cmd &= ~SPI_CMD_TX_DMA;
                cmd &= ~SPI_CMD_RX_DMA;
                writel(cmd, mdata->base + SPI_CMD_REG);

                spi_finalize_current_transfer(master);
                return IRQ_HANDLED;
        }

        mtk_spi_update_mdata_len(master);
        mtk_spi_setup_packet(master);
        mtk_spi_setup_dma_addr(master, trans);
        mtk_spi_enable_transfer(master);

        return IRQ_HANDLED;
}

static int mtk_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct mtk_spi *mdata;
        const struct of_device_id *of_id;
        struct resource *res;
        int irq, ret;

        master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
        if (!master) {
                dev_err(&pdev->dev, "failed to alloc spi master\n");
                return -ENOMEM;
        }

        master->auto_runtime_pm = true;
        master->dev.of_node = pdev->dev.of_node;
        master->mode_bits = SPI_CPOL | SPI_CPHA;

        master->set_cs = mtk_spi_set_cs;
        master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
        master->prepare_message = mtk_spi_prepare_message;
        master->transfer_one = mtk_spi_transfer_one;
        master->can_dma = mtk_spi_can_dma;

        of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
        if (!of_id) {
                dev_err(&pdev->dev, "failed to probe of_node\n");
                ret = -EINVAL;
                goto err_put_master;
        }

        mdata = spi_master_get_devdata(master);
        mdata->dev_comp = of_id->data;
        if (mdata->dev_comp->must_tx)
                master->flags = SPI_MASTER_MUST_TX;

        if (mdata->dev_comp->need_pad_sel) {
                ret = of_property_read_u32(pdev->dev.of_node,
                                           "mediatek,pad-select",
                                           &mdata->pad_sel);
                if (ret) {
                        dev_err(&pdev->dev, "failed to read pad select: %d\n",
                                ret);
                        goto err_put_master;
                }

                if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
                        dev_err(&pdev->dev, "wrong pad-select: %u\n",
                                mdata->pad_sel);
                        ret = -EINVAL;
                        goto err_put_master;
                }
        }

        platform_set_drvdata(pdev, master);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                ret = -ENODEV;
                dev_err(&pdev->dev, "failed to determine base address\n");
                goto err_put_master;
        }

        mdata->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mdata->base)) {
                ret = PTR_ERR(mdata->base);
                goto err_put_master;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
                ret = irq;
                goto err_put_master;
        }

        if (!pdev->dev.dma_mask)
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

        ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
                               IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
        if (ret) {
                dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
                goto err_put_master;
        }

        mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
        if (IS_ERR(mdata->spi_clk)) {
                ret = PTR_ERR(mdata->spi_clk);
                dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
                goto err_put_master;
        }

        mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
        if (IS_ERR(mdata->parent_clk)) {
                ret = PTR_ERR(mdata->parent_clk);
                dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
                goto err_put_master;
        }

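        /*
         * Briefly enable spi-clk while reparenting it to parent-clk; it is
         * disabled again right after and is only kept running while the
         * device is runtime-active (see the runtime PM callbacks below).
         */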
        ret = clk_prepare_enable(mdata->spi_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
                goto err_put_master;
        }

        ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
                goto err_disable_clk;
        }

        clk_disable_unprepare(mdata->spi_clk);

        pm_runtime_enable(&pdev->dev);

        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret) {
                dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
                /* undo the pm_runtime_enable() done just above */
                pm_runtime_disable(&pdev->dev);
                goto err_put_master;
        }

        return 0;

err_disable_clk:
        clk_disable_unprepare(mdata->spi_clk);
err_put_master:
        spi_master_put(master);

        return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        pm_runtime_disable(&pdev->dev);

        mtk_spi_reset(mdata);
        clk_disable_unprepare(mdata->spi_clk);
        spi_master_put(master);

        return 0;
}

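/*
 * System sleep handlers: the SPI clock is only touched when the device is
 * not already runtime-suspended, so suspend/resume do not unbalance the
 * clk_prepare_enable() count managed by the runtime PM callbacks.
 */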
#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
        int ret;
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(mdata->spi_clk);

        return ret;
}

static int mtk_spi_resume(struct device *dev)
{
        int ret;
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        if (!pm_runtime_suspended(dev)) {
                ret = clk_prepare_enable(mdata->spi_clk);
                if (ret < 0)
                        return ret;
        }

        ret = spi_master_resume(master);
        if (ret < 0)
                clk_disable_unprepare(mdata->spi_clk);

        return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        clk_disable_unprepare(mdata->spi_clk);

        return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct mtk_spi *mdata = spi_master_get_devdata(master);

        return clk_prepare_enable(mdata->spi_clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
        SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
                           mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
        .driver = {
                .name = "mtk-spi",
                .pm = &mtk_spi_pm,
                .of_match_table = mtk_spi_of_match,
        },
        .probe = mtk_spi_probe,
        .remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");