/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define DRIVER_NAME "rockchip-spi"

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0			0x0000
#define ROCKCHIP_SPI_CTRLR1			0x0004
#define ROCKCHIP_SPI_SSIENR			0x0008
#define ROCKCHIP_SPI_SER			0x000c
#define ROCKCHIP_SPI_BAUDR			0x0010
#define ROCKCHIP_SPI_TXFTLR			0x0014
#define ROCKCHIP_SPI_RXFTLR			0x0018
#define ROCKCHIP_SPI_TXFLR			0x001c
#define ROCKCHIP_SPI_RXFLR			0x0020
#define ROCKCHIP_SPI_SR				0x0024
#define ROCKCHIP_SPI_IPR			0x0028
#define ROCKCHIP_SPI_IMR			0x002c
#define ROCKCHIP_SPI_ISR			0x0030
#define ROCKCHIP_SPI_RISR			0x0034
#define ROCKCHIP_SPI_ICR			0x0038
#define ROCKCHIP_SPI_DMACR			0x003c
#define ROCKCHIP_SPI_DMATDLR			0x0040
#define ROCKCHIP_SPI_DMARDLR			0x0044
#define ROCKCHIP_SPI_TXDR			0x0400
#define ROCKCHIP_SPI_RXDR			0x0800

/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET				0

#define CR0_CFS_OFFSET				2

#define CR0_SCPH_OFFSET				6

#define CR0_SCPOL_OFFSET			7

#define CR0_CSM_OFFSET				8
#define CR0_CSM_KEEP				0x0
/* ss_n is held high for half an sclk_out cycle */
#define CR0_CSM_HALF				0x1
/* ss_n is held high for one sclk_out cycle */
#define CR0_CSM_ONE				0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET				10
/*
 * The period between ss_n active and
 * sclk_out active is half an sclk_out cycle
 */
#define CR0_SSD_HALF				0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE				0x1

#define CR0_EM_OFFSET				11
#define CR0_EM_LITTLE				0x0
#define CR0_EM_BIG				0x1

#define CR0_FBM_OFFSET				12
#define CR0_FBM_MSB				0x0
#define CR0_FBM_LSB				0x1

#define CR0_BHT_OFFSET				13
#define CR0_BHT_16BIT				0x0
#define CR0_BHT_8BIT				0x1

#define CR0_RSD_OFFSET				14

#define CR0_FRF_OFFSET				16
#define CR0_FRF_SPI				0x0
#define CR0_FRF_SSP				0x1
#define CR0_FRF_MICROWIRE			0x2

#define CR0_XFM_OFFSET				18
#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR				0x0
#define CR0_XFM_TO				0x1
#define CR0_XFM_RO				0x2

#define CR0_OPM_OFFSET				20
#define CR0_OPM_MASTER				0x0
#define CR0_OPM_SLAVE				0x1

#define CR0_MTM_OFFSET				0x21

/* Bit fields in SER, 2bit */
#define SER_MASK				0x3

/* Bit fields in SR, 5bit */
#define SR_MASK					0x1f
#define SR_BUSY					(1 << 0)
#define SR_TF_FULL				(1 << 1)
#define SR_TF_EMPTY				(1 << 2)
#define SR_RF_EMPTY				(1 << 3)
#define SR_RF_FULL				(1 << 4)

/* Bit fields in IMR, ISR, RISR, 5bit */
#define INT_MASK				0x1f
#define INT_TF_EMPTY				(1 << 0)
#define INT_TF_OVERFLOW				(1 << 1)
#define INT_RF_UNDERFLOW			(1 << 2)
#define INT_RF_OVERFLOW				(1 << 3)
#define INT_RF_FULL				(1 << 4)

/* Bit fields in ICR, 4bit */
#define ICR_MASK				0x0f
#define ICR_ALL					(1 << 0)
#define ICR_RF_UNDERFLOW			(1 << 1)
#define ICR_RF_OVERFLOW				(1 << 2)
#define ICR_TF_OVERFLOW				(1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN				(1 << 0)
#define TF_DMA_EN				(1 << 1)

#define RXBUSY					(1 << 0)
#define TXBUSY					(1 << 1)

/* sclk_out: spi master internal logic in rk3x can support 50MHz */
#define MAX_SCLK_OUT				50000000

/*
 * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
 * the controller seems to hang when given 0x10000, so stick with this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff

enum rockchip_ssi_type {
	SSI_MOTO_SPI = 0,
	SSI_TI_SSP,
	SSI_NS_MICROWIRE,
};

struct rockchip_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	dma_addr_t addr;
};

struct rockchip_spi {
	struct device *dev;
	struct spi_master *master;

	struct clk *spiclk;
	struct clk *apb_pclk;

	void __iomem *regs;
	/* depth of the FIFO buffer */
	u32 fifo_len;
	/* max bus freq supported */
	u32 max_freq;
	/* SSI protocol type (Motorola SPI / TI SSP / NS Microwire) */
	enum rockchip_ssi_type type;

	u16 mode;
	u8 tmode;
	u8 bpw;
	u8 n_bytes;
	u32 rsd_nsecs;
	unsigned len;
	u32 speed;

	const void *tx;
	const void *tx_end;
	void *rx;
	void *rx_end;

	u32 state;
	/* protect state */
	spinlock_t lock;

	u32 use_dma;
	struct sg_table tx_sg;
	struct sg_table rx_sg;
	struct rockchip_spi_dma_data dma_rx;
	struct rockchip_spi_dma_data dma_tx;
	struct dma_slave_caps dma_caps;
};

static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
{
	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
}

static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
{
	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
}

static inline void flush_fifo(struct rockchip_spi *rs)
{
	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
}

static inline void wait_for_idle(struct rockchip_spi *rs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
			return;
	} while (!time_after(jiffies, timeout));

	dev_warn(rs->dev, "spi controller is in busy state!\n");
}

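/*
 * Probe the TX FIFO depth: write increasing threshold values to TXFTLR
 * and stop at the first value that does not read back unchanged.
 */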
static u32 get_fifo_len(struct rockchip_spi *rs)
{
	u32 fifo;

	for (fifo = 2; fifo < 32; fifo++) {
		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
			break;
	}

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);

	return (fifo == 31) ? 0 : fifo;
}

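/* Max number of frames that can be pushed into the TX FIFO right now */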
static inline u32 tx_max(struct rockchip_spi *rs)
{
	u32 tx_left, tx_room;

	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);

	return min(tx_left, tx_room);
}

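/* Max number of frames that can be read out of the RX FIFO right now */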
static inline u32 rx_max(struct rockchip_spi *rs)
{
	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);

	return min(rx_left, rx_room);
}

static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 ser;
	struct spi_master *master = spi->master;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_get_sync(rs->dev);

	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;

	/*
	 * drivers/spi/spi.c:
	 * static void spi_set_cs(struct spi_device *spi, bool enable)
	 * {
	 *	if (spi->mode & SPI_CS_HIGH)
	 *		enable = !enable;
	 *
	 *	if (spi->cs_gpio >= 0)
	 *		gpio_set_value(spi->cs_gpio, !enable);
	 *	else if (spi->master->set_cs)
	 *		spi->master->set_cs(spi, !enable);
	 * }
	 *
	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
	 */
	if (!enable)
		ser |= 1 << spi->chip_select;
	else
		ser &= ~(1 << spi->chip_select);

	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);

	pm_runtime_put_sync(rs->dev);
}

static int rockchip_spi_prepare_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;

	rs->mode = spi->mode;

	return 0;
}

static void rockchip_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	unsigned long flags;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spin_lock_irqsave(&rs->lock, flags);

	/*
	 * For DMA mode, we need to terminate the DMA channels and flush
	 * the fifo for the next transfer if the DMA transfer timed out.
	 * handle_err() is called by the core when a transfer fails, so
	 * this is a reasonable place for that error handling.
	 */
	if (rs->use_dma) {
		if (rs->state & RXBUSY) {
			dmaengine_terminate_async(rs->dma_rx.ch);
			flush_fifo(rs);
		}

		if (rs->state & TXBUSY)
			dmaengine_terminate_async(rs->dma_tx.ch);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spi_enable_chip(rs, 0);

	return 0;
}

static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
	u32 max = tx_max(rs);
	u32 txw = 0;

	while (max--) {
		if (rs->n_bytes == 1)
			txw = *(u8 *)(rs->tx);
		else
			txw = *(u16 *)(rs->tx);

		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
		rs->tx += rs->n_bytes;
	}
}

static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
	u32 max = rx_max(rs);
	u32 rxw;

	while (max--) {
		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
		if (rs->n_bytes == 1)
			*(u8 *)(rs->rx) = (u8)rxw;
		else
			*(u16 *)(rs->rx) = (u16)rxw;
		rs->rx += rs->n_bytes;
	}
}

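/*
 * Polling-mode transfer: keep topping up the TX FIFO and draining the
 * RX FIFO until both buffers have been walked completely, then wait for
 * the controller to go idle before disabling it.
 */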
static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
	int remain = 0;

	do {
		if (rs->tx) {
			remain = rs->tx_end - rs->tx;
			rockchip_spi_pio_writer(rs);
		}

		if (rs->rx) {
			remain = rs->rx_end - rs->rx;
			rockchip_spi_pio_reader(rs);
		}

		cpu_relax();
	} while (remain);

	/* If tx, wait until the FIFO has drained completely. */
	if (rs->tx)
		wait_for_idle(rs);

	spi_enable_chip(rs, 0);

	return 0;
}

static void rockchip_spi_dma_rxcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~RXBUSY;
	if (!(rs->state & TXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

static void rockchip_spi_dma_txcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	/* Wait until all the data has left the FIFO. */
	wait_for_idle(rs);

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~TXBUSY;
	if (!(rs->state & RXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}

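/*
 * Configure the slave DMA channels and submit the descriptors for the
 * current transfer.  The RX descriptor is always issued before the TX
 * one so the receive side is ready before data starts clocking out.
 */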
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	memset(&rxconf, 0, sizeof(rxconf));
	memset(&txconf, 0, sizeof(txconf));

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	rxdesc = NULL;
	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			rxconf.src_maxburst = 4;
		else
			rxconf.src_maxburst = 1;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	txdesc = NULL;
	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		if (rs->dma_caps.max_burst > 4)
			txconf.dst_maxburst = 4;
		else
			txconf.dst_maxburst = 1;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);
		if (!txdesc) {
			if (rxdesc)
				dmaengine_terminate_sync(rs->dma_rx.ch);
			return -EINVAL;
		}

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/* rx must be started before tx: SPI clocks data in and out simultaneously */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}

	return 0;
}

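/*
 * Program CTRLR0, the FIFO thresholds, the DMA control bits and the
 * baud-rate divider for the transfer that is about to start.
 *
 * The BAUDR divider must be even: DIV_ROUND_UP() rounds max_freq/speed
 * up, and "(div + 1) & 0xfffe" then rounds any odd result up to the
 * next even value.  With illustrative numbers, max_freq = 99 MHz and
 * speed = 10 MHz give DIV_ROUND_UP = 10, which is kept; a ratio of 11
 * would become 12.
 */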
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	int rsd = 0;

	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
		| (CR0_EM_BIG << CR0_EM_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* the divisor doesn't support odd numbers */
	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
	div = (div + 1) & 0xfffe;

	/* Rx sample delay is expressed in parent clock cycles (max 3) */
	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
				1000000000 >> 8);
	if (!rsd && rs->rsd_nsecs) {
		pr_warn_once("rockchip-spi: %u Hz is too slow to express %u ns delay\n",
			     rs->max_freq, rs->rsd_nsecs);
	} else if (rsd > 3) {
		rsd = 3;
		pr_warn_once("rockchip-spi: %u Hz is too fast to express %u ns delay, clamping at %u ns\n",
			     rs->max_freq, rs->rsd_nsecs,
			     rsd * 1000000000U / rs->max_freq);
	}
	cr0 |= rsd << CR0_RSD_OFFSET;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}

static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
	return ROCKCHIP_SPI_MAX_TRANLEN;
}

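/*
 * Set up and start one transfer.  Returns 1 while a DMA transfer is in
 * flight (it is finalized from the DMA callbacks), 0 when a PIO transfer
 * has already completed, or a negative error code.
 */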
static int rockchip_spi_transfer_one(
		struct spi_master *master,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	int ret = 0;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
		dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
		return -EINVAL;
	}

	rs->speed = xfer->speed_hz;
	rs->bpw = xfer->bits_per_word;
	rs->n_bytes = rs->bpw >> 3;

	rs->tx = xfer->tx_buf;
	rs->tx_end = rs->tx + xfer->len;
	rs->rx = xfer->rx_buf;
	rs->rx_end = rs->rx + xfer->len;
	rs->len = xfer->len;

	rs->tx_sg = xfer->tx_sg;
	rs->rx_sg = xfer->rx_sg;

	if (rs->tx && rs->rx)
		rs->tmode = CR0_XFM_TR;
	else if (rs->tx)
		rs->tmode = CR0_XFM_TO;
	else if (rs->rx)
		rs->tmode = CR0_XFM_RO;

	/* we need to prepare dma before the spi controller is enabled */
	if (master->can_dma && master->can_dma(master, spi, xfer))
		rs->use_dma = 1;
	else
		rs->use_dma = 0;

	rockchip_spi_config(rs);

	if (rs->use_dma) {
		if (rs->tmode == CR0_XFM_RO) {
			/* rx: dma must be prepared first */
			ret = rockchip_spi_prepare_dma(rs);
			spi_enable_chip(rs, 1);
		} else {
			/* tx or tr: spi must be enabled first */
			spi_enable_chip(rs, 1);
			ret = rockchip_spi_prepare_dma(rs);
		}
		/* a successful DMA prepare means the transfer is in progress */
		ret = ret ? ret : 1;
	} else {
		spi_enable_chip(rs, 1);
		ret = rockchip_spi_pio_transfer(rs);
	}

	return ret;
}

static bool rockchip_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	return (xfer->len > rs->fifo_len);
}

static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;
	u32 rsd_nsecs;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret = PTR_ERR(rs->regs);
		goto err_ioremap_resource;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_ioremap_resource;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spiclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable spiclk\n");
		goto err_spiclk_enable;
	}

	spi_enable_chip(rs, 0);

	rs->type = SSI_MOTO_SPI;
	rs->master = master;
	rs->dev = &pdev->dev;
	rs->max_freq = clk_get_rate(rs->spiclk);

	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs))
		rs->rsd_nsecs = rsd_nsecs;

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_get_fifo_len;
	}

	spin_lock_init(&rs->lock);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->num_chipselect = 2;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = rockchip_spi_set_cs;
	master->prepare_message = rockchip_spi_prepare_message;
	master->unprepare_message = rockchip_spi_unprepare_message;
	master->transfer_one = rockchip_spi_transfer_one;
	master->max_transfer_size = rockchip_spi_max_transfer_size;
	master->handle_err = rockchip_spi_handle_err;

	rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(rs->dma_tx.ch)) {
		/* Check tx to see if we need to defer probing the driver */
		if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_get_fifo_len;
		}
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
		rs->dma_tx.ch = NULL;
	}

	rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(rs->dma_rx.ch)) {
		if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_free_dma_tx;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
		rs->dma_rx.ch = NULL;
	}

	if (rs->dma_tx.ch && rs->dma_rx.ch) {
		dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
		rs->dma_tx.direction = DMA_MEM_TO_DEV;
		rs->dma_rx.direction = DMA_DEV_TO_MEM;

		master->can_dma = rockchip_spi_can_dma;
		master->dma_tx = rs->dma_tx.ch;
		master->dma_rx = rs->dma_rx.ch;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_register_master;
	}

	return 0;

err_register_master:
	pm_runtime_disable(&pdev->dev);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);
err_free_dma_tx:
	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
err_get_fifo_len:
	clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
	clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
	spi_master_put(master);

	return ret;
}

static int rockchip_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);

	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int rockchip_spi_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = spi_master_suspend(rs->master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}

static int rockchip_spi_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(rs->apb_pclk);
		if (ret < 0)
			return ret;

		ret = clk_prepare_enable(rs->spiclk);
		if (ret < 0) {
			clk_disable_unprepare(rs->apb_pclk);
			return ret;
		}
	}

	ret = spi_master_resume(rs->master);
	if (ret < 0) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	return 0;
}

static int rockchip_spi_runtime_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(rs->spiclk);
	if (ret)
		clk_disable_unprepare(rs->apb_pclk);

	return ret;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops rockchip_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};

static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,rk3036-spi", },
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3228-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ .compatible = "rockchip,rk3368-spi", },
	{ .compatible = "rockchip,rk3399-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
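
/*
 * Illustrative device tree node (the addresses and DMA specifiers below are
 * placeholders, not taken from any particular board file); the clock-names,
 * dma-names and rx-sample-delay-ns properties match what this driver looks
 * up:
 *
 *	spi0: spi@ff110000 {
 *		compatible = "rockchip,rk3288-spi";
 *		reg = <0xff110000 0x1000>;
 *		clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
 *		clock-names = "spiclk", "apb_pclk";
 *		dmas = <&dmac_peri 11>, <&dmac_peri 12>;
 *		dma-names = "tx", "rx";
 *		rx-sample-delay-ns = <10>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */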

static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);

MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");