/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)	((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)	((reg >> 16) & 0x3F)
#define SPIE_TXE	BIT(15)	/* TX FIFO empty */
#define SPIE_DON	BIT(14)	/* TX done */
#define SPIE_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF	BIT(12)	/* RX FIFO full */
#define SPIE_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIE_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF	BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE	BIT(15)	/* TX FIFO empty */
#define SPIM_DON	BIT(14)	/* TX done */
#define SPIM_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF	BIT(12)	/* RX FIFO full */
#define SPIM_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIM_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF	BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)		/* Dual output */
#define SPCOM_TO		BIT(27)		/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000

struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg; /* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

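/*
 * Reject messages the controller cannot handle: a total frame length
 * above SPCOM_TRANLEN_MAX, transfers with differing bits_per_word or
 * speed_hz, and MSB-first transfers with a word size other than 8 or 16.
 */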
static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}

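/*
 * Fill the TX FIFO with data from the current transfer, 32 bits at a
 * time where possible. When a transfer is exhausted, continue with the
 * next one in the message until the FIFO is full or the last transfer
 * (the only one needed, in rxskip mode) has been written completely.
 */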
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					     swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
				 espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}

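/*
 * Drain the RX FIFO into the current transfer's rx_buf, 32 bits at a
 * time where possible, moving on to the next transfer in the message
 * until the FIFO is empty or the last transfer has been filled.
 */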
static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
				 espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}

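/*
 * Program word length and clock divider (DIV16/PM) for this transfer in
 * the cached chip-select mode value; the SPMODEx register is only
 * written when the computed mode differs from the last value programmed.
 */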
static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}

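/*
 * Run one hardware transaction: program SPCOM (chip select, transaction
 * length, optional RXSKIP/dual-output), enable the done interrupt (plus
 * the RX threshold interrupt for reads longer than the FIFO), prime the
 * TX FIFO and wait for the IRQ handler to signal completion, with a
 * two second timeout.
 */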
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}

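/*
 * Set up the per-message transfer state (TX/RX cursors, byte swapping
 * for LSB-first words wider than 8 bits, rxskip mode) and run the
 * combined transfer through fsl_espi_bufs().
 */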
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	if (trans->delay_usecs)
		udelay(trans->delay_usecs);

	return ret;
}

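/*
 * transfer_one_message() callback: validate the message, collapse its
 * transfers into a single transfer covering the whole frame (using the
 * largest delay and rx_nbits found) and hand it to fsl_espi_trans().
 */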
static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs > delay_usecs)
			delay_usecs = t->delay_usecs;
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay_usecs = delay_usecs;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

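/*
 * setup() callback: allocate the per-chip-select state on first use and
 * program clock polarity/phase, bit order and loopback mode for this
 * SPI device.
 */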
static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

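/*
 * Handle the interrupt events for the current transfer: drain the RX
 * FIFO, refill the TX FIFO, and once both directions are finished check
 * for error conditions and signal completion.
 */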
static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events;

	spin_lock(&espi->lock);

	/* Get interrupt events(tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	if (!events) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

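/*
 * Bring the controller registers into a known state: clear the mode,
 * mask and command registers, acknowledge all pending events, program
 * the per-chip-select mode registers from the device tree (fsl,csbef /
 * fsl,csaft) and finally enable the controller with the default TX/RX
 * FIFO thresholds.
 */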
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret < 0)
		goto err_pm;

	dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

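/*
 * Illustrative device tree fragment for this controller. The register,
 * interrupt and frequency values below are made-up examples; the
 * "fsl,mpc8536-espi" binding documentation is authoritative:
 *
 *	spi@7000 {
 *		compatible = "fsl,mpc8536-espi";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		reg = <0x7000 0x1000>;
 *		interrupts = <59 0x2>;
 *		fsl,espi-num-chipselects = <4>;
 *
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <40000000>;
 *			fsl,csbef = <1>;
 *			fsl,csaft = <1>;
 *		};
 *	};
 */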
static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe = of_fsl_espi_probe,
	.remove = of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");