/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>

#define SPI_NUM_CHIPSELECT	(4)
#define SPI_XFER_TIMEOUT_MS	(250)
#define SPI_OVERSAMPLING	(2)
/* SPI SE specific registers */
#define SE_SPI_CPHA		(0x224)
#define SE_SPI_LOOPBACK		(0x22C)
#define SE_SPI_CPOL		(0x230)
#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
#define SE_SPI_DEMUX_SEL	(0x250)
#define SE_SPI_TRANS_CFG	(0x25C)
#define SE_SPI_WORD_LEN		(0x268)
#define SE_SPI_TX_TRANS_LEN	(0x26C)
#define SE_SPI_RX_TRANS_LEN	(0x270)
#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
#define SE_SPI_DELAY_COUNTERS	(0x278)

/* SE_SPI_CPHA register fields */
#define CPHA			(BIT(0))

/* SE_SPI_LOOPBACK register fields */
#define LOOPBACK_ENABLE		(0x1)
#define NORMAL_MODE		(0x0)
#define LOOPBACK_MSK		(GENMASK(1, 0))

/* SE_SPI_CPOL register fields */
#define CPOL			(BIT(2))

/* SE_SPI_DEMUX_OUTPUT_INV register fields */
#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))

/* SE_SPI_DEMUX_SEL register fields */
#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))

/* SE_SPI_TRANS_CFG register fields */
#define CS_TOGGLE		(BIT(0))

/* SE_SPI_WORD_LEN register fields */
#define WORD_LEN_MSK		(GENMASK(9, 0))
#define MIN_WORD_LEN		(4)

/* SE_SPI_TX_TRANS_LEN and SE_SPI_RX_TRANS_LEN register fields */
#define TRANS_LEN_MSK		(GENMASK(23, 0))

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		(1)
#define SPI_RX_ONLY		(2)
#define SPI_FULL_DUPLEX		(3)
#define SPI_TX_RX		(7)
#define SPI_CS_ASSERT		(8)
#define SPI_CS_DEASSERT		(9)
#define SPI_SCK_ONLY		(10)
/* M_CMD params for SPI; bit flags ORed into the M command parameter */
#define SPI_PRE_CMD_DELAY	(BIT(0))
#define TIMESTAMP_BEFORE	(BIT(1))
#define FRAGMENTATION		(BIT(2))
#define TIMESTAMP_AFTER		(BIT(3))
#define POST_CMD_DELAY		(BIT(4))

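/*
 * Per-controller state: the GENI serial-engine resources (clocks and
 * pinctrl), the mapped register space and IRQ, the FIFO geometry read at
 * first use, and bookkeeping for the transfer currently in flight.
 */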
struct spi_geni_master {
	struct se_geni_rsc spi_rsc;
	resource_size_t phys_addr;
	resource_size_t size;
	void __iomem *base;
	int irq;
	struct device *dev;
	int rx_fifo_depth;
	int tx_fifo_depth;
	int tx_fifo_width;
	int tx_wm;
	bool setup;
	u32 cur_speed_hz;
	int cur_word_len;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	struct spi_transfer *cur_xfer;
	struct completion xfer_done;
};

static struct spi_master *get_spi_master(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);

	return spi;
}

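/*
 * Select a source clock for the requested SPI speed. Only the 19.2 MHz
 * root clock is supported here, so index 0 is always returned and the
 * requested speed is not (yet) used to pick a source.
 */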
static int get_sclk(u32 speed_hz, unsigned long *sclk_freq)
{
	u32 root_freq[] = { 19200000 };

	*sclk_freq = root_freq[0];
	return 0;
}

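/*
 * Program the serial-engine clock for the requested speed: pick a source
 * clock, compute the divider as (source_freq / SPI_OVERSAMPLING) / speed_hz,
 * set the source clock rate and write the selection and divider into the
 * SE clock registers.
 */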
static int do_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas)
{
	unsigned long sclk_freq;
	int div = 0;
	int idx;
	struct se_geni_rsc *rsc = &mas->spi_rsc;
	u32 clk_sel = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
	u32 m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
	int ret;

	clk_sel &= ~CLK_SEL_MSK;
	m_clk_cfg &= ~CLK_DIV_MSK;

	idx = get_sclk(speed_hz, &sclk_freq);
	if (idx < 0)
		return -EINVAL;

	div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
	if (!div)
		return -EINVAL;

	clk_sel |= (idx & CLK_SEL_MSK);
	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
	ret = clk_set_rate(rsc->se_clk, sclk_freq);
	if (ret)
		return ret;

	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
	return 0;
}

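/*
 * Program the SPI word length and configure FIFO packing so that as many
 * words as fit into one 32-bit FIFO entry are packed per FIFO access, MSB
 * first unless SPI_LSB_FIRST is requested.
 */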
static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
			       int bits_per_word)
{
	int pack_words = mas->tx_fifo_width / bits_per_word;
	bool msb_first = !(mode & SPI_LSB_FIRST);
	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);

	word_len &= ~WORD_LEN_MSK;
	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
}

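/*
 * Apply per-message settings before the first transfer runs: loopback,
 * clock polarity and phase, chip-select demux routing and polarity, the
 * clock configuration and the word length requested by the SPI slave.
 */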
static int spi_geni_prepare_message(struct spi_master *spi_mas,
				    struct spi_message *spi_msg)
{
	struct spi_device *spi_slv = spi_msg->spi;
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
	u16 mode = spi_slv->mode;
	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
	u32 demux_output_inv =
			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	int ret = 0;

	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;
	demux_output_inv &= ~BIT(spi_slv->chip_select);

	if (mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;

	if (mode & SPI_CPOL)
		cpol |= CPOL;

	if (mode & SPI_CPHA)
		cpha |= CPHA;

	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv |= BIT(spi_slv->chip_select);

	demux_sel |= BIT(spi_slv->chip_select);
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_word_len = spi_slv->bits_per_word;

	ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
	if (ret) {
		dev_err(&spi_mas->dev, "Err setting clks ret(%d) for %d\n",
			ret, mas->cur_speed_hz);
		goto prepare_message_exit;
	}
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	/* Ensure message level attributes are written before returning */
	mb();
prepare_message_exit:
	return ret;
}

static int spi_geni_unprepare_message(struct spi_master *spi_mas,
				      struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);

	mas->cur_speed_hz = 0;
	mas->cur_word_len = 0;
	return 0;
}

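/*
 * Power up the serial engine via runtime PM before a message is pumped.
 * On first use, check that the SE is configured for the SPI protocol,
 * read the FIFO geometry and initialise the engine for FIFO mode.
 */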
static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0;

	ret = pm_runtime_get_sync(mas->dev);
	if (ret < 0) {
		dev_err(mas->dev, "Error enabling SE resources\n");
		pm_runtime_put_noidle(mas->dev);
		goto exit_prepare_transfer_hardware;
	} else {
		ret = 0;
	}

	if (unlikely(!mas->setup)) {
		int proto = get_se_proto(mas->base);

		if (unlikely(proto != SPI)) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			return -ENXIO;
		}
		/* Read the FIFO geometry before using it for the watermark */
		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
		geni_se_init(mas->base, FIFO_MODE, 0x0,
					(mas->tx_fifo_depth - 2));
		/* Transmit an entire FIFO worth of data per IRQ */
		mas->tx_wm = 1;
		dev_dbg(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
			mas->tx_fifo_depth, mas->rx_fifo_depth,
			mas->tx_fifo_width);
		mas->setup = true;
	}
exit_prepare_transfer_hardware:
	return ret;
}

static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	pm_runtime_put_sync(mas->dev);
	return 0;
}

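/*
 * Translate one spi_transfer into a GENI M command: choose the opcode from
 * the buffers present (TX only, RX only or full duplex), convert the byte
 * count into a word count for the transfer-length registers, flag the
 * command as fragmented if further transfers follow in the message, and
 * arm the TX watermark interrupt to start feeding the FIFO.
 */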
static void setup_fifo_xfer(struct spi_transfer *xfer,
			    struct spi_geni_master *mas, u16 mode,
			    struct spi_master *spi)
{
	u32 m_cmd = 0;
	u32 m_param = 0;
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 trans_len = 0;

	if (xfer->bits_per_word != mas->cur_word_len) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_word_len = xfer->bits_per_word;
	}

	if (xfer->tx_buf && xfer->rx_buf)
		m_cmd = SPI_FULL_DUPLEX;
	else if (xfer->tx_buf)
		m_cmd = SPI_TX_ONLY;
	else if (xfer->rx_buf)
		m_cmd = SPI_RX_ONLY;

	spi_tx_cfg &= ~CS_TOGGLE;
	if (xfer->cs_change)
		spi_tx_cfg |= CS_TOGGLE;
	trans_len = ((xfer->len / (mas->cur_word_len >> 3)) & TRANS_LEN_MSK);
	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
		m_param |= FRAGMENTATION;

	mas->cur_xfer = xfer;
	if (m_cmd & SPI_TX_ONLY) {
		mas->tx_rem_bytes = xfer->len;
		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
	}

	if (m_cmd & SPI_RX_ONLY) {
		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}
	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
	geni_setup_m_cmd(mas->base, m_cmd, m_param);
	geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
	/* Ensure all writes are done before the WM interrupt */
	mb();
}

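/*
 * Recovery path for a transfer that timed out: dump the relevant transfer
 * state, then cancel the outstanding M command and, if the cancel itself
 * does not complete, escalate to an abort.
 */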
static void handle_fifo_timeout(struct spi_geni_master *mas)
{
	unsigned long timeout;
	u32 tx_trans_len = geni_read_reg(mas->base, SE_SPI_TX_TRANS_LEN);
	u32 rx_trans_len = geni_read_reg(mas->base, SE_SPI_RX_TRANS_LEN);
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 m_cmd = geni_read_reg(mas->base, SE_GENI_M_CMD0);

	/* Timed-out on a FIFO xfer, print relevant reg info. */
	dev_err(mas->dev, "tx_rem_bytes %d rx_rem_bytes %d\n",
		mas->tx_rem_bytes, mas->rx_rem_bytes);
	dev_err(mas->dev, "tx_trans_len %d rx_trans_len %d\n", tx_trans_len,
		rx_trans_len);
	dev_err(mas->dev, "spi_tx_cfg 0x%x m_cmd 0x%x\n", spi_tx_cfg, m_cmd);
	reinit_completion(&mas->xfer_done);
	geni_cancel_m_cmd(mas->base);
	/* Ensure cmd cancel is written */
	mb();
	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout) {
		reinit_completion(&mas->xfer_done);
		geni_abort_m_cmd(mas->base);
		/* Ensure cmd abort is written */
		mb();
		timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
		if (!timeout)
			dev_err(mas->dev,
				"Failed to cancel/abort m_cmd\n");
	}
}

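/*
 * Execute a single transfer: reprogram the clock if this transfer
 * overrides the message speed, queue the transfer to the FIFO engine and
 * wait for the IRQ handler to signal completion, invoking the cancel/abort
 * recovery on timeout.
 */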
static int spi_geni_transfer_one(struct spi_master *spi,
				 struct spi_device *slv,
				 struct spi_transfer *xfer)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long timeout;

	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
		return -EINVAL;
	}

	reinit_completion(&mas->xfer_done);
	/* Speed and bits per word can be overridden per transfer */
	if (xfer->speed_hz != mas->cur_speed_hz) {
		ret = do_spi_clk_cfg(xfer->speed_hz, mas);
		if (ret) {
			dev_err(mas->dev, "%s:Err setting clks:%d\n",
				__func__, ret);
			goto geni_transfer_one_exit;
		}
		mas->cur_speed_hz = xfer->speed_hz;
	}

	setup_fifo_xfer(xfer, mas, slv->mode, spi);
	timeout = wait_for_completion_timeout(&mas->xfer_done,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
	if (!timeout) {
		dev_err(mas->dev, "Xfer[len %d tx %p rx %p n %d] timed out.\n",
			xfer->len, xfer->tx_buf, xfer->rx_buf,
			xfer->bits_per_word);
		ret = -ETIMEDOUT;
		handle_fifo_timeout(mas);
	}
geni_transfer_one_exit:
	return ret;
}

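/*
 * TX watermark handler: pack outgoing bytes into 32-bit FIFO words and
 * write as many as the FIFO space above the watermark allows, then disable
 * the watermark interrupt once the whole transfer has been queued.
 */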
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
	int i = 0;
	int tx_fifo_width = (mas->tx_fifo_width >> 3);
	int max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
	const u8 *tx_buf = mas->cur_xfer->tx_buf;

	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
	while (i < max_bytes) {
		int j;
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_to_write = min_t(int, (max_bytes - i), tx_fifo_width);

		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
		/* Ensure FIFO writes are written in order */
		mb();
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
		/* Barrier here before return to prevent further ISRs */
		mb();
	}
}

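/*
 * RX handler: compute how many valid bytes the RX FIFO holds from the word
 * count and, for the final word, the RX_LAST_BYTE_VALID field, then unpack
 * the FIFO words into the receive buffer.
 */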
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	int i = 0;
	int fifo_width = (mas->tx_fifo_width >> 3);
	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
	int rx_bytes = 0;
	int rx_wc = 0;
	u8 *rx_buf = mas->cur_xfer->rx_buf;

	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
	if (rx_fifo_status & RX_LAST) {
		int rx_last_byte_valid =
			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
					>> RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
			rx_wc -= 1;
			rx_bytes += rx_last_byte_valid;
		}
	}
	rx_bytes += rx_wc * fifo_width;
	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int read_bytes = min_t(int, (rx_bytes - i), fifo_width);
		int j;

		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < read_bytes; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

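/*
 * Interrupt handler: drain the RX FIFO and/or refill the TX FIFO as
 * indicated by the status bits, complete the waiting transfer on command
 * done/cancel/abort, and clear the handled interrupts.
 */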
static irqreturn_t geni_spi_irq(int irq, void *dev)
{
	struct spi_geni_master *mas = dev;
	u32 m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);

	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
		geni_spi_handle_rx(mas);

	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
		geni_spi_handle_tx(mas);

	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
	    (m_irq & M_CMD_ABORT_EN)) {
		complete(&mas->xfer_done);
	}
	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
	return IRQ_HANDLED;
}

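/*
 * Probe: allocate the SPI master, look up the pinctrl states, clocks,
 * register space and SE interrupt described by the device tree, fill in
 * the controller callbacks and register the master with the SPI core.
 */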
static int spi_geni_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *spi;
	struct spi_geni_master *geni_mas;
	struct se_geni_rsc *rsc;
	struct resource *res;

	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
	if (!spi) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
		goto spi_geni_probe_err;
	}

	platform_set_drvdata(pdev, spi);
	geni_mas = spi_master_get_devdata(spi);
	rsc = &geni_mas->spi_rsc;
	geni_mas->dev = &pdev->dev;
	spi->dev.of_node = pdev->dev.of_node;
	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
		dev_err(&pdev->dev, "No pinctrl config specified!\n");
		ret = PTR_ERR(rsc->geni_pinctrl);
		goto spi_geni_probe_err;
	}

	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_DEFAULT);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
		dev_err(&pdev->dev, "No default config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_active);
		goto spi_geni_probe_err;
	}

	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_SLEEP);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
		dev_err(&pdev->dev, "No sleep config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_sleep);
		goto spi_geni_probe_err;
	}

	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
	if (IS_ERR(rsc->se_clk)) {
		ret = PTR_ERR(rsc->se_clk);
		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
	if (IS_ERR(rsc->m_ahb_clk)) {
		ret = PTR_ERR(rsc->m_ahb_clk);
		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
	if (IS_ERR(rsc->s_ahb_clk)) {
		ret = PTR_ERR(rsc->s_ahb_clk);
		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
				 &spi->max_speed_hz)) {
		dev_err(&pdev->dev, "Max frequency not specified.\n");
		ret = -ENXIO;
		goto spi_geni_probe_err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
	if (!res) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "Err getting IO region\n");
		goto spi_geni_probe_err;
	}

	geni_mas->phys_addr = res->start;
	geni_mas->size = resource_size(res);
	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
						resource_size(res));
	if (!geni_mas->base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Err IO mapping iomem\n");
		goto spi_geni_probe_err;
	}

	geni_mas->irq = platform_get_irq(pdev, 0);
	if (geni_mas->irq < 0) {
		dev_err(&pdev->dev, "Err getting IRQ\n");
		ret = geni_mas->irq;
		goto spi_geni_probe_unmap;
	}
	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
			       IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
	if (ret) {
		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
			geni_mas->irq, ret);
		goto spi_geni_probe_unmap;
	}

	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = SPI_NUM_CHIPSELECT;
	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
	spi->prepare_message = spi_geni_prepare_message;
	spi->unprepare_message = spi_geni_unprepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->unprepare_transfer_hardware
			= spi_geni_unprepare_transfer_hardware;
	spi->auto_runtime_pm = false;

	init_completion(&geni_mas->xfer_done);
	pm_runtime_enable(&pdev->dev);
	ret = spi_register_master(spi);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto spi_geni_probe_unmap;
	}
	return ret;
spi_geni_probe_unmap:
	devm_iounmap(&pdev->dev, geni_mas->base);
spi_geni_probe_err:
	spi_master_put(spi);
	return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);

	spi_unregister_master(master);
	se_geni_resources_off(&geni_mas->spi_rsc);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

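/*
 * Runtime PM simply turns the serial-engine resources off and on via
 * se_geni_resources_off()/se_geni_resources_on(); system suspend is
 * refused with -EBUSY unless the controller is already runtime suspended.
 */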
#ifdef CONFIG_PM
static int spi_geni_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	ret = se_geni_resources_off(&geni_mas->spi_rsc);
	return ret;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	ret = se_geni_resources_on(&geni_mas->spi_rsc);
	return ret;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		return -EBUSY;
	return 0;
}
#else
static int spi_geni_runtime_suspend(struct device *dev)
{
	return 0;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
			   spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,spi-geni" },
	{}
};

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "spi_geni",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_geni");