// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA             0x224
#define CPHA                    BIT(0)

#define SE_SPI_LOOPBACK         0x22c
#define LOOPBACK_ENABLE         0x1
#define NORMAL_MODE             0x0
#define LOOPBACK_MSK            GENMASK(1, 0)

#define SE_SPI_CPOL             0x230
#define CPOL                    BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL        0x250
#define CS_DEMUX_OUTPUT_SEL     GENMASK(3, 0)

#define SE_SPI_TRANS_CFG        0x25c
#define CS_TOGGLE               BIT(0)

#define SE_SPI_WORD_LEN         0x268
#define WORD_LEN_MSK            GENMASK(9, 0)
#define MIN_WORD_LEN            4

#define SE_SPI_TX_TRANS_LEN     0x26c
#define SE_SPI_RX_TRANS_LEN     0x270
#define TRANS_LEN_MSK           GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY 0x274

#define SE_SPI_DELAY_COUNTERS   0x278
#define SPI_INTER_WORDS_DELAY_MSK       GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK            GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT           10

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY             1
#define SPI_RX_ONLY             2
#define SPI_FULL_DUPLEX         3
#define SPI_TX_RX               7
#define SPI_CS_ASSERT           8
#define SPI_CS_DEASSERT         9
#define SPI_SCK_ONLY            10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY       BIT(0)
#define TIMESTAMP_BEFORE        BIT(1)
#define FRAGMENTATION           BIT(2)
#define TIMESTAMP_AFTER         BIT(3)
#define POST_CMD_DELAY          BIT(4)

enum spi_m_cmd_opcode {
        CMD_NONE,
        CMD_XFER,
        CMD_CS,
        CMD_CANCEL,
};

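/*
 * Driver state for one GENI SPI controller instance. @lock serializes
 * the FIFO handlers and command state against the interrupt handler;
 * @xfer_done is completed from the ISR for chip-select and
 * cancel/abort commands.
 */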
struct spi_geni_master {
        struct geni_se se;
        struct device *dev;
        u32 tx_fifo_depth;
        u32 fifo_width_bits;
        u32 tx_wm;
        unsigned long cur_speed_hz;
        unsigned int cur_bits_per_word;
        unsigned int tx_rem_bytes;
        unsigned int rx_rem_bytes;
        const struct spi_transfer *cur_xfer;
        struct completion xfer_done;
        unsigned int oversampling;
        spinlock_t lock;
        enum spi_m_cmd_opcode cur_mcmd;
        int irq;
};

static void handle_fifo_timeout(struct spi_master *spi,
                                struct spi_message *msg);

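/*
 * Map the requested SPI bus rate onto a source-clock index and divider.
 * The serial engine samples at @oversampling times the bus clock, so
 * the match is done against speed_hz * oversampling, and the divider is
 * rounded up so the resulting rate never exceeds the request.
 */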
static int get_spi_clk_cfg(unsigned int speed_hz,
                        struct spi_geni_master *mas,
                        unsigned int *clk_idx,
                        unsigned int *clk_div)
{
        unsigned long sclk_freq;
        unsigned int actual_hz;
        struct geni_se *se = &mas->se;
        int ret;

        ret = geni_se_clk_freq_match(&mas->se,
                                speed_hz * mas->oversampling,
                                clk_idx, &sclk_freq, false);
        if (ret) {
                dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
                                                        ret, speed_hz);
                return ret;
        }

        *clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
        actual_hz = sclk_freq / (mas->oversampling * *clk_div);

        dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
                                actual_hz, sclk_freq, *clk_idx, *clk_div);
        ret = clk_set_rate(se->clk, sclk_freq);
        if (ret)
                dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
        return ret;
}

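/*
 * Chip-select is driven by the sequencer itself: issue a CS_ASSERT or
 * CS_DEASSERT command and wait for the ISR to signal its completion,
 * falling back to the cancel/abort path on timeout.
 */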
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
        struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
        struct spi_master *spi = dev_get_drvdata(mas->dev);
        struct geni_se *se = &mas->se;
        unsigned long time_left;

        reinit_completion(&mas->xfer_done);
        pm_runtime_get_sync(mas->dev);
        if (!(slv->mode & SPI_CS_HIGH))
                set_flag = !set_flag;

        mas->cur_mcmd = CMD_CS;
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
        else
                geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);

        time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
        if (!time_left)
                handle_fifo_timeout(spi, NULL);

        pm_runtime_put(mas->dev);
}

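/*
 * Program the word length (stored as an offset from the 4-bit hardware
 * minimum) and configure packing so multiple SPI words share a FIFO
 * word whenever they fit evenly.
 */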
static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
                                        unsigned int bits_per_word)
{
        unsigned int pack_words;
        bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
        struct geni_se *se = &mas->se;
        u32 word_len;

        word_len = readl(se->base + SE_SPI_WORD_LEN);

        /*
         * If bits_per_word doesn't divide evenly into the FIFO word
         * width, fall back to packing one SPI word per FIFO word.
         */
        if (!(mas->fifo_width_bits % bits_per_word))
                pack_words = mas->fifo_width_bits / bits_per_word;
        else
                pack_words = 1;
        word_len &= ~WORD_LEN_MSK;
        word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
        geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
                                                        true, true);
        writel(word_len, se->base + SE_SPI_WORD_LEN);
}

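/*
 * Per-message setup: apply the device's mode bits (loopback, CPOL/CPHA,
 * chip-select polarity and demux routing), word length and clocking
 * before the first transfer of a message.
 */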
static int setup_fifo_params(struct spi_device *spi_slv,
                                        struct spi_master *spi)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        struct geni_se *se = &mas->se;
        u32 loopback_cfg, cpol, cpha, demux_output_inv;
        u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
        int ret;

        loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
        cpol = readl(se->base + SE_SPI_CPOL);
        cpha = readl(se->base + SE_SPI_CPHA);
        demux_output_inv = 0;
        loopback_cfg &= ~LOOPBACK_MSK;
        cpol &= ~CPOL;
        cpha &= ~CPHA;

        if (spi_slv->mode & SPI_LOOP)
                loopback_cfg |= LOOPBACK_ENABLE;

        if (spi_slv->mode & SPI_CPOL)
                cpol |= CPOL;

        if (spi_slv->mode & SPI_CPHA)
                cpha |= CPHA;

        if (spi_slv->mode & SPI_CS_HIGH)
                demux_output_inv = BIT(spi_slv->chip_select);

        demux_sel = spi_slv->chip_select;
        mas->cur_speed_hz = spi_slv->max_speed_hz;
        mas->cur_bits_per_word = spi_slv->bits_per_word;

        ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
        if (ret) {
                dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
                                                ret, mas->cur_speed_hz);
                return ret;
        }

        clk_sel = idx & CLK_SEL_MSK;
        m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
        spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
        writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
        writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
        writel(cpha, se->base + SE_SPI_CPHA);
        writel(cpol, se->base + SE_SPI_CPOL);
        writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
        writel(clk_sel, se->base + SE_GENI_CLK_SEL);
        writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
        return 0;
}

static int spi_geni_prepare_message(struct spi_master *spi,
                                        struct spi_message *spi_msg)
{
        int ret;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        struct geni_se *se = &mas->se;

        geni_se_select_mode(se, GENI_SE_FIFO);
        reinit_completion(&mas->xfer_done);
        ret = setup_fifo_params(spi_msg->spi, spi);
        if (ret)
                dev_err(mas->dev, "Couldn't select mode %d\n", ret);
        return ret;
}

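/*
 * One-time controller setup: check that firmware has configured this
 * serial engine for SPI, cache the FIFO geometry, and derive the
 * oversampling factor from the QUP hardware revision.
 */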
static int spi_geni_init(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        unsigned int proto, major, minor, ver;

        pm_runtime_get_sync(mas->dev);

        proto = geni_se_read_proto(se);
        if (proto != GENI_SE_SPI) {
                dev_err(mas->dev, "Invalid proto %d\n", proto);
                pm_runtime_put(mas->dev);
                return -ENXIO;
        }
        mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

        /* The Tx and Rx FIFOs have the same width */
        mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

        /*
         * The hardware programming guide suggests configuring the
         * RX FIFO RFR level to fifo_depth - 2.
         */
        geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
        /* Transmit an entire FIFO worth of data per IRQ */
        mas->tx_wm = 1;
        ver = geni_se_get_qup_hw_version(se);
        major = GENI_SE_VERSION_MAJOR(ver);
        minor = GENI_SE_VERSION_MINOR(ver);

        if (major == 1 && minor == 0)
                mas->oversampling = 2;
        else
                mas->oversampling = 1;

        pm_runtime_put(mas->dev);
        return 0;
}

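/*
 * Program one transfer: update word length and clocking if the transfer
 * overrides them, convert the byte count into SPI words for the Tx/Rx
 * length registers, then start the M_CMD and arm the Tx watermark.
 */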
static void setup_fifo_xfer(struct spi_transfer *xfer,
                                struct spi_geni_master *mas,
                                u16 mode, struct spi_master *spi)
{
        u32 m_cmd = 0;
        u32 spi_tx_cfg, len;
        struct geni_se *se = &mas->se;

        spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
        if (xfer->bits_per_word != mas->cur_bits_per_word) {
                spi_setup_word_len(mas, mode, xfer->bits_per_word);
                mas->cur_bits_per_word = xfer->bits_per_word;
        }

        /* Speed and bits per word can be overridden per transfer */
        if (xfer->speed_hz != mas->cur_speed_hz) {
                int ret;
                u32 clk_sel, m_clk_cfg;
                unsigned int idx, div;

                ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
                if (ret) {
                        dev_err(mas->dev, "Err setting clks:%d\n", ret);
                        return;
                }
                /*
                 * The SPI core clock gets configured with the requested
                 * frequency or the closest achievable frequency. For that
                 * reason the requested frequency is stored in cur_speed_hz
                 * and used for subsequent transfers instead of calling
                 * clk_get_rate().
                 */
                mas->cur_speed_hz = xfer->speed_hz;
                clk_sel = idx & CLK_SEL_MSK;
                m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
                writel(clk_sel, se->base + SE_GENI_CLK_SEL);
                writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
        }

        mas->tx_rem_bytes = 0;
        mas->rx_rem_bytes = 0;
        if (xfer->tx_buf && xfer->rx_buf)
                m_cmd = SPI_FULL_DUPLEX;
        else if (xfer->tx_buf)
                m_cmd = SPI_TX_ONLY;
        else if (xfer->rx_buf)
                m_cmd = SPI_RX_ONLY;

        spi_tx_cfg &= ~CS_TOGGLE;

        if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
                len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
        else
                len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
        len &= TRANS_LEN_MSK;

        mas->cur_xfer = xfer;
        if (m_cmd & SPI_TX_ONLY) {
                mas->tx_rem_bytes = xfer->len;
                writel(len, se->base + SE_SPI_TX_TRANS_LEN);
        }

        if (m_cmd & SPI_RX_ONLY) {
                writel(len, se->base + SE_SPI_RX_TRANS_LEN);
                mas->rx_rem_bytes = xfer->len;
        }
        writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
        mas->cur_mcmd = CMD_XFER;
        geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);

        /*
         * The TX watermark must only be set after the SPI configuration
         * and the GENI SE engine are fully set up, since the driver starts
         * pushing out data as soon as the watermark interrupt fires.
         */
        if (m_cmd & SPI_TX_ONLY)
                writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}

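/*
 * Recovery path for a transfer or CS command that never completed:
 * cancel the pending command and, if the cancel itself times out,
 * escalate to an abort.
 */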
static void handle_fifo_timeout(struct spi_master *spi,
                                struct spi_message *msg)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        unsigned long time_left, flags;
        struct geni_se *se = &mas->se;

        spin_lock_irqsave(&mas->lock, flags);
        reinit_completion(&mas->xfer_done);
        mas->cur_mcmd = CMD_CANCEL;
        geni_se_cancel_m_cmd(se);
        writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
        spin_unlock_irqrestore(&mas->lock, flags);
        time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
        if (time_left)
                return;

        spin_lock_irqsave(&mas->lock, flags);
        reinit_completion(&mas->xfer_done);
        geni_se_abort_m_cmd(se);
        spin_unlock_irqrestore(&mas->lock, flags);
        time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
        if (!time_left)
                dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}

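/*
 * Returning a positive value tells the SPI core that the transfer is
 * still in flight; it is finalized from the interrupt handler via
 * spi_finalize_current_transfer() on command done.
 */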
static int spi_geni_transfer_one(struct spi_master *spi,
                                struct spi_device *slv,
                                struct spi_transfer *xfer)
{
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        /* Terminate and return success for a zero-length transfer */
        if (!xfer->len)
                return 0;

        setup_fifo_xfer(xfer, mas, slv->mode, spi);
        return 1;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
        /*
         * Calculate how many bytes we'll put in each FIFO word. If the
         * transfer words don't pack cleanly into a FIFO word we'll just put
         * one transfer word in each FIFO word. If they do pack we'll pack 'em.
         */
        if (mas->fifo_width_bits % mas->cur_bits_per_word)
                return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
                                                       BITS_PER_BYTE));

        return mas->fifo_width_bits / BITS_PER_BYTE;
}

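/*
 * Refill the Tx FIFO from the current transfer, packing buffer bytes
 * into FIFO words. Once the last byte is queued, clear the watermark
 * so the interrupt stops firing.
 */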
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        unsigned int max_bytes;
        const u8 *tx_buf;
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;

        max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
        if (mas->tx_rem_bytes < max_bytes)
                max_bytes = mas->tx_rem_bytes;

        tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
        while (i < max_bytes) {
                unsigned int j;
                unsigned int bytes_to_write;
                u32 fifo_word = 0;
                u8 *fifo_byte = (u8 *)&fifo_word;

                bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
                for (j = 0; j < bytes_to_write; j++)
                        fifo_byte[j] = tx_buf[i++];
                iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
        }
        mas->tx_rem_bytes -= max_bytes;
        if (!mas->tx_rem_bytes)
                writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
}

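/*
 * Drain the Rx FIFO into the current transfer buffer, trimming the
 * partial final FIFO word using the RX_LAST_BYTE_VALID count.
 */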
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
        struct geni_se *se = &mas->se;
        u32 rx_fifo_status;
        unsigned int rx_bytes;
        unsigned int rx_last_byte_valid;
        u8 *rx_buf;
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;

        rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
        rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
        if (rx_fifo_status & RX_LAST) {
                rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
                rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
                if (rx_last_byte_valid && rx_last_byte_valid < 4)
                        rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
        }
        if (mas->rx_rem_bytes < rx_bytes)
                rx_bytes = mas->rx_rem_bytes;

        rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
        while (i < rx_bytes) {
                u32 fifo_word = 0;
                u8 *fifo_byte = (u8 *)&fifo_word;
                unsigned int bytes_to_read;
                unsigned int j;

                bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
                ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
                for (j = 0; j < bytes_to_read; j++)
                        rx_buf[i++] = fifo_byte[j];
        }
        mas->rx_rem_bytes -= rx_bytes;
}

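/*
 * Interrupt handler: services Rx/Tx FIFO watermarks, finalizes the
 * current transfer on command done, and completes waits for
 * chip-select and cancel/abort commands.
 */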
static irqreturn_t geni_spi_isr(int irq, void *data)
{
        struct spi_master *spi = data;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
        struct geni_se *se = &mas->se;
        u32 m_irq;
        unsigned long flags;

        if (mas->cur_mcmd == CMD_NONE)
                return IRQ_NONE;

        spin_lock_irqsave(&mas->lock, flags);
        m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);

        if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
                geni_spi_handle_rx(mas);

        if (m_irq & M_TX_FIFO_WATERMARK_EN)
                geni_spi_handle_tx(mas);

        if (m_irq & M_CMD_DONE_EN) {
                if (mas->cur_mcmd == CMD_XFER)
                        spi_finalize_current_transfer(spi);
                else if (mas->cur_mcmd == CMD_CS)
                        complete(&mas->xfer_done);
                mas->cur_mcmd = CMD_NONE;
                /*
                 * If this happens, then a CMD_DONE came before all the Tx
                 * buffer bytes were sent out. This is unusual; log this
                 * condition and disable the WM interrupt to prevent the
                 * system from stalling due to an interrupt storm.
                 * If this happens when all Rx bytes haven't been received,
                 * log the condition.
                 * The only known way this can happen is if bits_per_word != 8
                 * and some registers that expect xfer lengths in num spi_words
                 * weren't written correctly.
                 */
                if (mas->tx_rem_bytes) {
                        writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
                        dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
                                mas->tx_rem_bytes, mas->cur_bits_per_word);
                }
                if (mas->rx_rem_bytes)
                        dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
                                mas->rx_rem_bytes, mas->cur_bits_per_word);
        }

        if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) {
                mas->cur_mcmd = CMD_NONE;
                complete(&mas->xfer_done);
        }

        writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
        spin_unlock_irqrestore(&mas->lock, flags);
        return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
        int ret, irq;
        struct spi_master *spi;
        struct spi_geni_master *mas;
        struct resource *res;
        void __iomem *base;
        struct clk *clk;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "Err getting IRQ %d\n", irq);
                return irq;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        clk = devm_clk_get(&pdev->dev, "se");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "Err getting SE Core clk %ld\n",
                                                PTR_ERR(clk));
                return PTR_ERR(clk);
        }

        spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
        if (!spi)
                return -ENOMEM;

        platform_set_drvdata(pdev, spi);
        mas = spi_master_get_devdata(spi);
        mas->irq = irq;
        mas->dev = &pdev->dev;
        mas->se.dev = &pdev->dev;
        mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
        mas->se.base = base;
        mas->se.clk = clk;

        spi->bus_num = -1;
        spi->dev.of_node = pdev->dev.of_node;
        spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
        spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        spi->num_chipselect = 4;
        spi->max_speed_hz = 50000000;
        spi->prepare_message = spi_geni_prepare_message;
        spi->transfer_one = spi_geni_transfer_one;
        spi->auto_runtime_pm = true;
        spi->handle_err = handle_fifo_timeout;
        spi->set_cs = spi_geni_set_cs;

        init_completion(&mas->xfer_done);
        spin_lock_init(&mas->lock);
        pm_runtime_enable(&pdev->dev);

        ret = spi_geni_init(mas);
        if (ret)
                goto spi_geni_probe_runtime_disable;

        ret = request_irq(mas->irq, geni_spi_isr,
                        IRQF_TRIGGER_HIGH, "spi_geni", spi);
        if (ret)
                goto spi_geni_probe_runtime_disable;

        ret = spi_register_master(spi);
        if (ret)
                goto spi_geni_probe_free_irq;

        return 0;
spi_geni_probe_free_irq:
        free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
        pm_runtime_disable(&pdev->dev);
        spi_master_put(spi);
        return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
        struct spi_master *spi = platform_get_drvdata(pdev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        /* Unregister _before_ disabling pm_runtime() so we stop transfers */
        spi_unregister_master(spi);

        free_irq(mas->irq, spi);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

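/*
 * Runtime PM gates the serial-engine resources. System sleep quiesces
 * the SPI core first, then forces runtime suspend; resume unwinds in
 * the opposite order, undoing the first step if the second fails.
 */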
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        return geni_se_resources_off(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        struct spi_geni_master *mas = spi_master_get_devdata(spi);

        return geni_se_resources_on(&mas->se);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        int ret;

        ret = spi_master_suspend(spi);
        if (ret)
                return ret;

        ret = pm_runtime_force_suspend(dev);
        if (ret)
                spi_master_resume(spi);

        return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
        struct spi_master *spi = dev_get_drvdata(dev);
        int ret;

        ret = pm_runtime_force_resume(dev);
        if (ret)
                return ret;

        ret = spi_master_resume(spi);
        if (ret)
                pm_runtime_force_suspend(dev);

        return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
        SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
                                        spi_geni_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
        { .compatible = "qcom,geni-spi" },
        {}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
        .probe = spi_geni_probe,
        .remove = spi_geni_remove,
        .driver = {
                .name = "geni_spi",
                .pm = &spi_geni_pm_ops,
                .of_match_table = spi_geni_dt_match,
        },
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");