/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/msm_gpi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-geni-qcom.h>

#define SPI_NUM_CHIPSELECT	(4)
#define SPI_XFER_TIMEOUT_MS	(250)
#define SPI_AUTO_SUSPEND_DELAY	(250)
/* SPI SE specific registers */
#define SE_SPI_CPHA		(0x224)
#define SE_SPI_LOOPBACK		(0x22C)
#define SE_SPI_CPOL		(0x230)
#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
#define SE_SPI_DEMUX_SEL	(0x250)
#define SE_SPI_TRANS_CFG	(0x25C)
#define SE_SPI_WORD_LEN		(0x268)
#define SE_SPI_TX_TRANS_LEN	(0x26C)
#define SE_SPI_RX_TRANS_LEN	(0x270)
#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
#define SE_SPI_DELAY_COUNTERS	(0x278)

/* SE_SPI_CPHA register fields */
#define CPHA			(BIT(0))

/* SE_SPI_LOOPBACK register fields */
#define LOOPBACK_ENABLE		(0x1)
#define NORMAL_MODE		(0x0)
#define LOOPBACK_MSK		(GENMASK(1, 0))

/* SE_SPI_CPOL register fields */
#define CPOL			(BIT(2))

/* SE_SPI_DEMUX_OUTPUT_INV register fields */
#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))

/* SE_SPI_DEMUX_SEL register fields */
#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))

/* SE_SPI_TRANS_CFG register fields */
#define CS_TOGGLE		(BIT(0))

/* SE_SPI_WORD_LEN register fields */
#define WORD_LEN_MSK		(GENMASK(9, 0))
#define MIN_WORD_LEN		(4)

/* SPI_TX/SPI_RX_TRANS_LEN fields */
#define TRANS_LEN_MSK		(GENMASK(23, 0))

/* SE_SPI_DELAY_COUNTERS */
#define SPI_INTER_WORDS_DELAY_MSK	(GENMASK(9, 0))
#define SPI_CS_CLK_DELAY_MSK		(GENMASK(19, 10))
#define SPI_CS_CLK_DELAY_SHFT		(10)

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		(1)
#define SPI_RX_ONLY		(2)
#define SPI_FULL_DUPLEX		(3)
#define SPI_TX_RX		(7)
#define SPI_CS_ASSERT		(8)
#define SPI_CS_DEASSERT		(9)
#define SPI_SCK_ONLY		(10)
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

#define SPI_CORE2X_VOTE		(7600)
/* GSI CONFIG0 TRE Params */
/* Flags bit fields */
#define GSI_LOOPBACK_EN		(BIT(0))
#define GSI_CS_TOGGLE		(BIT(3))
#define GSI_CPHA		(BIT(4))
#define GSI_CPOL		(BIT(5))

#define MAX_TX_SG		(3)
#define NUM_SPI_XFER		(8)

struct gsi_desc_cb {
	struct spi_master *spi;
	struct spi_transfer *xfer;
};

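/*
 * Per-transfer GSI bookkeeping: the config0/go/DMA TREs, scatterlists,
 * DMA cookies and callback parameters used when a message is run through
 * the GPI DMA engine.
 */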
struct spi_geni_gsi {
	struct msm_gpi_tre config0_tre;
	struct msm_gpi_tre go_tre;
	struct msm_gpi_tre tx_dma_tre;
	struct msm_gpi_tre rx_dma_tre;
	struct scatterlist tx_sg[MAX_TX_SG];
	struct scatterlist rx_sg;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
	struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
	struct dma_async_tx_descriptor *tx_desc;
	struct dma_async_tx_descriptor *rx_desc;
	struct gsi_desc_cb desc_cb;
};

struct spi_geni_master {
	struct se_geni_rsc spi_rsc;
	resource_size_t phys_addr;
	resource_size_t size;
	void __iomem *base;
	int irq;
	struct device *dev;
	int rx_fifo_depth;
	int tx_fifo_depth;
	int tx_fifo_width;
	int tx_wm;
	bool setup;
	u32 cur_speed_hz;
	int cur_word_len;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	struct spi_transfer *cur_xfer;
	struct completion xfer_done;
	struct device *wrapper_dev;
	int oversampling;
	struct spi_geni_gsi *gsi;
	struct dma_chan *tx;
	struct dma_chan *rx;
	struct msm_gpi_ctrl tx_event;
	struct msm_gpi_ctrl rx_event;
	struct completion tx_cb;
	struct completion rx_cb;
	bool qn_err;
	int cur_xfer_mode;
	int num_tx_eot;
	int num_rx_eot;
	int num_xfers;
	void *ipc;
	bool shared_se;
};

static struct spi_master *get_spi_master(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spi_master *spi = platform_get_drvdata(pdev);

	return spi;
}

static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
			int *clk_idx, int *clk_div)
{
	unsigned long sclk_freq;
	struct se_geni_rsc *rsc = &mas->spi_rsc;
	int ret = 0;

	ret = geni_se_clk_freq_match(&mas->spi_rsc,
				(speed_hz * mas->oversampling), clk_idx,
				&sclk_freq, true);
	if (ret) {
		dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
						__func__, ret, speed_hz);
		return ret;
	}

	*clk_div = ((sclk_freq / mas->oversampling) / speed_hz);
	if (!(*clk_div)) {
		dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
			__func__, sclk_freq, mas->oversampling, speed_hz);
		return -EINVAL;
	}

	dev_dbg(mas->dev, "%s: req %u sclk %lu, idx %d, div %d\n", __func__,
				speed_hz, sclk_freq, *clk_idx, *clk_div);
	ret = clk_set_rate(rsc->se_clk, sclk_freq);
	if (ret)
		dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
							__func__, ret);
	return ret;
}

static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
						int bits_per_word)
{
	int pack_words = 1;
	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
	unsigned long cfg0, cfg1;

	/*
	 * If bits_per_word isn't a byte aligned value, set the packing to be
	 * 1 SPI word per FIFO word.
	 */
	if (!(mas->tx_fifo_width % bits_per_word))
		pack_words = mas->tx_fifo_width / bits_per_word;
	word_len &= ~WORD_LEN_MSK;
	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
	se_get_packing_config(bits_per_word, pack_words, msb_first,
							&cfg0, &cfg1);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: cfg0 %lu cfg1 %lu bpw %d pack_words %d\n", __func__,
		cfg0, cfg1, bits_per_word, pack_words);
}

static int setup_fifo_params(struct spi_device *spi_slv,
					struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	u16 mode = spi_slv->mode;
	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	u32 demux_sel = 0;
	u32 demux_output_inv = 0;
	u32 clk_sel = 0;
	u32 m_clk_cfg = 0;
	int ret = 0;
	int idx;
	int div;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 spi_delay_params = 0;

	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;

	if (mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;

	if (mode & SPI_CPOL)
		cpol |= CPOL;

	if (mode & SPI_CPHA)
		cpha |= CPHA;

	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv |= BIT(spi_slv->chip_select);

	if (spi_slv->controller_data) {
		u32 cs_clk_delay = 0;
		u32 inter_words_delay = 0;

		delay_params =
		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
		cs_clk_delay =
		(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
						& SPI_CS_CLK_DELAY_MSK;
		inter_words_delay =
			delay_params->spi_inter_words_delay &
						SPI_INTER_WORDS_DELAY_MSK;
		spi_delay_params =
			(inter_words_delay | cs_clk_delay);
	}

	demux_sel = spi_slv->chip_select;
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_word_len = spi_slv->bits_per_word;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clks ret(%d) for %d\n",
						ret, mas->cur_speed_hz);
		goto setup_fifo_params_exit;
	}

	clk_sel |= (idx & CLK_SEL_MSK);
	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
		__func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
				clk_sel, cpol, cpha, spi_delay_params);
	/* Ensure message level attributes are written before returning */
	mb();
setup_fifo_params_exit:
	return ret;
}


static int select_xfer_mode(struct spi_master *spi,
				struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int mode = SE_DMA;
	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
							FIFO_IF_DISABLE);
	bool dma_chan_valid =
		!(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));

	/*
	 * If the FIFO interface is disabled and there are no DMA channels,
	 * we can't do this transfer.
	 * If the FIFO interface is disabled we can only use GSI DMA,
	 * else default to SE DMA mode.
	 */
	if (fifo_disable && !dma_chan_valid)
		mode = -EINVAL;
	else if (!fifo_disable)
		mode = SE_DMA;
	else if (dma_chan_valid)
		mode = GSI_DMA;
	return mode;
}

static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
				struct spi_geni_master *mas, u16 mode,
				u32 cs_clk_delay, u32 inter_words_delay)
{
	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
	u8 flags = 0;
	u8 word_len = 0;
	u8 pack = 0;
	int div = 0;
	int idx = 0;
	int ret = 0;

	if (IS_ERR_OR_NULL(c0_tre))
		return c0_tre;

	if (mode & SPI_LOOP)
		flags |= GSI_LOOPBACK_EN;

	if (mode & SPI_CPOL)
		flags |= GSI_CPOL;

	if (mode & SPI_CPHA)
		flags |= GSI_CPHA;

	word_len = xfer->bits_per_word - MIN_WORD_LEN;
	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
		return ERR_PTR(ret);
	}
	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
								word_len);
	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
							inter_words_delay);
	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 1);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: flags 0x%x word %d pack %d idx %d div %d\n",
		__func__, flags, word_len, pack, idx, div);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
		"%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
		cs_clk_delay, inter_words_delay);
	return c0_tre;
}

static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
				struct spi_geni_master *mas)
{
	struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
	int chain;
	int eot;
	int eob;

	if (IS_ERR_OR_NULL(go_tre))
		return go_tre;

	go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
	go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
	go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
	if (cmd == SPI_RX_ONLY) {
		eot = 0;
		chain = 0;
		eob = 1;
	} else {
		eot = 0;
		chain = 1;
		eob = 0;
	}
	go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(0, eot, eob, chain);
	GENI_SE_DBG(mas->ipc, false, mas->dev,
	"%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
		__func__, rx_len, flags, cs, cmd, eot, eob, chain);
	return go_tre;
}

static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
					dma_addr_t buf, u32 len,
					struct spi_geni_master *mas,
					bool is_tx)
{
	if (IS_ERR_OR_NULL(tre))
		return tre;

	tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
	tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
	tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
	tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, is_tx, 0, 0);
	return tre;
}

static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
				void *ptr)
{
	struct spi_master *spi = ptr;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	switch (cb->cb_event) {
	case MSM_GPI_QUP_NOTIFY:
	case MSM_GPI_QUP_MAX_EVENT:
		GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:cb_ev%d status%llu ts%llu count%llu\n",
				__func__, cb->cb_event, cb->status,
				cb->timestamp, cb->count);
		break;
	case MSM_GPI_QUP_ERROR:
	case MSM_GPI_QUP_CH_ERROR:
	case MSM_GPI_QUP_FW_ERROR:
	case MSM_GPI_QUP_PENDING_EVENT:
	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
	case MSM_GPI_QUP_SW_ERROR:
		GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: cb_ev %d status %llu ts %llu count %llu\n",
				__func__, cb->cb_event, cb->status,
				cb->timestamp, cb->count);
		GENI_SE_ERR(mas->ipc, true, mas->dev,
				"err.routine %u, err.type %u, err.code %u\n",
				cb->error_log.routine,
				cb->error_log.type,
				cb->error_log.error_code);
		mas->qn_err = true;
		complete_all(&mas->tx_cb);
		complete_all(&mas->rx_cb);

		break;
	};
}

static void spi_gsi_rx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param =
			(struct msm_gpi_dma_async_tx_cb_param *)cb;
	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	struct spi_master *spi = desc_cb->spi;
	struct spi_transfer *xfer = desc_cb->xfer;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (xfer->rx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
			"%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s\n", __func__);
			complete(&mas->rx_cb);
		} else {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
			"%s: Length mismatch. Expected %d Callback %d\n",
				__func__, xfer->len, cb_param->length);
		}
	}
}

static void spi_gsi_tx_callback(void *cb)
{
	struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
	struct spi_master *spi = desc_cb->spi;
	struct spi_transfer *xfer = desc_cb->xfer;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (xfer->tx_buf) {
		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
			"%s: Unexpected GSI CB error\n", __func__);
			return;
		}
		if (cb_param->length == xfer->len) {
			GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s\n", __func__);
			complete(&mas->tx_cb);
		} else {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
			"%s: Length mismatch. Expected %d Callback %d\n",
				__func__, xfer->len, cb_param->length);
		}
	}
}

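/*
 * Build and submit the GSI TRE chain for one transfer: an optional config0
 * TRE when the word length or speed has changed, a go TRE describing the
 * command, and DMA TREs for the already-mapped tx/rx buffers.
 */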
static int setup_gsi_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				struct spi_device *spi_slv,
				struct spi_master *spi)
{
	int ret = 0;
	struct msm_gpi_tre *c0_tre = NULL;
	struct msm_gpi_tre *go_tre = NULL;
	struct msm_gpi_tre *tx_tre = NULL;
	struct msm_gpi_tre *rx_tre = NULL;
	struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
	struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
	int rx_nent = 0;
	int tx_nent = 0;
	u8 cmd = 0;
	u8 cs = 0;
	u32 rx_len = 0;
	int go_flags = 0;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
	u32 cs_clk_delay = 0;
	u32 inter_words_delay = 0;

	if (spi_slv->controller_data) {
		delay_params =
		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;

		cs_clk_delay =
			delay_params->spi_cs_clk_delay;
		inter_words_delay =
			delay_params->spi_inter_words_delay;
	}

	if ((xfer->bits_per_word != mas->cur_word_len) ||
		(xfer->speed_hz != mas->cur_speed_hz)) {
		mas->cur_word_len = xfer->bits_per_word;
		mas->cur_speed_hz = xfer->speed_hz;
		tx_nent++;
		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
					cs_clk_delay, inter_words_delay);
		if (IS_ERR_OR_NULL(c0_tre)) {
			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
							__func__, ret);
			return PTR_ERR(c0_tre);
		}
	}

	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
		rx_len = ((xfer->len << 3) / mas->cur_word_len);
	} else {
		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;

		rx_len = (xfer->len / bytes_per_word);
	}

	if (xfer->tx_buf && xfer->rx_buf) {
		cmd = SPI_FULL_DUPLEX;
		tx_nent += 2;
		rx_nent++;
	} else if (xfer->tx_buf) {
		cmd = SPI_TX_ONLY;
		tx_nent += 2;
		rx_len = 0;
	} else if (xfer->rx_buf) {
		cmd = SPI_RX_ONLY;
		tx_nent++;
		rx_nent++;
	}

	cs |= spi_slv->chip_select;
	if (!xfer->cs_change) {
		if (!list_is_last(&xfer->transfer_list,
					&spi->cur_msg->transfers))
			go_flags |= FRAGMENTATION;
	}
	go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);

	sg_init_table(xfer_tx_sg, tx_nent);
	if (rx_nent)
		sg_init_table(xfer_rx_sg, rx_nent);

	if (c0_tre)
		sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));

	sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
	mas->gsi[mas->num_xfers].desc_cb.spi = spi;
	mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
	if (cmd & SPI_RX_ONLY) {
		rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
		if (IS_ERR_OR_NULL(rx_tre)) {
			dev_err(mas->dev, "Err setting up rx tre\n");
			return PTR_ERR(rx_tre);
		}
		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
		mas->gsi[mas->num_xfers].rx_desc =
			dmaengine_prep_slave_sg(mas->rx,
				&mas->gsi[mas->num_xfers].rx_sg, rx_nent,
				DMA_DEV_TO_MEM, flags);
		if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
			dev_err(mas->dev, "Err setting up rx desc\n");
			return -EIO;
		}
		mas->gsi[mas->num_xfers].rx_desc->callback =
					spi_gsi_rx_callback;
		mas->gsi[mas->num_xfers].rx_desc->callback_param =
					&mas->gsi[mas->num_xfers].rx_cb_param;
		mas->gsi[mas->num_xfers].rx_cb_param.userdata =
					&mas->gsi[mas->num_xfers].desc_cb;
		mas->num_rx_eot++;
	}

	if (cmd & SPI_TX_ONLY) {
		tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
		tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
		if (IS_ERR_OR_NULL(tx_tre)) {
			dev_err(mas->dev, "Err setting up tx tre\n");
			return PTR_ERR(tx_tre);
		}
		sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
		mas->num_tx_eot++;
	}
	mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
					mas->gsi[mas->num_xfers].tx_sg, tx_nent,
					DMA_MEM_TO_DEV, flags);
	if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
		dev_err(mas->dev, "Err setting up tx desc\n");
		return -EIO;
	}
	mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
	mas->gsi[mas->num_xfers].tx_desc->callback_param =
					&mas->gsi[mas->num_xfers].tx_cb_param;
	mas->gsi[mas->num_xfers].tx_cb_param.userdata =
					&mas->gsi[mas->num_xfers].desc_cb;
	mas->gsi[mas->num_xfers].tx_cookie =
			dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
	if (cmd & SPI_RX_ONLY)
		mas->gsi[mas->num_xfers].rx_cookie =
			dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
	dma_async_issue_pending(mas->tx);
	if (cmd & SPI_RX_ONLY)
		dma_async_issue_pending(mas->rx);
	mas->num_xfers++;
	return ret;
}

static int spi_geni_map_buf(struct spi_geni_master *mas,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf) {
			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
					&xfer->rx_dma, xfer->rx_buf,
					xfer->len, DMA_FROM_DEVICE);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Mapping Rx buffer %d\n", __func__, ret);
				return ret;
			}
		}

		if (xfer->tx_buf) {
			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
					&xfer->tx_dma,
					(void *)xfer->tx_buf,
					xfer->len, DMA_TO_DEVICE);
			if (ret) {
				GENI_SE_ERR(mas->ipc, true, mas->dev,
				"%s: Mapping Tx buffer %d\n", __func__, ret);
				return ret;
			}
		}
	};
	return 0;
}

static void spi_geni_unmap_buf(struct spi_geni_master *mas,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->rx_buf)
			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
						xfer->len, DMA_FROM_DEVICE);
		if (xfer->tx_buf)
			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
						xfer->len, DMA_TO_DEVICE);
	};
}

static int spi_geni_prepare_message(struct spi_master *spi,
					struct spi_message *spi_msg)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);

	if (mas->cur_xfer_mode < 0) {
		dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
							mas->cur_xfer_mode);
		ret = -EINVAL;
	} else if (mas->cur_xfer_mode == GSI_DMA) {
		memset(mas->gsi, 0,
				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
		geni_se_select_mode(mas->base, GSI_DMA);
		ret = spi_geni_map_buf(mas, spi_msg);
	} else {
		geni_se_select_mode(mas->base, mas->cur_xfer_mode);
		ret = setup_fifo_params(spi_msg->spi, spi);
	}

	return ret;
}

static int spi_geni_unprepare_message(struct spi_master *spi_mas,
					struct spi_message *spi_msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);

	mas->cur_speed_hz = 0;
	mas->cur_word_len = 0;
	if (mas->cur_xfer_mode == GSI_DMA)
		spi_geni_unmap_buf(mas, spi_msg);
	return 0;
}

static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	int ret = 0;
	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
	struct se_geni_rsc *rsc = &mas->spi_rsc;

	/* Adjust the IB based on the max speed of the slave. */
	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
	if (mas->shared_se) {
		struct se_geni_rsc *rsc;
		int ret = 0;

		rsc = &mas->spi_rsc;
		ret = pinctrl_select_state(rsc->geni_pinctrl,
						rsc->geni_gpio_active);
		if (ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__, ret);
	}

	ret = pm_runtime_get_sync(mas->dev);
	if (ret < 0) {
		dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
							__func__, ret);
		pm_runtime_put_noidle(mas->dev);
		goto exit_prepare_transfer_hardware;
	} else {
		ret = 0;
	}

	if (unlikely(!mas->setup)) {
		int proto = get_se_proto(mas->base);
		unsigned int major;
		unsigned int minor;
		unsigned int step;
		int hw_ver;

		if (unlikely(proto != SPI)) {
			dev_err(mas->dev, "Invalid proto %d\n", proto);
			return -ENXIO;
		}
		geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
		mas->oversampling = 1;
		/* Transmit an entire FIFO worth of data per IRQ */
		mas->tx_wm = 1;

		mas->tx = dma_request_slave_channel(mas->dev, "tx");
		if (IS_ERR_OR_NULL(mas->tx)) {
			dev_info(mas->dev, "Failed to get tx DMA ch %ld",
							PTR_ERR(mas->tx));
		} else {
			mas->rx = dma_request_slave_channel(mas->dev, "rx");
			if (IS_ERR_OR_NULL(mas->rx)) {
				dev_info(mas->dev, "Failed to get rx DMA ch %ld",
							PTR_ERR(mas->rx));
				dma_release_channel(mas->tx);
			}
			mas->gsi = devm_kzalloc(mas->dev,
				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
				GFP_KERNEL);
			if (IS_ERR_OR_NULL(mas->gsi)) {
				dev_err(mas->dev, "Failed to get GSI mem\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
			mas->tx_event.init.callback = spi_gsi_ch_cb;
			mas->tx_event.init.cb_param = spi;
			mas->tx_event.cmd = MSM_GPI_INIT;
			mas->tx->private = &mas->tx_event;
			mas->rx_event.init.callback = spi_gsi_ch_cb;
			mas->rx_event.init.cb_param = spi;
			mas->rx_event.cmd = MSM_GPI_INIT;
			mas->rx->private = &mas->rx_event;
			if (dmaengine_slave_config(mas->tx, NULL)) {
				dev_err(mas->dev, "Failed to Config Tx\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}
			if (dmaengine_slave_config(mas->rx, NULL)) {
				dev_err(mas->dev, "Failed to Config Rx\n");
				dma_release_channel(mas->tx);
				dma_release_channel(mas->rx);
				mas->tx = NULL;
				mas->rx = NULL;
				goto setup_ipc;
			}

		}
setup_ipc:
		mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
		dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
				mas->tx_fifo_depth, mas->rx_fifo_depth,
				mas->tx_fifo_width);
		mas->setup = true;
		hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major,
							&minor, &step);
		if (hw_ver)
			dev_err(mas->dev, "%s:Err getting HW version %d\n",
							__func__, hw_ver);
		else {
			if ((major == 1) && (minor == 0))
				mas->oversampling = 2;
			GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s:Major:%d Minor:%d step:%dos%d\n",
				__func__, major, minor, step, mas->oversampling);
		}
		mas->shared_se =
			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
							FIFO_IF_DISABLE);
	}
exit_prepare_transfer_hardware:
	return ret;
}

static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	if (mas->shared_se) {
		struct se_geni_rsc *rsc;
		int ret = 0;

		rsc = &mas->spi_rsc;
		ret = pinctrl_select_state(rsc->geni_pinctrl,
						rsc->geni_gpio_sleep);
		if (ret)
			GENI_SE_ERR(mas->ipc, false, NULL,
			"%s: Error %d pinctrl_select_state\n", __func__, ret);
	}

	pm_runtime_mark_last_busy(mas->dev);
	pm_runtime_put_autosuspend(mas->dev);
	return 0;
}

893 struct spi_geni_master *mas, u16 mode,
894 struct spi_master *spi)
895{
896 u32 m_cmd = 0;
897 u32 m_param = 0;
898 u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
899 u32 trans_len = 0;
900
901 if (xfer->bits_per_word != mas->cur_word_len) {
902 spi_setup_word_len(mas, mode, xfer->bits_per_word);
903 mas->cur_word_len = xfer->bits_per_word;
904 }
905
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600906 /* Speed and bits per word can be overridden per transfer */
907 if (xfer->speed_hz != mas->cur_speed_hz) {
908 int ret = 0;
Girish Mahadevanda008762017-11-27 11:31:21 -0700909 u32 clk_sel = 0;
910 u32 m_clk_cfg = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600911 int idx = 0;
912 int div = 0;
913
914 ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
915 if (ret) {
916 dev_err(mas->dev, "%s:Err setting clks:%d\n",
917 __func__, ret);
918 return;
919 }
920 mas->cur_speed_hz = xfer->speed_hz;
921 clk_sel |= (idx & CLK_SEL_MSK);
922 m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
923 geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
924 geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
925 }
926
927 mas->tx_rem_bytes = 0;
928 mas->rx_rem_bytes = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700929 if (xfer->tx_buf && xfer->rx_buf)
930 m_cmd = SPI_FULL_DUPLEX;
931 else if (xfer->tx_buf)
932 m_cmd = SPI_TX_ONLY;
933 else if (xfer->rx_buf)
934 m_cmd = SPI_RX_ONLY;
935
936 spi_tx_cfg &= ~CS_TOGGLE;
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600937 if (!(mas->cur_word_len % MIN_WORD_LEN)) {
938 trans_len =
939 ((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK;
940 } else {
941 int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;
942
943 trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
944 }
Girish Mahadevan36df8752017-11-16 10:53:15 -0700945
946 if (!xfer->cs_change) {
947 if (!list_is_last(&xfer->transfer_list,
948 &spi->cur_msg->transfers))
949 m_param |= FRAGMENTATION;
950 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700951
952 mas->cur_xfer = xfer;
953 if (m_cmd & SPI_TX_ONLY) {
954 mas->tx_rem_bytes = xfer->len;
955 geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
956 }
957
958 if (m_cmd & SPI_RX_ONLY) {
959 geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
960 mas->rx_rem_bytes = xfer->len;
961 }
Dilip Kotad70fa152018-05-18 14:26:23 +0530962
963 if (trans_len > (mas->tx_fifo_depth * mas->tx_fifo_width)) {
964 if (mas->cur_xfer_mode != SE_DMA) {
965 mas->cur_xfer_mode = SE_DMA;
966 geni_se_select_mode(mas->base, mas->cur_xfer_mode);
967 }
968 } else {
969 if (mas->cur_xfer_mode != FIFO_MODE) {
970 mas->cur_xfer_mode = FIFO_MODE;
971 geni_se_select_mode(mas->base, mas->cur_xfer_mode);
972 }
973 }
974
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700975 geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
976 geni_setup_m_cmd(mas->base, m_cmd, m_param);
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600977 GENI_SE_DBG(mas->ipc, false, mas->dev,
Dilip Kotad70fa152018-05-18 14:26:23 +0530978 "%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n",
Girish Mahadevan36df8752017-11-16 10:53:15 -0700979 __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
Dilip Kotad70fa152018-05-18 14:26:23 +0530980 xfer->cs_change, mas->cur_xfer_mode);
981 if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) {
982 int ret = 0;
983
984 ret = geni_se_rx_dma_prep(mas->wrapper_dev, mas->base,
985 xfer->rx_buf, xfer->len, &xfer->rx_dma);
986 if (ret)
987 GENI_SE_ERR(mas->ipc, true, mas->dev,
988 "Failed to setup Rx dma %d\n", ret);
989 }
990 if (m_cmd & SPI_TX_ONLY) {
991 if (mas->cur_xfer_mode == FIFO_MODE) {
992 geni_write_reg(mas->tx_wm, mas->base,
993 SE_GENI_TX_WATERMARK_REG);
994 } else if (mas->cur_xfer_mode == SE_DMA) {
995 int ret = 0;
996
997 ret = geni_se_tx_dma_prep(mas->wrapper_dev, mas->base,
998 (void *)xfer->tx_buf, xfer->len,
999 &xfer->tx_dma);
1000 if (ret)
1001 GENI_SE_ERR(mas->ipc, true, mas->dev,
1002 "Failed to setup tx dma %d\n", ret);
1003 }
1004 }
1005
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001006 /* Ensure all writes are done before the WM interrupt */
1007 mb();
1008}
1009
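/*
 * Recover from a transfer timeout in FIFO/SE-DMA mode: cancel the pending
 * command, fall back to an abort if the cancel itself times out, and unmap
 * any SE-DMA buffers that were prepared for the transfer.
 */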
static void handle_fifo_timeout(struct spi_geni_master *mas,
					struct spi_transfer *xfer)
{
	unsigned long timeout;

	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	reinit_completion(&mas->xfer_done);
	geni_cancel_m_cmd(mas->base);
	if (mas->cur_xfer_mode == FIFO_MODE)
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
	/* Ensure cmd cancel is written */
	mb();
	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout) {
		reinit_completion(&mas->xfer_done);
		geni_abort_m_cmd(mas->base);
		/* Ensure cmd abort is written */
		mb();
		timeout = wait_for_completion_timeout(&mas->xfer_done,
								HZ);
		if (!timeout)
			dev_err(mas->dev,
				"Failed to cancel/abort m_cmd\n");
	}
	if (mas->cur_xfer_mode == SE_DMA) {
		if (xfer->tx_buf)
			geni_se_tx_dma_unprep(mas->wrapper_dev,
					xfer->tx_dma, xfer->len);
		if (xfer->rx_buf)
			geni_se_rx_dma_unprep(mas->wrapper_dev,
					xfer->rx_dma, xfer->len);
	}
}

static int spi_geni_transfer_one(struct spi_master *spi,
				struct spi_device *slv,
				struct spi_transfer *xfer)
{
	int ret = 0;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long timeout;

	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
		return -EINVAL;
	}

	if (mas->cur_xfer_mode != GSI_DMA) {
		reinit_completion(&mas->xfer_done);
		setup_fifo_xfer(xfer, mas, slv->mode, spi);
		timeout = wait_for_completion_timeout(&mas->xfer_done,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
		if (!timeout) {
			GENI_SE_ERR(mas->ipc, true, mas->dev,
				"Xfer[len %d tx %pK rx %pK n %d] timed out.\n",
					xfer->len, xfer->tx_buf,
					xfer->rx_buf,
					xfer->bits_per_word);
			mas->cur_xfer = NULL;
			ret = -ETIMEDOUT;
			goto err_fifo_geni_transfer_one;
		}

		if (mas->cur_xfer_mode == SE_DMA) {
			if (xfer->tx_buf)
				geni_se_tx_dma_unprep(mas->wrapper_dev,
						xfer->tx_dma, xfer->len);
			if (xfer->rx_buf)
				geni_se_rx_dma_unprep(mas->wrapper_dev,
						xfer->rx_dma, xfer->len);
		}
	} else {
		mas->num_tx_eot = 0;
		mas->num_rx_eot = 0;
		mas->num_xfers = 0;
		reinit_completion(&mas->tx_cb);
		reinit_completion(&mas->rx_cb);

		setup_gsi_xfer(xfer, mas, slv, spi);
		if ((mas->num_xfers >= NUM_SPI_XFER) ||
			(list_is_last(&xfer->transfer_list,
					&spi->cur_msg->transfers))) {
			int i;

			for (i = 0 ; i < mas->num_tx_eot; i++) {
				timeout =
				wait_for_completion_timeout(
					&mas->tx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
				if (timeout <= 0) {
					GENI_SE_ERR(mas->ipc, true, mas->dev,
					"Tx[%d] timeout%lu\n", i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			for (i = 0 ; i < mas->num_rx_eot; i++) {
				timeout =
				wait_for_completion_timeout(
					&mas->rx_cb,
					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
				if (timeout <= 0) {
					GENI_SE_ERR(mas->ipc, true, mas->dev,
					"Rx[%d] timeout%lu\n", i, timeout);
					ret = -ETIMEDOUT;
					goto err_gsi_geni_transfer_one;
				}
			}
			if (mas->qn_err) {
				ret = -EIO;
				mas->qn_err = false;
				goto err_gsi_geni_transfer_one;
			}
		}
	}
	return ret;
err_gsi_geni_transfer_one:
	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
	dmaengine_terminate_all(mas->tx);
	return ret;
err_fifo_geni_transfer_one:
	handle_fifo_timeout(mas, xfer);
	return ret;
}

static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
	int i = 0;
	int tx_fifo_width = (mas->tx_fifo_width >> 3);
	int max_bytes = 0;
	const u8 *tx_buf = NULL;

	if (!mas->cur_xfer)
		return;

	/*
	 * For non-byte aligned bits-per-word values:
	 * Assumption is that each SPI word will be accommodated in
	 * ceil(bits_per_word / bits_per_byte) bytes
	 * and the next SPI word starts at the next byte.
	 * In such cases, we can fit 1 SPI word per FIFO word so adjust the
	 * max byte that can be sent per IRQ accordingly.
	 */
	if ((mas->tx_fifo_width % mas->cur_word_len))
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) *
				((mas->cur_word_len / BITS_PER_BYTE) + 1);
	else
		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
	tx_buf = mas->cur_xfer->tx_buf;
	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
	while (i < max_bytes) {
		int j;
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = tx_fifo_width;
		int bytes_to_write = 0;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		bytes_to_write = min_t(int, (max_bytes - i), bytes_per_fifo);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
		/* Ensure FIFO writes are written in order */
		mb();
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes) {
		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
		/* Barrier here before return to prevent further ISRs */
		mb();
	}
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	int i = 0;
	int fifo_width = (mas->tx_fifo_width >> 3);
	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
	int rx_bytes = 0;
	int rx_wc = 0;
	u8 *rx_buf = NULL;

	if (!mas->cur_xfer)
		return;

	rx_buf = mas->cur_xfer->rx_buf;
	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
	if (rx_fifo_status & RX_LAST) {
		int rx_last_byte_valid =
			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
					>> RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
			rx_wc -= 1;
			rx_bytes += rx_last_byte_valid;
		}
	}
	if (!(mas->tx_fifo_width % mas->cur_word_len))
		rx_bytes += rx_wc * fifo_width;
	else
		rx_bytes += rx_wc *
			((mas->cur_word_len / BITS_PER_BYTE) + 1);
	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int bytes_per_fifo = fifo_width;
		int read_bytes = 0;
		int j;

		if ((mas->tx_fifo_width % mas->cur_word_len))
			bytes_per_fifo =
				(mas->cur_word_len / BITS_PER_BYTE) + 1;
		read_bytes = min_t(int, (rx_bytes - i), bytes_per_fifo);
		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < read_bytes; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

static irqreturn_t geni_spi_irq(int irq, void *dev)
{
	struct spi_geni_master *mas = dev;
	u32 m_irq = 0;

	if (pm_runtime_status_suspended(dev)) {
		GENI_SE_DBG(mas->ipc, false, mas->dev,
				"%s: device is suspended\n", __func__);
		goto exit_geni_spi_irq;
	}
	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
	if (mas->cur_xfer_mode == FIFO_MODE) {
		if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
					(m_irq & M_RX_FIFO_LAST_EN))
			geni_spi_handle_rx(mas);

		if ((m_irq & M_TX_FIFO_WATERMARK_EN))
			geni_spi_handle_tx(mas);

		if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
			(m_irq & M_CMD_ABORT_EN)) {
			complete(&mas->xfer_done);
			/*
			 * If this happens, then a CMD_DONE came before all the
			 * buffer bytes were sent out. This is unusual, log
			 * this condition and disable the WM interrupt to
			 * prevent the system from stalling due to an
			 * interrupt storm.
			 * If this happens when all Rx bytes haven't been
			 * received, log the condition.
			 */
			if (mas->tx_rem_bytes) {
				geni_write_reg(0, mas->base,
						SE_GENI_TX_WATERMARK_REG);
				GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s:Premature Done.tx_rem%d bpw%d\n",
					__func__, mas->tx_rem_bytes,
						mas->cur_word_len);
			}
			if (mas->rx_rem_bytes)
				GENI_SE_DBG(mas->ipc, false, mas->dev,
					"%s:Premature Done.rx_rem%d bpw%d\n",
					__func__, mas->rx_rem_bytes,
						mas->cur_word_len);
		}
	} else if (mas->cur_xfer_mode == SE_DMA) {
		u32 dma_tx_status = geni_read_reg(mas->base,
							SE_DMA_TX_IRQ_STAT);
		u32 dma_rx_status = geni_read_reg(mas->base,
							SE_DMA_RX_IRQ_STAT);

		if (dma_tx_status)
			geni_write_reg(dma_tx_status, mas->base,
						SE_DMA_TX_IRQ_CLR);
		if (dma_rx_status)
			geni_write_reg(dma_rx_status, mas->base,
						SE_DMA_RX_IRQ_CLR);
		if (dma_tx_status & TX_DMA_DONE)
			mas->tx_rem_bytes = 0;
		if (dma_rx_status & RX_DMA_DONE)
			mas->rx_rem_bytes = 0;
		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
			complete(&mas->xfer_done);
		if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
			complete(&mas->xfer_done);
	}
exit_geni_spi_irq:
	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
	return IRQ_HANDLED;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *spi;
	struct spi_geni_master *geni_mas;
	struct se_geni_rsc *rsc;
	struct resource *res;
	struct platform_device *wrapper_pdev;
	struct device_node *wrapper_ph_node;
	bool rt_pri;

	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
	if (!spi) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
		goto spi_geni_probe_err;
	}

	platform_set_drvdata(pdev, spi);
	geni_mas = spi_master_get_devdata(spi);
	rsc = &geni_mas->spi_rsc;
	geni_mas->dev = &pdev->dev;
	spi->dev.of_node = pdev->dev.of_node;
	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
					"qcom,wrapper-core", 0);
	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
		ret = PTR_ERR(wrapper_ph_node);
		dev_err(&pdev->dev, "No wrapper core defined\n");
		goto spi_geni_probe_err;
	}
	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
	of_node_put(wrapper_ph_node);
	if (IS_ERR_OR_NULL(wrapper_pdev)) {
		ret = PTR_ERR(wrapper_pdev);
		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
		goto spi_geni_probe_err;
	}
	geni_mas->wrapper_dev = &wrapper_pdev->dev;
	geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
	ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
				(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
	if (ret) {
		dev_err(&pdev->dev, "Error geni_se_resources_init\n");
		goto spi_geni_probe_err;
	}

	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
		dev_err(&pdev->dev, "No pinctrl config specified!\n");
		ret = PTR_ERR(rsc->geni_pinctrl);
		goto spi_geni_probe_err;
	}

	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_DEFAULT);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
		dev_err(&pdev->dev, "No default config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_active);
		goto spi_geni_probe_err;
	}

	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
							PINCTRL_SLEEP);
	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
		dev_err(&pdev->dev, "No sleep config specified!\n");
		ret = PTR_ERR(rsc->geni_gpio_sleep);
		goto spi_geni_probe_err;
	}

	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
	if (IS_ERR(rsc->se_clk)) {
		ret = PTR_ERR(rsc->se_clk);
		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
	if (IS_ERR(rsc->m_ahb_clk)) {
		ret = PTR_ERR(rsc->m_ahb_clk);
		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
	if (IS_ERR(rsc->s_ahb_clk)) {
		ret = PTR_ERR(rsc->s_ahb_clk);
		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
				&spi->max_speed_hz)) {
		dev_err(&pdev->dev, "Max frequency not specified.\n");
		ret = -ENXIO;
		goto spi_geni_probe_err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
	if (!res) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "Err getting IO region\n");
		goto spi_geni_probe_err;
	}

	rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
	if (rt_pri)
		spi->rt = true;

	geni_mas->phys_addr = res->start;
	geni_mas->size = resource_size(res);
	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
						resource_size(res));
	if (!geni_mas->base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Err IO mapping iomem\n");
		goto spi_geni_probe_err;
	}

	geni_mas->irq = platform_get_irq(pdev, 0);
	if (geni_mas->irq < 0) {
		dev_err(&pdev->dev, "Err getting IRQ\n");
		ret = geni_mas->irq;
		goto spi_geni_probe_unmap;
	}
	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
			IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
	if (ret) {
		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
					geni_mas->irq, ret);
		goto spi_geni_probe_unmap;
	}

	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = SPI_NUM_CHIPSELECT;
	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
	spi->prepare_message = spi_geni_prepare_message;
	spi->unprepare_message = spi_geni_unprepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->unprepare_transfer_hardware
			= spi_geni_unprepare_transfer_hardware;
	spi->auto_runtime_pm = false;

	init_completion(&geni_mas->xfer_done);
	init_completion(&geni_mas->tx_cb);
	init_completion(&geni_mas->rx_cb);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTO_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	ret = spi_register_master(spi);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register SPI master\n");
		goto spi_geni_probe_unmap;
	}
	return ret;
spi_geni_probe_unmap:
	devm_iounmap(&pdev->dev, geni_mas->base);
spi_geni_probe_err:
	spi_master_put(spi);
	return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);

	spi_unregister_master(master);
	se_geni_resources_off(&geni_mas->spi_rsc);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM
static int spi_geni_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->shared_se) {
		ret = se_geni_clks_off(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,
			"%s: Error %d turning off clocks\n", __func__, ret);
	} else {
		ret = se_geni_resources_off(&geni_mas->spi_rsc);
	}
	return ret;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	if (geni_mas->shared_se) {
		ret = se_geni_clks_on(&geni_mas->spi_rsc);
		if (ret)
			GENI_SE_ERR(geni_mas->ipc, false, NULL,
			"%s: Error %d turning on clocks\n", __func__, ret);
	} else {
		ret = se_geni_resources_on(&geni_mas->spi_rsc);
	}
	return ret;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_status_suspended(dev)) {
		struct spi_master *spi = get_spi_master(dev);
		struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

		if (list_empty(&spi->queue) && !spi->cur_msg) {
			GENI_SE_ERR(geni_mas->ipc, true, dev,
					"%s: Force suspend", __func__);
			ret = spi_geni_runtime_suspend(dev);
			if (ret) {
				GENI_SE_ERR(geni_mas->ipc, true, dev,
					"Force suspend Failed:%d", ret);
			} else {
				pm_runtime_disable(dev);
				pm_runtime_set_suspended(dev);
				pm_runtime_enable(dev);
			}
		} else {
			ret = -EBUSY;
		}
	}
	return ret;
}
#else
static int spi_geni_runtime_suspend(struct device *dev)
{
	return 0;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,spi-geni" },
	{}
};

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "spi_geni",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_geni");