Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001/*
Karthikeyan Ramasubramanian465f10d2018-01-02 23:03:41 -07002 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/clk.h>
Girish Mahadevan9a7c9442017-08-15 12:10:09 -060015#include <linux/dmaengine.h>
16#include <linux/dma-mapping.h>
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070017#include <linux/interrupt.h>
Girish Mahadevan9a7c9442017-08-15 12:10:09 -060018#include <linux/ipc_logging.h>
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070019#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of.h>
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -060022#include <linux/of_platform.h>
Karthikeyan Ramasubramanian9a633402017-04-06 16:01:11 -060023#include <linux/pm_runtime.h>
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070024#include <linux/qcom-geni-se.h>
Girish Mahadevan9a7c9442017-08-15 12:10:09 -060025#include <linux/msm_gpi.h>
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070026#include <linux/spi/spi.h>
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -060027#include <linux/spi/spi-geni-qcom.h>
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070028
29#define SPI_NUM_CHIPSELECT (4)
30#define SPI_XFER_TIMEOUT_MS (250)
Girish Mahadevan488a9732018-01-09 17:48:13 -070031#define SPI_AUTO_SUSPEND_DELAY (250)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070032/* SPI SE specific registers */
33#define SE_SPI_CPHA (0x224)
34#define SE_SPI_LOOPBACK (0x22C)
35#define SE_SPI_CPOL (0x230)
36#define SE_SPI_DEMUX_OUTPUT_INV (0x24C)
37#define SE_SPI_DEMUX_SEL (0x250)
38#define SE_SPI_TRANS_CFG (0x25C)
39#define SE_SPI_WORD_LEN (0x268)
40#define SE_SPI_TX_TRANS_LEN (0x26C)
41#define SE_SPI_RX_TRANS_LEN (0x270)
42#define SE_SPI_PRE_POST_CMD_DLY (0x274)
43#define SE_SPI_DELAY_COUNTERS (0x278)
44
45/* SE_SPI_CPHA register fields */
46#define CPHA (BIT(0))
47
48/* SE_SPI_LOOPBACK register fields */
49#define LOOPBACK_ENABLE (0x1)
50#define NORMAL_MODE (0x0)
51#define LOOPBACK_MSK (GENMASK(1, 0))
52
53/* SE_SPI_CPOL register fields */
54#define CPOL (BIT(2))
55
56/* SE_SPI_DEMUX_OUTPUT_INV register fields */
57#define CS_DEMUX_OUTPUT_INV_MSK (GENMASK(3, 0))
58
59/* SE_SPI_DEMUX_SEL register fields */
60#define CS_DEMUX_OUTPUT_SEL (GENMASK(3, 0))
61
62/* SE_SPI_TX_TRANS_CFG register fields */
63#define CS_TOGGLE (BIT(0))
64
65/* SE_SPI_WORD_LEN register fields */
66#define WORD_LEN_MSK (GENMASK(9, 0))
67#define MIN_WORD_LEN (4)
68
69/* SE_SPI_TX/RX_TRANS_LEN register fields */
70#define TRANS_LEN_MSK (GENMASK(23, 0))
71
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -060072/* SE_SPI_DELAY_COUNTERS register fields */
73#define SPI_INTER_WORDS_DELAY_MSK (GENMASK(9, 0))
74#define SPI_CS_CLK_DELAY_MSK (GENMASK(19, 10))
75#define SPI_CS_CLK_DELAY_SHFT (10)
76
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070077/* M_CMD OP codes for SPI */
78#define SPI_TX_ONLY (1)
79#define SPI_RX_ONLY (2)
80#define SPI_FULL_DUPLEX (3)
81#define SPI_TX_RX (7)
82#define SPI_CS_ASSERT (8)
83#define SPI_CS_DEASSERT (9)
84#define SPI_SCK_ONLY (10)
85/* M_CMD params for SPI */
Girish Mahadevan061b9e32017-05-18 10:25:43 -060086#define SPI_PRE_CMD_DELAY BIT(0)
87#define TIMESTAMP_BEFORE BIT(1)
88#define FRAGMENTATION BIT(2)
89#define TIMESTAMP_AFTER BIT(3)
90#define POST_CMD_DELAY BIT(4)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -070091
Alok Chauhanb89cb552018-06-12 11:28:34 +053092#define SPI_CORE2X_VOTE (7600)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -060093/* GSI CONFIG0 TRE Params */
94/* Flags bit fields */
95#define GSI_LOOPBACK_EN (BIT(0))
96#define GSI_CS_TOGGLE (BIT(3))
97#define GSI_CPHA (BIT(4))
98#define GSI_CPOL (BIT(5))
99
100#define MAX_TX_SG (3)
101#define NUM_SPI_XFER (8)
102
103struct gsi_desc_cb {
104 struct spi_master *spi;
105 struct spi_transfer *xfer;
106};
107
108struct spi_geni_gsi {
109 struct msm_gpi_tre config0_tre;
110 struct msm_gpi_tre go_tre;
111 struct msm_gpi_tre tx_dma_tre;
112 struct msm_gpi_tre rx_dma_tre;
113 struct scatterlist tx_sg[MAX_TX_SG];
114 struct scatterlist rx_sg;
115 dma_cookie_t tx_cookie;
116 dma_cookie_t rx_cookie;
117 struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
118 struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
119 struct dma_async_tx_descriptor *tx_desc;
120 struct dma_async_tx_descriptor *rx_desc;
121 struct gsi_desc_cb desc_cb;
122};
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -0600123
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700124struct spi_geni_master {
125 struct se_geni_rsc spi_rsc;
126 resource_size_t phys_addr;
127 resource_size_t size;
128 void __iomem *base;
129 int irq;
130 struct device *dev;
131 int rx_fifo_depth;
132 int tx_fifo_depth;
133 int tx_fifo_width;
134 int tx_wm;
135 bool setup;
136 u32 cur_speed_hz;
137 int cur_word_len;
138 unsigned int tx_rem_bytes;
139 unsigned int rx_rem_bytes;
140 struct spi_transfer *cur_xfer;
141 struct completion xfer_done;
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -0600142 struct device *wrapper_dev;
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600143 int oversampling;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600144 struct spi_geni_gsi *gsi;
145 struct dma_chan *tx;
146 struct dma_chan *rx;
147 struct msm_gpi_ctrl tx_event;
148 struct msm_gpi_ctrl rx_event;
149 struct completion tx_cb;
150 struct completion rx_cb;
151 bool qn_err;
152 int cur_xfer_mode;
153 int num_tx_eot;
154 int num_rx_eot;
155 int num_xfers;
156 void *ipc;
Girish Mahadevan488a9732018-01-09 17:48:13 -0700157 bool shared_se;
Dilip Kota0a4a9622018-06-14 17:32:26 +0530158 bool dis_autosuspend;
Dilip Kotab5fa8be2018-12-06 19:03:17 +0530159 bool cmd_done;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700160};
161
162static struct spi_master *get_spi_master(struct device *dev)
163{
164 struct platform_device *pdev = to_platform_device(dev);
165 struct spi_master *spi = platform_get_drvdata(pdev);
166
167 return spi;
168}
169
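/*
 * Pick a source clock that can generate (speed_hz * oversampling) and
 * derive the serial clock divider from it. For example (illustrative
 * numbers only), a 100 MHz source with oversampling 1 and a requested
 * 25 MHz SCLK gives clk_div = DIV_ROUND_UP(100 MHz, 25 MHz) = 4.
 */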
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600170static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
171 int *clk_idx, int *clk_div)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700172{
173 unsigned long sclk_freq;
Prudhvi Yarlagaddaedee5fe2018-11-23 12:20:28 +0530174 unsigned long res_freq;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700175 struct se_geni_rsc *rsc = &mas->spi_rsc;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600176 int ret = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700177
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600178 ret = geni_se_clk_freq_match(&mas->spi_rsc,
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600179 (speed_hz * mas->oversampling), clk_idx,
Prudhvi Yarlagaddaedee5fe2018-11-23 12:20:28 +0530180 &sclk_freq, false);
Girish Mahadevan13f3f002017-06-05 14:27:57 -0600181 if (ret) {
182 dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
183 __func__, ret, speed_hz);
184 return ret;
185 }
Girish Mahadevan6727acc2017-04-05 12:40:19 -0600186
Prudhvi Yarlagaddaedee5fe2018-11-23 12:20:28 +0530187 *clk_div = DIV_ROUND_UP(sclk_freq, (mas->oversampling*speed_hz));
188
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600189 if (!(*clk_div)) {
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600190 dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
191 __func__, sclk_freq, mas->oversampling, speed_hz);
Girish Mahadevan6727acc2017-04-05 12:40:19 -0600192 return -EINVAL;
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600193 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700194
Prudhvi Yarlagaddaedee5fe2018-11-23 12:20:28 +0530195 res_freq = (sclk_freq / (*clk_div));
196
197 dev_dbg(mas->dev, "%s: req %u resultant %lu sclk %lu, idx %d, div %d\n",
198 __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
199
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700200 ret = clk_set_rate(rsc->se_clk, sclk_freq);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600201 if (ret)
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600202 dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
203 __func__, ret);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600204 return ret;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700205}
206
207static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
208 int bits_per_word)
209{
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600210 int pack_words = 1;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700211 bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
212 u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600213 unsigned long cfg0, cfg1;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700214
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600215 /*
216 * If the FIFO word width isn't an integer multiple of bits_per_word,
217 * fall back to packing 1 SPI word per FIFO word.
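 * For example, with a 32-bit FIFO word, 8-bit words pack four SPI words
 * per FIFO word, while 12-bit words pack only one.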
218 */
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600219 if (!(mas->tx_fifo_width % bits_per_word))
220 pack_words = mas->tx_fifo_width / bits_per_word;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700221 word_len &= ~WORD_LEN_MSK;
222 word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
223 se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
224 geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600225 se_get_packing_config(bits_per_word, pack_words, msb_first,
226 &cfg0, &cfg1);
227 GENI_SE_DBG(mas->ipc, false, mas->dev,
228 "%s: cfg0 %lu cfg1 %lu bpw %d pack_words %d\n", __func__,
229 cfg0, cfg1, bits_per_word, pack_words);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700230}
231
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600232static int setup_fifo_params(struct spi_device *spi_slv,
233 struct spi_master *spi)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700234{
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600235 struct spi_geni_master *mas = spi_master_get_devdata(spi);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700236 u16 mode = spi_slv->mode;
237 u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
238 u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
239 u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
Girish Mahadevan061b9e32017-05-18 10:25:43 -0600240 u32 demux_sel = 0;
241 u32 demux_output_inv = 0;
Girish Mahadevanda008762017-11-27 11:31:21 -0700242 u32 clk_sel = 0;
243 u32 m_clk_cfg = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700244 int ret = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600245 int idx;
246 int div;
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600247 struct spi_geni_qcom_ctrl_data *delay_params = NULL;
248 u32 spi_delay_params = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700249
250 loopback_cfg &= ~LOOPBACK_MSK;
251 cpol &= ~CPOL;
252 cpha &= ~CPHA;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700253
254 if (mode & SPI_LOOP)
255 loopback_cfg |= LOOPBACK_ENABLE;
256
257 if (mode & SPI_CPOL)
258 cpol |= CPOL;
259
260 if (mode & SPI_CPHA)
261 cpha |= CPHA;
262
263 if (spi_slv->mode & SPI_CS_HIGH)
264 demux_output_inv |= BIT(spi_slv->chip_select);
265
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600266 if (spi_slv->controller_data) {
267 u32 cs_clk_delay = 0;
268 u32 inter_words_delay = 0;
269
270 delay_params =
271 (struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
272 cs_clk_delay =
273 (delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
274 & SPI_CS_CLK_DELAY_MSK;
275 inter_words_delay =
276 delay_params->spi_inter_words_delay &
277 SPI_INTER_WORDS_DELAY_MSK;
278 spi_delay_params =
279 (inter_words_delay | cs_clk_delay);
280 }
281
Girish Mahadevan061b9e32017-05-18 10:25:43 -0600282 demux_sel = spi_slv->chip_select;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700283 mas->cur_speed_hz = spi_slv->max_speed_hz;
284 mas->cur_word_len = spi_slv->bits_per_word;
285
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600286 ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700287 if (ret) {
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600288 dev_err(mas->dev, "Err setting clks ret(%d) for %d\n",
Girish Mahadevan6727acc2017-04-05 12:40:19 -0600289 ret, mas->cur_speed_hz);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600290 goto setup_fifo_params_exit;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700291 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600292
293 clk_sel |= (idx & CLK_SEL_MSK);
294 m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700295 spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
296 geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
297 geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
298 geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
299 geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
300 geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600301 geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
302 geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600303 geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600304 GENI_SE_DBG(mas->ipc, false, mas->dev,
305 "%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
306 __func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
307 GENI_SE_DBG(mas->ipc, false, mas->dev,
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600308 "%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
309 clk_sel, cpol, cpha, spi_delay_params);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700310 /* Ensure message level attributes are written before returning */
311 mb();
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600312setup_fifo_params_exit:
313 return ret;
314}
315
316
317static int select_xfer_mode(struct spi_master *spi,
318 struct spi_message *spi_msg)
319{
320 struct spi_geni_master *mas = spi_master_get_devdata(spi);
Dilip Kotad70fa152018-05-18 14:26:23 +0530321 int mode = SE_DMA;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600322 int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
323 FIFO_IF_DISABLE);
324 bool dma_chan_valid =
325 !(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));
326
327 /*
328 * If FIFO Interface is disabled and there are no DMA channels then we
329 * can't do this transfer.
330 * If FIFO interface is disabled, we can do GSI only,
331 * else pick SE DMA mode.
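 * (FIFO interface enabled: SE_DMA; FIFO interface disabled with valid
 * GSI channels: GSI_DMA; FIFO interface disabled without channels:
 * return an error.)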
332 */
333 if (fifo_disable && !dma_chan_valid)
334 mode = -EINVAL;
Dilip Kotad70fa152018-05-18 14:26:23 +0530335 else if (!fifo_disable)
336 mode = SE_DMA;
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600337 else if (dma_chan_valid)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600338 mode = GSI_DMA;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600339 return mode;
340}
341
342static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600343 struct spi_geni_master *mas, u16 mode,
344 u32 cs_clk_delay, u32 inter_words_delay)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600345{
346 struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
347 u8 flags = 0;
348 u8 word_len = 0;
349 u8 pack = 0;
350 int div = 0;
351 int idx = 0;
352 int ret = 0;
353
354 if (IS_ERR_OR_NULL(c0_tre))
355 return c0_tre;
356
357 if (mode & SPI_LOOP)
358 flags |= GSI_LOOPBACK_EN;
359
360 if (mode & SPI_CPOL)
361 flags |= GSI_CPOL;
362
363 if (mode & SPI_CPHA)
364 flags |= GSI_CPHA;
365
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600366 word_len = xfer->bits_per_word - MIN_WORD_LEN;
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600367 pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600368 ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
369 if (ret) {
370 dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
371 return ERR_PTR(ret);
372 }
373 c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
374 word_len);
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600375 c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
376 inter_words_delay);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600377 c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
378 c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 1);
379 GENI_SE_DBG(mas->ipc, false, mas->dev,
380 "%s: flags 0x%x word %d pack %d idx %d div %d\n",
381 __func__, flags, word_len, pack, idx, div);
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600382 GENI_SE_DBG(mas->ipc, false, mas->dev,
383 "%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
384 cs_clk_delay, inter_words_delay);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600385 return c0_tre;
386}
387
388static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
389 struct spi_geni_master *mas)
390{
391 struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
392 int chain;
393 int eot;
394 int eob;
395
396 if (IS_ERR_OR_NULL(go_tre))
397 return go_tre;
398
399 go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
400 go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
401 go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
402 if (cmd == SPI_RX_ONLY) {
403 eot = 0;
404 chain = 0;
405 eob = 1;
406 } else {
407 eot = 0;
408 chain = 1;
409 eob = 0;
410 }
411 go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(0, eot, eob, chain);
412 GENI_SE_DBG(mas->ipc, false, mas->dev,
413 "%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
414 __func__, rx_len, flags, cs, cmd, eot, eob, chain);
415 return go_tre;
416}
417
418static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
419 dma_addr_t buf, u32 len,
420 struct spi_geni_master *mas,
421 bool is_tx)
422{
423 if (IS_ERR_OR_NULL(tre))
424 return tre;
425
426 tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
427 tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
428 tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
429 tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, is_tx, 0, 0);
430 return tre;
431}
432
433static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
434 void *ptr)
435{
436 struct spi_master *spi = ptr;
437 struct spi_geni_master *mas = spi_master_get_devdata(spi);
438
439 switch (cb->cb_event) {
440 case MSM_GPI_QUP_NOTIFY:
441 case MSM_GPI_QUP_MAX_EVENT:
442 GENI_SE_DBG(mas->ipc, false, mas->dev,
443 "%s:cb_ev%d status%llu ts%llu count%llu\n",
444 __func__, cb->cb_event, cb->status,
445 cb->timestamp, cb->count);
446 break;
447 case MSM_GPI_QUP_ERROR:
448 case MSM_GPI_QUP_CH_ERROR:
449 case MSM_GPI_QUP_FW_ERROR:
450 case MSM_GPI_QUP_PENDING_EVENT:
451 case MSM_GPI_QUP_EOT_DESC_MISMATCH:
452 case MSM_GPI_QUP_SW_ERROR:
453 GENI_SE_ERR(mas->ipc, true, mas->dev,
454 "%s: cb_ev %d status %llu ts %llu count %llu\n",
455 __func__, cb->cb_event, cb->status,
456 cb->timestamp, cb->count);
457 GENI_SE_ERR(mas->ipc, true, mas->dev,
458 "err.routine %u, err.type %u, err.code %u\n",
459 cb->error_log.routine,
460 cb->error_log.type,
461 cb->error_log.error_code);
462 mas->qn_err = true;
463 complete_all(&mas->tx_cb);
464 complete_all(&mas->rx_cb);
465
466 break;
467 }
468}
469
470static void spi_gsi_rx_callback(void *cb)
471{
472 struct msm_gpi_dma_async_tx_cb_param *cb_param =
473 (struct msm_gpi_dma_async_tx_cb_param *)cb;
474 struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
475 struct spi_master *spi = desc_cb->spi;
476 struct spi_transfer *xfer = desc_cb->xfer;
477 struct spi_geni_master *mas = spi_master_get_devdata(spi);
478
479 if (xfer->rx_buf) {
480 if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
481 GENI_SE_ERR(mas->ipc, true, mas->dev,
482 "%s: Unexpected GSI CB error\n", __func__);
483 return;
484 }
485 if (cb_param->length == xfer->len) {
486 GENI_SE_DBG(mas->ipc, false, mas->dev,
487 "%s\n", __func__);
488 complete(&mas->rx_cb);
489 } else {
490 GENI_SE_ERR(mas->ipc, true, mas->dev,
491 "%s: Length mismatch. Expected %d Callback %d\n",
492 __func__, xfer->len, cb_param->length);
493 }
494 }
495}
496
497static void spi_gsi_tx_callback(void *cb)
498{
499 struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
500 struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
501 struct spi_master *spi = desc_cb->spi;
502 struct spi_transfer *xfer = desc_cb->xfer;
503 struct spi_geni_master *mas = spi_master_get_devdata(spi);
504
505 if (xfer->tx_buf) {
506 if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
507 GENI_SE_ERR(mas->ipc, true, mas->dev,
508 "%s: Unexpected GSI CB error\n", __func__);
509 return;
510 }
511 if (cb_param->length == xfer->len) {
512 GENI_SE_DBG(mas->ipc, false, mas->dev,
513 "%s\n", __func__);
514 complete(&mas->tx_cb);
515 } else {
516 GENI_SE_ERR(mas->ipc, true, mas->dev,
517 "%s: Length mismatch. Expected %d Callback %d\n",
518 __func__, xfer->len, cb_param->length);
519 }
520 }
521}
522
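/*
 * A GSI transfer is queued as a chain of TREs: an optional CONFIG0 TRE
 * (only when the word length or speed changes), a GO TRE carrying the
 * command, chip select and RX length, and one DMA TRE per data buffer.
 * The TX and RX chains are handed to the GPI DMA engine as scatterlists.
 */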
523static int setup_gsi_xfer(struct spi_transfer *xfer,
524 struct spi_geni_master *mas,
525 struct spi_device *spi_slv,
526 struct spi_master *spi)
527{
528 int ret = 0;
529 struct msm_gpi_tre *c0_tre = NULL;
530 struct msm_gpi_tre *go_tre = NULL;
531 struct msm_gpi_tre *tx_tre = NULL;
532 struct msm_gpi_tre *rx_tre = NULL;
533 struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
534 struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
535 int rx_nent = 0;
536 int tx_nent = 0;
537 u8 cmd = 0;
538 u8 cs = 0;
539 u32 rx_len = 0;
540 int go_flags = 0;
541 unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600542 struct spi_geni_qcom_ctrl_data *delay_params = NULL;
543 u32 cs_clk_delay = 0;
544 u32 inter_words_delay = 0;
545
546 if (spi_slv->controller_data) {
547 delay_params =
548 (struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
549
550 cs_clk_delay =
551 delay_params->spi_cs_clk_delay;
552 inter_words_delay =
553 delay_params->spi_inter_words_delay;
554 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600555
556 if ((xfer->bits_per_word != mas->cur_word_len) ||
557 (xfer->speed_hz != mas->cur_speed_hz)) {
558 mas->cur_word_len = xfer->bits_per_word;
559 mas->cur_speed_hz = xfer->speed_hz;
560 tx_nent++;
Girish Mahadevanb06fe0e2017-09-06 11:35:19 -0600561 c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
562 cs_clk_delay, inter_words_delay);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600563 if (IS_ERR_OR_NULL(c0_tre)) {
564 dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
565 __func__, ret);
566 return PTR_ERR(c0_tre);
567 }
568 }
569
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600570 if (!(mas->cur_word_len % MIN_WORD_LEN)) {
571 rx_len = ((xfer->len << 3) / mas->cur_word_len);
572 } else {
573 int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;
574
575 rx_len = (xfer->len / bytes_per_word);
576 }
577
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600578 if (xfer->tx_buf && xfer->rx_buf) {
579 cmd = SPI_FULL_DUPLEX;
580 tx_nent += 2;
581 rx_nent++;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600582 } else if (xfer->tx_buf) {
583 cmd = SPI_TX_ONLY;
584 tx_nent += 2;
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600585 rx_len = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600586 } else if (xfer->rx_buf) {
587 cmd = SPI_RX_ONLY;
588 tx_nent++;
589 rx_nent++;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600590 }
591
592 cs |= spi_slv->chip_select;
Girish Mahadevan36df8752017-11-16 10:53:15 -0700593 if (!xfer->cs_change) {
594 if (!list_is_last(&xfer->transfer_list,
595 &spi->cur_msg->transfers))
596 go_flags |= FRAGMENTATION;
597 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600598 go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);
599
600 sg_init_table(xfer_tx_sg, tx_nent);
601 if (rx_nent)
602 sg_init_table(xfer_rx_sg, rx_nent);
603
604 if (c0_tre)
605 sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
606
607 sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
608 mas->gsi[mas->num_xfers].desc_cb.spi = spi;
609 mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
610 if (cmd & SPI_RX_ONLY) {
611 rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
612 rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
613 if (IS_ERR_OR_NULL(rx_tre)) {
614 dev_err(mas->dev, "Err setting up rx tre\n");
615 return PTR_ERR(rx_tre);
616 }
617 sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
618 mas->gsi[mas->num_xfers].rx_desc =
619 dmaengine_prep_slave_sg(mas->rx,
620 &mas->gsi[mas->num_xfers].rx_sg, rx_nent,
621 DMA_DEV_TO_MEM, flags);
622 if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
623 dev_err(mas->dev, "Err setting up rx desc\n");
624 return -EIO;
625 }
626 mas->gsi[mas->num_xfers].rx_desc->callback =
627 spi_gsi_rx_callback;
628 mas->gsi[mas->num_xfers].rx_desc->callback_param =
629 &mas->gsi[mas->num_xfers].rx_cb_param;
630 mas->gsi[mas->num_xfers].rx_cb_param.userdata =
631 &mas->gsi[mas->num_xfers].desc_cb;
632 mas->num_rx_eot++;
633 }
634
635 if (cmd & SPI_TX_ONLY) {
636 tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
637 tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
638 if (IS_ERR_OR_NULL(tx_tre)) {
639 dev_err(mas->dev, "Err setting up tx tre\n");
640 return PTR_ERR(tx_tre);
641 }
642 sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
643 mas->num_tx_eot++;
644 }
645 mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
646 mas->gsi[mas->num_xfers].tx_sg, tx_nent,
647 DMA_MEM_TO_DEV, flags);
648 if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
649 dev_err(mas->dev, "Err setting up tx desc\n");
650 return -EIO;
651 }
652 mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
653 mas->gsi[mas->num_xfers].tx_desc->callback_param =
654 &mas->gsi[mas->num_xfers].tx_cb_param;
655 mas->gsi[mas->num_xfers].tx_cb_param.userdata =
656 &mas->gsi[mas->num_xfers].desc_cb;
657 mas->gsi[mas->num_xfers].tx_cookie =
658 dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
Girish Mahadevan12858822018-01-12 17:18:48 -0700659 if (cmd & SPI_RX_ONLY)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600660 mas->gsi[mas->num_xfers].rx_cookie =
661 dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
662 dma_async_issue_pending(mas->tx);
Girish Mahadevan12858822018-01-12 17:18:48 -0700663 if (cmd & SPI_RX_ONLY)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600664 dma_async_issue_pending(mas->rx);
665 mas->num_xfers++;
666 return ret;
667}
668
669static int spi_geni_map_buf(struct spi_geni_master *mas,
670 struct spi_message *msg)
671{
672 struct spi_transfer *xfer;
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600673 int ret = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600674
675 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
676 if (xfer->rx_buf) {
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600677 ret = geni_se_iommu_map_buf(mas->wrapper_dev,
678 &xfer->rx_dma, xfer->rx_buf,
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600679 xfer->len, DMA_FROM_DEVICE);
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600680 if (ret) {
681 GENI_SE_ERR(mas->ipc, true, mas->dev,
682 "%s: Mapping Rx buffer %d\n", __func__, ret);
683 return ret;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600684 }
685 }
686
687 if (xfer->tx_buf) {
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600688 ret = geni_se_iommu_map_buf(mas->wrapper_dev,
689 &xfer->tx_dma,
690 (void *)xfer->tx_buf,
691 xfer->len, DMA_TO_DEVICE);
692 if (ret) {
693 GENI_SE_ERR(mas->ipc, true, mas->dev,
694 "%s: Mapping Tx buffer %d\n", __func__, ret);
695 return ret;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600696 }
697 }
698 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600699 return 0;
700}
701
702static void spi_geni_unmap_buf(struct spi_geni_master *mas,
703 struct spi_message *msg)
704{
705 struct spi_transfer *xfer;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600706
707 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
708 if (xfer->rx_buf)
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600709 geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600710 xfer->len, DMA_FROM_DEVICE);
711 if (xfer->tx_buf)
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600712 geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600713 xfer->len, DMA_TO_DEVICE);
714 }
715}
716
717static int spi_geni_prepare_message(struct spi_master *spi,
718 struct spi_message *spi_msg)
719{
720 int ret = 0;
721 struct spi_geni_master *mas = spi_master_get_devdata(spi);
722
723 mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
724
Dilip Kotad70fa152018-05-18 14:26:23 +0530725 if (mas->cur_xfer_mode < 0) {
726 dev_err(mas->dev, "%s: Couldn't select mode %d", __func__,
727 mas->cur_xfer_mode);
728 ret = -EINVAL;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600729 } else if (mas->cur_xfer_mode == GSI_DMA) {
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600730 memset(mas->gsi, 0,
731 (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
732 geni_se_select_mode(mas->base, GSI_DMA);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600733 ret = spi_geni_map_buf(mas, spi_msg);
734 } else {
Dilip Kotad70fa152018-05-18 14:26:23 +0530735 geni_se_select_mode(mas->base, mas->cur_xfer_mode);
736 ret = setup_fifo_params(spi_msg->spi, spi);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600737 }
Dilip Kotad70fa152018-05-18 14:26:23 +0530738
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700739 return ret;
740}
741
742static int spi_geni_unprepare_message(struct spi_master *spi_mas,
743 struct spi_message *spi_msg)
744{
745 struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
746
747 mas->cur_speed_hz = 0;
748 mas->cur_word_len = 0;
Girish Mahadevanf809ccb2017-11-30 10:49:04 -0700749 if (mas->cur_xfer_mode == GSI_DMA)
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600750 spi_geni_unmap_buf(mas, spi_msg);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700751 return 0;
752}
753
754static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
755{
756 struct spi_geni_master *mas = spi_master_get_devdata(spi);
Dilip Kota0a4a9622018-06-14 17:32:26 +0530757 int ret = 0, count = 0;
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600758 u32 max_speed = spi->cur_msg->spi->max_speed_hz;
759 struct se_geni_rsc *rsc = &mas->spi_rsc;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700760
Alok Chauhanb89cb552018-06-12 11:28:34 +0530761 /* Adjust the IB based on the max speed of the slave.*/
Girish Mahadevan5f9df632017-08-29 13:29:23 -0600762 rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
Girish Mahadevan488a9732018-01-09 17:48:13 -0700763 if (mas->shared_se) {
764 struct se_geni_rsc *rsc;
765 int ret = 0;
766
767 rsc = &mas->spi_rsc;
768 ret = pinctrl_select_state(rsc->geni_pinctrl,
769 rsc->geni_gpio_active);
770 if (ret)
771 GENI_SE_ERR(mas->ipc, false, NULL,
772 "%s: Error %d pinctrl_select_state\n", __func__, ret);
773 }
774
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700775 ret = pm_runtime_get_sync(mas->dev);
776 if (ret < 0) {
Girish Mahadevan488a9732018-01-09 17:48:13 -0700777 dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
778 __func__, ret);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700779 pm_runtime_put_noidle(mas->dev);
780 goto exit_prepare_transfer_hardware;
781 } else {
782 ret = 0;
783 }
Dilip Kota0a4a9622018-06-14 17:32:26 +0530784 if (mas->dis_autosuspend) {
785 count = atomic_read(&mas->dev->power.usage_count);
786 if (count <= 0)
787 GENI_SE_ERR(mas->ipc, false, NULL,
788 "resume usage count mismatch:%d", count);
789 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700790 if (unlikely(!mas->setup)) {
791 int proto = get_se_proto(mas->base);
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600792 unsigned int major;
793 unsigned int minor;
794 unsigned int step;
795 int hw_ver;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700796
797 if (unlikely(proto != SPI)) {
798 dev_err(mas->dev, "Invalid proto %d\n", proto);
799 return -ENXIO;
800 }
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -0600801 geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700802 mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
803 mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
804 mas->tx_fifo_width = get_tx_fifo_width(mas->base);
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600805 mas->oversampling = 1;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700806 /* Transmit an entire FIFO worth of data per IRQ */
807 mas->tx_wm = 1;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600808
809 mas->tx = dma_request_slave_channel(mas->dev, "tx");
810 if (IS_ERR_OR_NULL(mas->tx)) {
811 dev_info(mas->dev, "Failed to get tx DMA ch %ld",
812 PTR_ERR(mas->tx));
813 } else {
814 mas->rx = dma_request_slave_channel(mas->dev, "rx");
815 if (IS_ERR_OR_NULL(mas->rx)) {
816 dev_info(mas->dev, "Failed to get rx DMA ch %ld\n",
817 PTR_ERR(mas->rx));
818 dma_release_channel(mas->tx);
819 }
820 mas->gsi = devm_kzalloc(mas->dev,
821 (sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
822 GFP_KERNEL);
823 if (IS_ERR_OR_NULL(mas->gsi)) {
824 dev_err(mas->dev, "Failed to get GSI mem\n");
825 dma_release_channel(mas->tx);
826 dma_release_channel(mas->rx);
827 mas->tx = NULL;
828 mas->rx = NULL;
829 goto setup_ipc;
830 }
831 mas->tx_event.init.callback = spi_gsi_ch_cb;
832 mas->tx_event.init.cb_param = spi;
833 mas->tx_event.cmd = MSM_GPI_INIT;
834 mas->tx->private = &mas->tx_event;
835 mas->rx_event.init.callback = spi_gsi_ch_cb;
836 mas->rx_event.init.cb_param = spi;
837 mas->rx_event.cmd = MSM_GPI_INIT;
838 mas->rx->private = &mas->rx_event;
839 if (dmaengine_slave_config(mas->tx, NULL)) {
840 dev_err(mas->dev, "Failed to Config Tx\n");
841 dma_release_channel(mas->tx);
842 dma_release_channel(mas->rx);
843 mas->tx = NULL;
844 mas->rx = NULL;
845 goto setup_ipc;
846 }
847 if (dmaengine_slave_config(mas->rx, NULL)) {
848 dev_err(mas->dev, "Failed to Config Rx\n");
849 dma_release_channel(mas->tx);
850 dma_release_channel(mas->rx);
851 mas->tx = NULL;
852 mas->rx = NULL;
853 goto setup_ipc;
854 }
855
856 }
857setup_ipc:
858 mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
859 dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700860 mas->tx_fifo_depth, mas->rx_fifo_depth,
861 mas->tx_fifo_width);
862 mas->setup = true;
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600863 hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major,
864 &minor, &step);
865 if (hw_ver)
866 dev_err(mas->dev, "%s:Err getting HW version %d\n",
867 __func__, hw_ver);
868 else {
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600869 if ((major == 1) && (minor == 0))
870 mas->oversampling = 2;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600871 GENI_SE_DBG(mas->ipc, false, mas->dev,
872 "%s:Major:%d Minor:%d step:%dos%d\n",
873 __func__, major, minor, step, mas->oversampling);
Girish Mahadevan96cf38f2017-07-31 11:01:55 -0600874 }
Girish Mahadevan488a9732018-01-09 17:48:13 -0700875 mas->shared_se =
876 (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
877 FIFO_IF_DISABLE);
Dilip Kota0a4a9622018-06-14 17:32:26 +0530878 if (mas->dis_autosuspend)
879 GENI_SE_DBG(mas->ipc, false, mas->dev,
880 "Auto Suspend is disabled\n");
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700881 }
882exit_prepare_transfer_hardware:
883 return ret;
884}
885
886static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
887{
888 struct spi_geni_master *mas = spi_master_get_devdata(spi);
Dilip Kota0a4a9622018-06-14 17:32:26 +0530889 int count = 0;
Girish Mahadevan488a9732018-01-09 17:48:13 -0700890 if (mas->shared_se) {
891 struct se_geni_rsc *rsc;
892 int ret = 0;
893
894 rsc = &mas->spi_rsc;
895 ret = pinctrl_select_state(rsc->geni_pinctrl,
896 rsc->geni_gpio_sleep);
897 if (ret)
898 GENI_SE_ERR(mas->ipc, false, NULL,
899 "%s: Error %d pinctrl_select_state\n", __func__, ret);
900 }
901
Dilip Kota0a4a9622018-06-14 17:32:26 +0530902 if (mas->dis_autosuspend) {
903 pm_runtime_put_sync(mas->dev);
904 count = atomic_read(&mas->dev->power.usage_count);
905 if (count < 0)
906 GENI_SE_ERR(mas->ipc, false, NULL,
907 "suspend usage count mismatch:%d", count);
908 } else {
909 pm_runtime_mark_last_busy(mas->dev);
910 pm_runtime_put_autosuspend(mas->dev);
911 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700912 return 0;
913}
914
915static void setup_fifo_xfer(struct spi_transfer *xfer,
916 struct spi_geni_master *mas, u16 mode,
917 struct spi_master *spi)
918{
919 u32 m_cmd = 0;
920 u32 m_param = 0;
921 u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
Dilip Kotac0a114a2018-10-31 16:54:38 +0530922 u32 trans_len = 0, fifo_size = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700923
924 if (xfer->bits_per_word != mas->cur_word_len) {
925 spi_setup_word_len(mas, mode, xfer->bits_per_word);
926 mas->cur_word_len = xfer->bits_per_word;
927 }
928
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600929 /* Speed and bits per word can be overridden per transfer */
930 if (xfer->speed_hz != mas->cur_speed_hz) {
931 int ret = 0;
Girish Mahadevanda008762017-11-27 11:31:21 -0700932 u32 clk_sel = 0;
933 u32 m_clk_cfg = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -0600934 int idx = 0;
935 int div = 0;
936
937 ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
938 if (ret) {
939 dev_err(mas->dev, "%s:Err setting clks:%d\n",
940 __func__, ret);
941 return;
942 }
943 mas->cur_speed_hz = xfer->speed_hz;
944 clk_sel |= (idx & CLK_SEL_MSK);
945 m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
946 geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
947 geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
948 }
949
950 mas->tx_rem_bytes = 0;
951 mas->rx_rem_bytes = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700952 if (xfer->tx_buf && xfer->rx_buf)
953 m_cmd = SPI_FULL_DUPLEX;
954 else if (xfer->tx_buf)
955 m_cmd = SPI_TX_ONLY;
956 else if (xfer->rx_buf)
957 m_cmd = SPI_RX_ONLY;
958
959 spi_tx_cfg &= ~CS_TOGGLE;
Girish Mahadevan1fc27702017-08-31 12:55:23 -0600960 if (!(mas->cur_word_len % MIN_WORD_LEN)) {
961 trans_len =
962 ((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK;
963 } else {
964 int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;
965
966 trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
967 }
Girish Mahadevan36df8752017-11-16 10:53:15 -0700968
969 if (!xfer->cs_change) {
970 if (!list_is_last(&xfer->transfer_list,
971 &spi->cur_msg->transfers))
972 m_param |= FRAGMENTATION;
973 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -0700974
975 mas->cur_xfer = xfer;
976 if (m_cmd & SPI_TX_ONLY) {
977 mas->tx_rem_bytes = xfer->len;
978 geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
979 }
980
981 if (m_cmd & SPI_RX_ONLY) {
982 geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
983 mas->rx_rem_bytes = xfer->len;
984 }
Dilip Kotad70fa152018-05-18 14:26:23 +0530985
Dilip Kotac0a114a2018-10-31 16:54:38 +0530986 fifo_size =
987 (mas->tx_fifo_depth * mas->tx_fifo_width / mas->cur_word_len);
988 if (trans_len > fifo_size) {
Dilip Kotad70fa152018-05-18 14:26:23 +0530989 if (mas->cur_xfer_mode != SE_DMA) {
990 mas->cur_xfer_mode = SE_DMA;
991 geni_se_select_mode(mas->base, mas->cur_xfer_mode);
992 }
993 } else {
994 if (mas->cur_xfer_mode != FIFO_MODE) {
995 mas->cur_xfer_mode = FIFO_MODE;
996 geni_se_select_mode(mas->base, mas->cur_xfer_mode);
997 }
998 }
999
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001000 geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
1001 geni_setup_m_cmd(mas->base, m_cmd, m_param);
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001002 GENI_SE_DBG(mas->ipc, false, mas->dev,
Dilip Kotad70fa152018-05-18 14:26:23 +05301003 "%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n",
Girish Mahadevan36df8752017-11-16 10:53:15 -07001004 __func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
Dilip Kotad70fa152018-05-18 14:26:23 +05301005 xfer->cs_change, mas->cur_xfer_mode);
1006 if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) {
1007 int ret = 0;
1008
1009 ret = geni_se_rx_dma_prep(mas->wrapper_dev, mas->base,
1010 xfer->rx_buf, xfer->len, &xfer->rx_dma);
1011 if (ret)
1012 GENI_SE_ERR(mas->ipc, true, mas->dev,
1013 "Failed to setup Rx dma %d\n", ret);
1014 }
1015 if (m_cmd & SPI_TX_ONLY) {
1016 if (mas->cur_xfer_mode == FIFO_MODE) {
1017 geni_write_reg(mas->tx_wm, mas->base,
1018 SE_GENI_TX_WATERMARK_REG);
1019 } else if (mas->cur_xfer_mode == SE_DMA) {
1020 int ret = 0;
1021
1022 ret = geni_se_tx_dma_prep(mas->wrapper_dev, mas->base,
1023 (void *)xfer->tx_buf, xfer->len,
1024 &xfer->tx_dma);
1025 if (ret)
1026 GENI_SE_ERR(mas->ipc, true, mas->dev,
1027 "Failed to setup tx dma %d\n", ret);
1028 }
1029 }
1030
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001031 /* Ensure all writes are done before the WM interrupt */
1032 mb();
1033}
1034
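/*
 * Cancel (and, if the cancel times out, abort) the current M command
 * after a transfer timeout, then release any DMA mappings that were set
 * up for the failed transfer.
 */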
Dilip Kotad70fa152018-05-18 14:26:23 +05301035static void handle_fifo_timeout(struct spi_geni_master *mas,
1036 struct spi_transfer *xfer)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001037{
1038 unsigned long timeout;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001039
Girish Mahadevan5f9df632017-08-29 13:29:23 -06001040 geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001041 reinit_completion(&mas->xfer_done);
1042 geni_cancel_m_cmd(mas->base);
Dilip Kotad70fa152018-05-18 14:26:23 +05301043 if (mas->cur_xfer_mode == FIFO_MODE)
1044 geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001045 /* Ensure cmd cancel is written */
1046 mb();
1047 timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
1048 if (!timeout) {
1049 reinit_completion(&mas->xfer_done);
1050 geni_abort_m_cmd(mas->base);
1051 /* Ensure cmd abort is written */
1052 mb();
1053 timeout = wait_for_completion_timeout(&mas->xfer_done,
1054 HZ);
1055 if (!timeout)
1056 dev_err(mas->dev,
1057 "Failed to cancel/abort m_cmd\n");
1058 }
Dilip Kotad70fa152018-05-18 14:26:23 +05301059 if (mas->cur_xfer_mode == SE_DMA) {
1060 if (xfer->tx_buf)
1061 geni_se_tx_dma_unprep(mas->wrapper_dev,
1062 xfer->tx_dma, xfer->len);
1063 if (xfer->rx_buf)
1064 geni_se_rx_dma_unprep(mas->wrapper_dev,
1065 xfer->rx_dma, xfer->len);
1066 }
1067
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001068}
1069
1070static int spi_geni_transfer_one(struct spi_master *spi,
1071 struct spi_device *slv,
1072 struct spi_transfer *xfer)
1073{
1074 int ret = 0;
1075 struct spi_geni_master *mas = spi_master_get_devdata(spi);
1076 unsigned long timeout;
1077
1078 if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
1079 dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
1080 return -EINVAL;
1081 }
1082
Dilip Kotad70fa152018-05-18 14:26:23 +05301083 if (mas->cur_xfer_mode != GSI_DMA) {
1084 reinit_completion(&mas->xfer_done);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001085 setup_fifo_xfer(xfer, mas, slv->mode, spi);
1086 timeout = wait_for_completion_timeout(&mas->xfer_done,
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001087 msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001088 if (!timeout) {
1089 GENI_SE_ERR(mas->ipc, true, mas->dev,
1090 "Xfer[len %d tx %pK rx %pK n %d] timed out.\n",
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001091 xfer->len, xfer->tx_buf,
1092 xfer->rx_buf,
1093 xfer->bits_per_word);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001094 mas->cur_xfer = NULL;
1095 ret = -ETIMEDOUT;
1096 goto err_fifo_geni_transfer_one;
1097 }
Dilip Kotad70fa152018-05-18 14:26:23 +05301098
1099 if (mas->cur_xfer_mode == SE_DMA) {
1100 if (xfer->tx_buf)
1101 geni_se_tx_dma_unprep(mas->wrapper_dev,
1102 xfer->tx_dma, xfer->len);
1103 if (xfer->rx_buf)
1104 geni_se_rx_dma_unprep(mas->wrapper_dev,
1105 xfer->rx_dma, xfer->len);
1106 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001107 } else {
Dilip Kotad70fa152018-05-18 14:26:23 +05301108 mas->num_tx_eot = 0;
1109 mas->num_rx_eot = 0;
1110 mas->num_xfers = 0;
1111 reinit_completion(&mas->tx_cb);
1112 reinit_completion(&mas->rx_cb);
1113
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001114 setup_gsi_xfer(xfer, mas, slv, spi);
1115 if ((mas->num_xfers >= NUM_SPI_XFER) ||
1116 (list_is_last(&xfer->transfer_list,
1117 &spi->cur_msg->transfers))) {
1118 int i;
1119
1120 for (i = 0 ; i < mas->num_tx_eot; i++) {
1121 timeout =
Karthikeyan Ramasubramanian465f10d2018-01-02 23:03:41 -07001122 wait_for_completion_timeout(
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001123 &mas->tx_cb,
1124 msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
1125 if (timeout <= 0) {
1126 GENI_SE_ERR(mas->ipc, true, mas->dev,
1127 "Tx[%d] timeout%lu\n", i, timeout);
1128 ret = -ETIMEDOUT;
1129 goto err_gsi_geni_transfer_one;
1130 }
1131 }
1132 for (i = 0 ; i < mas->num_rx_eot; i++) {
1133 timeout =
Karthikeyan Ramasubramanian465f10d2018-01-02 23:03:41 -07001134 wait_for_completion_timeout(
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001135 &mas->rx_cb,
1136 msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
1137 if (timeout <= 0) {
1138 GENI_SE_ERR(mas->ipc, true, mas->dev,
1139 "Rx[%d] timeout%lu\n", i, timeout);
1140 ret = -ETIMEDOUT;
1141 goto err_gsi_geni_transfer_one;
1142 }
1143 }
1144 if (mas->qn_err) {
1145 ret = -EIO;
1146 mas->qn_err = false;
1147 goto err_gsi_geni_transfer_one;
1148 }
1149 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001150 }
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001151 return ret;
1152err_gsi_geni_transfer_one:
Girish Mahadevan5f9df632017-08-29 13:29:23 -06001153 geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001154 dmaengine_terminate_all(mas->tx);
1155 return ret;
1156err_fifo_geni_transfer_one:
Dilip Kotad70fa152018-05-18 14:26:23 +05301157 handle_fifo_timeout(mas, xfer);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001158 return ret;
1159}
1160
1161static void geni_spi_handle_tx(struct spi_geni_master *mas)
1162{
1163 int i = 0;
1164 int tx_fifo_width = (mas->tx_fifo_width >> 3);
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001165 int max_bytes = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001166 const u8 *tx_buf = NULL;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001167
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001168 if (!mas->cur_xfer)
1169 return;
1170
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001171 /*
1172 * For non-byte aligned bits-per-word values:
1173 * Assumption is that each SPI word will be accommodated in
1174 * ceil(bits_per_word / bits_per_byte) bytes
1175 * and the next SPI word starts at the next byte.
1176 * In such cases, we can fit 1 SPI word per FIFO word, so adjust the
1177 * max bytes that can be sent per IRQ accordingly.
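 * For example (illustrative numbers), 12 bits per word occupies 2 bytes,
 * so a 16-deep FIFO with tx_wm = 1 allows at most (16 - 1) * 2 = 30 bytes
 * per interrupt.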
1178 */
1179 if ((mas->tx_fifo_width % mas->cur_word_len))
1180 max_bytes = (mas->tx_fifo_depth - mas->tx_wm) *
1181 ((mas->cur_word_len / BITS_PER_BYTE) + 1);
1182 else
1183 max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001184 tx_buf = mas->cur_xfer->tx_buf;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001185 tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
1186 max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
1187 while (i < max_bytes) {
1188 int j;
1189 u32 fifo_word = 0;
1190 u8 *fifo_byte;
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001191 int bytes_per_fifo = tx_fifo_width;
1192 int bytes_to_write = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001193
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001194 if ((mas->tx_fifo_width % mas->cur_word_len))
1195 bytes_per_fifo =
1196 (mas->cur_word_len / BITS_PER_BYTE) + 1;
1197 bytes_to_write = min_t(int, (max_bytes - i), bytes_per_fifo);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001198 fifo_byte = (u8 *)&fifo_word;
1199 for (j = 0; j < bytes_to_write; j++)
1200 fifo_byte[j] = tx_buf[i++];
1201 geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
1202 /* Ensure FIFO writes are written in order */
1203 mb();
1204 }
1205 mas->tx_rem_bytes -= max_bytes;
1206 if (!mas->tx_rem_bytes) {
1207 geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
1208 /* Barrier here before return to prevent further ISRs */
1209 mb();
1210 }
1211}
1212
1213static void geni_spi_handle_rx(struct spi_geni_master *mas)
1214{
1215 int i = 0;
1216 int fifo_width = (mas->tx_fifo_width >> 3);
1217 u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
1218 int rx_bytes = 0;
1219 int rx_wc = 0;
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001220 u8 *rx_buf = NULL;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001221
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001222 if (!mas->cur_xfer)
1223 return;
1224
1225 rx_buf = mas->cur_xfer->rx_buf;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001226 rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
1227 if (rx_fifo_status & RX_LAST) {
1228 int rx_last_byte_valid =
1229 (rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
1230 >> RX_LAST_BYTE_VALID_SHFT;
1231 if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
1232 rx_wc -= 1;
1233 rx_bytes += rx_last_byte_valid;
1234 }
1235 }
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001236 if (!(mas->tx_fifo_width % mas->cur_word_len))
1237 rx_bytes += rx_wc * fifo_width;
1238 else
1239 rx_bytes += rx_wc *
1240 ((mas->cur_word_len / BITS_PER_BYTE) + 1);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001241 rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
1242 rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
1243 while (i < rx_bytes) {
1244 u32 fifo_word = 0;
1245 u8 *fifo_byte;
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001246 int bytes_per_fifo = fifo_width;
1247 int read_bytes = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001248 int j;
1249
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001250 if ((mas->tx_fifo_width % mas->cur_word_len))
1251 bytes_per_fifo =
1252 (mas->cur_word_len / BITS_PER_BYTE) + 1;
1253 read_bytes = min_t(int, (rx_bytes - i), bytes_per_fifo);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001254 fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
1255 fifo_byte = (u8 *)&fifo_word;
1256 for (j = 0; j < read_bytes; j++)
1257 rx_buf[i++] = fifo_byte[j];
1258 }
1259 mas->rx_rem_bytes -= rx_bytes;
1260}
1261
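/*
 * IRQ handler: drains/fills the FIFOs in FIFO mode, acknowledges DMA
 * done/cancel/abort status in SE_DMA mode, and completes xfer_done once
 * the command has finished.
 */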
Prudhvi Yarlagadda8daa6fd2018-10-29 12:47:45 +05301262static irqreturn_t geni_spi_irq(int irq, void *data)
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001263{
Prudhvi Yarlagadda8daa6fd2018-10-29 12:47:45 +05301264 struct spi_geni_master *mas = data;
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001265 u32 m_irq = 0;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001266
Prudhvi Yarlagadda8daa6fd2018-10-29 12:47:45 +05301267 if (pm_runtime_status_suspended(mas->dev)) {
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001268 GENI_SE_DBG(mas->ipc, false, mas->dev,
1269 "%s: device is suspended\n", __func__);
1270 goto exit_geni_spi_irq;
1271 }
1272 m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
Dilip Kotad70fa152018-05-18 14:26:23 +05301273 if (mas->cur_xfer_mode == FIFO_MODE) {
1274 if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
1275 (m_irq & M_RX_FIFO_LAST_EN))
1276 geni_spi_handle_rx(mas);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001277
Dilip Kotad70fa152018-05-18 14:26:23 +05301278 if ((m_irq & M_TX_FIFO_WATERMARK_EN))
1279 geni_spi_handle_tx(mas);
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001280
Dilip Kotad70fa152018-05-18 14:26:23 +05301281 if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
1282 (m_irq & M_CMD_ABORT_EN)) {
Dilip Kotab5fa8be2018-12-06 19:03:17 +05301283 mas->cmd_done = true;
Dilip Kotad70fa152018-05-18 14:26:23 +05301284 /*
1285 * If this happens, then a CMD_DONE came before all the
1286 * buffer bytes were sent out. This is unusual; log this
1287 * condition and disable the WM interrupt to prevent the
1288 * system from stalling due to an interrupt storm.
1289 * If this happens when all Rx bytes haven't been
1290 * received, log the condition.
1291 */
1292 if (mas->tx_rem_bytes) {
1293 geni_write_reg(0, mas->base,
1294 SE_GENI_TX_WATERMARK_REG);
1295 GENI_SE_DBG(mas->ipc, false, mas->dev,
1296 "%s:Premature Done.tx_rem%d bpw%d\n",
1297 __func__, mas->tx_rem_bytes,
1298 mas->cur_word_len);
1299 }
1300 if (mas->rx_rem_bytes)
1301 GENI_SE_DBG(mas->ipc, false, mas->dev,
1302 "%s:Premature Done.rx_rem%d bpw%d\n",
1303 __func__, mas->rx_rem_bytes,
1304 mas->cur_word_len);
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001305 }
Dilip Kotad70fa152018-05-18 14:26:23 +05301306 } else if (mas->cur_xfer_mode == SE_DMA) {
1307 u32 dma_tx_status = geni_read_reg(mas->base,
1308 SE_DMA_TX_IRQ_STAT);
1309 u32 dma_rx_status = geni_read_reg(mas->base,
1310 SE_DMA_RX_IRQ_STAT);
1311
1312 if (dma_tx_status)
1313 geni_write_reg(dma_tx_status, mas->base,
1314 SE_DMA_TX_IRQ_CLR);
1315 if (dma_rx_status)
1316 geni_write_reg(dma_rx_status, mas->base,
1317 SE_DMA_RX_IRQ_CLR);
1318 if (dma_tx_status & TX_DMA_DONE)
1319 mas->tx_rem_bytes = 0;
1320 if (dma_rx_status & RX_DMA_DONE)
1321 mas->rx_rem_bytes = 0;
1322 if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
Dilip Kotab5fa8be2018-12-06 19:03:17 +05301323 mas->cmd_done = true;
Dilip Kotad70fa152018-05-18 14:26:23 +05301324 if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
Dilip Kotab5fa8be2018-12-06 19:03:17 +05301325 mas->cmd_done = true;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001326 }
Girish Mahadevan1fc27702017-08-31 12:55:23 -06001327exit_geni_spi_irq:
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001328 geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
Dilip Kotab5fa8be2018-12-06 19:03:17 +05301329 if (mas->cmd_done) {
1330 mas->cmd_done = false;
1331 complete(&mas->xfer_done);
1332 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001333 return IRQ_HANDLED;
1334}
1335
1336static int spi_geni_probe(struct platform_device *pdev)
1337{
1338 int ret;
1339 struct spi_master *spi;
1340 struct spi_geni_master *geni_mas;
1341 struct se_geni_rsc *rsc;
1342 struct resource *res;
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -06001343 struct platform_device *wrapper_pdev;
1344 struct device_node *wrapper_ph_node;
Girish Mahadevanbf854fc2017-10-13 16:02:54 -06001345 bool rt_pri;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001346
1347 spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
1348 if (!spi) {
1349 ret = -ENOMEM;
1350 dev_err(&pdev->dev, "Failed to alloc spi struct\n");
1351 goto spi_geni_probe_err;
1352 }
1353
1354 platform_set_drvdata(pdev, spi);
1355 geni_mas = spi_master_get_devdata(spi);
1356 rsc = &geni_mas->spi_rsc;
1357 geni_mas->dev = &pdev->dev;
1358 spi->dev.of_node = pdev->dev.of_node;
Karthikeyan Ramasubramanian0d578b72017-04-26 10:44:02 -06001359 wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
1360 "qcom,wrapper-core", 0);
1361 if (IS_ERR_OR_NULL(wrapper_ph_node)) {
1362 ret = PTR_ERR(wrapper_ph_node);
1363 dev_err(&pdev->dev, "No wrapper core defined\n");
1364 goto spi_geni_probe_err;
1365 }
1366 wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
1367 of_node_put(wrapper_ph_node);
1368 if (IS_ERR_OR_NULL(wrapper_pdev)) {
1369 ret = PTR_ERR(wrapper_pdev);
1370 dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
1371 goto spi_geni_probe_err;
1372 }
1373 geni_mas->wrapper_dev = &wrapper_pdev->dev;
1374 geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
1375 ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
1376 (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
1377 if (ret) {
1378 dev_err(&pdev->dev, "Error geni_se_resources_init\n");
1379 goto spi_geni_probe_err;
1380 }
1381
Shrey Vijaydb097e82018-05-09 17:31:00 +05301382 geni_mas->spi_rsc.ctrl_dev = geni_mas->dev;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001383 rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
1384 if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
1385 dev_err(&pdev->dev, "No pinctrl config specified!\n");
1386 ret = PTR_ERR(rsc->geni_pinctrl);
1387 goto spi_geni_probe_err;
1388 }
1389
1390 rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
1391 PINCTRL_DEFAULT);
1392 if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
1393 dev_err(&pdev->dev, "No default config specified!\n");
1394 ret = PTR_ERR(rsc->geni_gpio_active);
1395 goto spi_geni_probe_err;
1396 }
1397
1398 rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
1399 PINCTRL_SLEEP);
1400 if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
1401 dev_err(&pdev->dev, "No sleep config specified!\n");
1402 ret = PTR_ERR(rsc->geni_gpio_sleep);
1403 goto spi_geni_probe_err;
1404 }
1405
1406 rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
1407 if (IS_ERR(rsc->se_clk)) {
1408 ret = PTR_ERR(rsc->se_clk);
1409 dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
1410 goto spi_geni_probe_err;
1411 }
1412
1413 rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
1414 if (IS_ERR(rsc->m_ahb_clk)) {
1415 ret = PTR_ERR(rsc->m_ahb_clk);
1416 dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
1417 goto spi_geni_probe_err;
1418 }
1419
1420 rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
1421 if (IS_ERR(rsc->s_ahb_clk)) {
1422 ret = PTR_ERR(rsc->s_ahb_clk);
1423 dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
1424 goto spi_geni_probe_err;
1425 }
1426
1427 if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
1428 &spi->max_speed_hz)) {
1429 dev_err(&pdev->dev, "Max frequency not specified.\n");
1430 ret = -ENXIO;
1431 goto spi_geni_probe_err;
1432 }
1433
1434 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
1435 if (!res) {
1436 ret = -ENXIO;
1437 dev_err(&pdev->dev, "Err getting IO region\n");
1438 goto spi_geni_probe_err;
1439 }
1440
Girish Mahadevanbf854fc2017-10-13 16:02:54 -06001441 rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
1442 if (rt_pri)
1443 spi->rt = true;
Dilip Kota0a4a9622018-06-14 17:32:26 +05301444 geni_mas->dis_autosuspend =
1445 of_property_read_bool(pdev->dev.of_node,
1446 "qcom,disable-autosuspend");
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001447 geni_mas->phys_addr = res->start;
1448 geni_mas->size = resource_size(res);
1449 geni_mas->base = devm_ioremap(&pdev->dev, res->start,
1450 resource_size(res));
1451 if (!geni_mas->base) {
1452 ret = -ENOMEM;
1453 dev_err(&pdev->dev, "Err IO mapping iomem\n");
1454 goto spi_geni_probe_err;
1455 }
1456
1457 geni_mas->irq = platform_get_irq(pdev, 0);
1458 if (geni_mas->irq < 0) {
1459 dev_err(&pdev->dev, "Err getting IRQ\n");
1460 ret = geni_mas->irq;
1461 goto spi_geni_probe_unmap;
1462 }
1463 ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
1464 IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
1465 if (ret) {
1466 dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
1467 geni_mas->irq, ret);
1468 goto spi_geni_probe_unmap;
1469 }
1470
1471 spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
1472 spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1473 spi->num_chipselect = SPI_NUM_CHIPSELECT;
1474 spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
1475 spi->prepare_message = spi_geni_prepare_message;
1476 spi->unprepare_message = spi_geni_unprepare_message;
1477 spi->transfer_one = spi_geni_transfer_one;
1478 spi->unprepare_transfer_hardware
1479 = spi_geni_unprepare_transfer_hardware;
1480 spi->auto_runtime_pm = false;
1481
1482 init_completion(&geni_mas->xfer_done);
Girish Mahadevan9a7c9442017-08-15 12:10:09 -06001483 init_completion(&geni_mas->tx_cb);
1484 init_completion(&geni_mas->rx_cb);
Girish Mahadevan488a9732018-01-09 17:48:13 -07001485 pm_runtime_set_suspended(&pdev->dev);
Dilip Kota0a4a9622018-06-14 17:32:26 +05301486 if (!geni_mas->dis_autosuspend) {
1487 pm_runtime_set_autosuspend_delay(&pdev->dev,
1488 SPI_AUTO_SUSPEND_DELAY);
1489 pm_runtime_use_autosuspend(&pdev->dev);
1490 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001491 pm_runtime_enable(&pdev->dev);
1492 ret = spi_register_master(spi);
1493 if (ret) {
1494 dev_err(&pdev->dev, "Failed to register SPI master\n");
1495 goto spi_geni_probe_unmap;
1496 }
1497 return ret;
1498spi_geni_probe_unmap:
1499 devm_iounmap(&pdev->dev, geni_mas->base);
1500spi_geni_probe_err:
1501 spi_master_put(spi);
1502 return ret;
1503}
1504
1505static int spi_geni_remove(struct platform_device *pdev)
1506{
1507 struct spi_master *master = platform_get_drvdata(pdev);
1508 struct spi_geni_master *geni_mas = spi_master_get_devdata(master);
1509
1510 spi_unregister_master(master);
1511 se_geni_resources_off(&geni_mas->spi_rsc);
1512 pm_runtime_put_noidle(&pdev->dev);
1513 pm_runtime_disable(&pdev->dev);
1514 return 0;
1515}
1516
1517#ifdef CONFIG_PM
1518static int spi_geni_runtime_suspend(struct device *dev)
1519{
1520 int ret = 0;
1521 struct spi_master *spi = get_spi_master(dev);
1522 struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
1523
Girish Mahadevan488a9732018-01-09 17:48:13 -07001524 if (geni_mas->shared_se) {
1525 ret = se_geni_clks_off(&geni_mas->spi_rsc);
1526 if (ret)
1527 GENI_SE_ERR(geni_mas->ipc, false, NULL,
1528 "%s: Error %d turning off clocks\n", __func__, ret);
1529 } else {
1530 ret = se_geni_resources_off(&geni_mas->spi_rsc);
1531 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001532 return ret;
1533}
1534
1535static int spi_geni_runtime_resume(struct device *dev)
1536{
1537 int ret = 0;
1538 struct spi_master *spi = get_spi_master(dev);
1539 struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
1540
Girish Mahadevan488a9732018-01-09 17:48:13 -07001541 if (geni_mas->shared_se) {
1542 ret = se_geni_clks_on(&geni_mas->spi_rsc);
1543 if (ret)
1544 GENI_SE_ERR(geni_mas->ipc, false, NULL,
1545 "%s: Error %d turning on clocks\n", __func__, ret);
1546 } else {
1547 ret = se_geni_resources_on(&geni_mas->spi_rsc);
1548 }
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001549 return ret;
1550}
1551
1552static int spi_geni_resume(struct device *dev)
1553{
1554 return 0;
1555}
1556
1557static int spi_geni_suspend(struct device *dev)
1558{
Girish Mahadevan488a9732018-01-09 17:48:13 -07001559 int ret = 0;
1560
1561 if (!pm_runtime_status_suspended(dev)) {
1562 struct spi_master *spi = get_spi_master(dev);
1563 struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
1564
1565 if (list_empty(&spi->queue) && !spi->cur_msg) {
1566 GENI_SE_ERR(geni_mas->ipc, true, dev,
1567 "%s: Force suspend", __func__);
1568 ret = spi_geni_runtime_suspend(dev);
1569 if (ret) {
1570 GENI_SE_ERR(geni_mas->ipc, true, dev,
1571 "Force suspend Failed:%d", ret);
1572 } else {
1573 pm_runtime_disable(dev);
1574 pm_runtime_set_suspended(dev);
1575 pm_runtime_enable(dev);
1576 }
1577 } else {
1578 ret = -EBUSY;
1579 }
1580 }
1581 return ret;
Girish Mahadevan2ef85af2017-02-14 14:42:22 -07001582}
1583#else
1584static int spi_geni_runtime_suspend(struct device *dev)
1585{
1586 return 0;
1587}
1588
1589static int spi_geni_runtime_resume(struct device *dev)
1590{
1591 return 0;
1592}
1593
1594static int spi_geni_resume(struct device *dev)
1595{
1596 return 0;
1597}
1598
1599static int spi_geni_suspend(struct device *dev)
1600{
1601 return 0;
1602}
1603#endif
1604
1605static const struct dev_pm_ops spi_geni_pm_ops = {
1606 SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
1607 spi_geni_runtime_resume, NULL)
1608 SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
1609};
1610
1611static const struct of_device_id spi_geni_dt_match[] = {
1612 { .compatible = "qcom,spi-geni" },
1613 {}
1614};
1615
1616static struct platform_driver spi_geni_driver = {
1617 .probe = spi_geni_probe,
1618 .remove = spi_geni_remove,
1619 .driver = {
1620 .name = "spi_geni",
1621 .pm = &spi_geni_pm_ops,
1622 .of_match_table = spi_geni_dt_match,
1623 },
1624};
1625module_platform_driver(spi_geni_driver);
1626
1627MODULE_LICENSE("GPL v2");
1628MODULE_ALIAS("platform:spi_geni");