/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>

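/* Bit positions in dws->dma_chan_busy marking an in-flight RX/TX transfer */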
#define RX_BUSY		0
#define TX_BUSY		1

struct mid_dma {
	struct intel_mid_dma_slave	dmas_tx;
	struct intel_mid_dma_slave	dmas_rx;
};

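/*
 * dma_request_channel() offers each free channel to this filter; only
 * channels backed by the DMA device saved in mid_spi_dma_init() below
 * are accepted.
 */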
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_spi *dws = param;

	return dws->dma_dev == chan->device->dev;
}

static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct mid_dma *dw_dma = dws->dma_priv;
	struct pci_dev *dma_dev;
	struct intel_mid_dma_slave *rxs, *txs;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

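	/*
	 * Remember the device so mid_spi_dma_chan_filter() can match
	 * channels against it. Note that pci_get_device() takes a
	 * reference on the device which is never dropped in this file.
	 */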
	dws->dma_dev = &dma_dev->dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->rxchan)
		goto err_exit;
	rxs = &dw_dma->dmas_rx;
	rxs->hs_mode = LNW_DMA_HW_HS;
	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel */
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->txchan)
		goto free_rxchan;
	txs = &dw_dma->dmas_tx;
	txs->hs_mode = LNW_DMA_HW_HS;
	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
	dws->txchan->private = txs;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

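	/* Abort anything still in flight before releasing the channels */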
	dmaengine_terminate_all(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_all(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts; the callback
 * for the tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	dw_spi_xfer_done(dws);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!dws->tx_dma)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dws->dma_width;
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

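	/*
	 * The transfer buffer is a single contiguous, already-mapped DMA
	 * region, so a one-entry scatterlist describes it completely.
	 */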
	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
	dws->tx_sgl.dma_address = dws->tx_dma;
	dws->tx_sgl.length = dws->len;

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				&dws->tx_sgl,
				1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts; the callback
 * for the rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	dw_spi_xfer_done(dws);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!dws->rx_dma)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dws->dma_width;
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
	dws->rx_sgl.dma_address = dws->rx_dma;
	dws->rx_sgl.length = dws->len;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				&dws->rx_sgl,
				1,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

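/*
 * The DW SSI core only takes control-register writes while it is disabled,
 * hence the spi_enable_chip() bracket around the programming below.
 */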
static void dw_spi_dma_setup(struct dw_spi *dws)
{
	u16 dma_ctrl = 0;

	spi_enable_chip(dws, 0);

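	/*
	 * FIFO watermarks for the DMA handshake; both thresholds line up
	 * with the 16-frame bursts (LNW_DMA_MSIZE_16) configured on the
	 * DMA channels.
	 */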
	dw_writew(dws, DW_SPI_DMARDLR, 0xf);
	dw_writew(dws, DW_SPI_DMATDLR, 0x10);

	if (dws->tx_dma)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (dws->rx_dma)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writew(dws, DW_SPI_DMACR, dma_ctrl);

	spi_enable_chip(dws, 1);
}

static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* 1. Setup DMA related registers */
	if (cs_change)
		dw_spi_dma_setup(dws);

	/* 2. Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws);

	/* 3. Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws);

	/*
	 * The RX channel must be started before the TX channel: an SPI
	 * master clocks data in at the same time as it clocks data out, so
	 * the receive side has to be ready before transmission begins.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

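	/*
	 * dmaengine_submit() only puts a descriptor on the channel's
	 * queue; dma_async_issue_pending() is what actually starts it.
	 */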
	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

static struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_transfer	= mid_spi_dma_transfer,
};
#endif

/* Some specific info for the SPI0 controller on Intel MID */

/* HW info for the MRST Clock Control Unit, one 32-bit reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
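	/* The SPI input clock is the 100 MHz base divided by (CDIV + 1) */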
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}