/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>

struct mid_dma {
	struct intel_mid_dma_slave	dmas_tx;
	struct intel_mid_dma_slave	dmas_rx;
};

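/*
 * Filter for dma_request_channel(): accept only channels whose DMA
 * controller is the one located in mid_spi_dma_init(), matched by its
 * struct device.
 */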
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_spi *dws = param;

	return dws->dma_dev == chan->device->dev;
}

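/*
 * Find the Medfield DMA controller on the PCI bus, request one rx and
 * one tx channel from it, and attach the intel_mid_dma slave parameters
 * via chan->private -- the legacy, pre-dmaengine_slave_config() way
 * that driver takes its per-channel handshaking configuration.
 */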
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct mid_dma *dw_dma = dws->dma_priv;
	struct pci_dev *dma_dev;
	struct intel_mid_dma_slave *rxs, *txs;
	dma_cap_mask_t mask;

	/*
	 * Get the pci device for the DMA controller; currently it can
	 * only be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dws->dma_dev = &dma_dev->dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->rxchan)
		goto err_exit;
	rxs = &dw_dma->dmas_rx;
	rxs->hs_mode = LNW_DMA_HW_HS;
	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel */
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
	if (!dws->txchan)
		goto free_rxchan;
	txs = &dw_dma->dmas_tx;
	txs->hs_mode = LNW_DMA_HW_HS;
	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
	dws->txchan->private = txs;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

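/*
 * Undo mid_spi_dma_init(): abort whatever is still in flight on each
 * channel before handing it back to the dmaengine core.
 */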
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_all(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_all(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

/*
 * dws->dma_chan_done is cleared before the dma transfer starts and the
 * rx/tx channel callbacks each increment it by one. Reaching 2 means
 * the whole spi transaction is done.
 */
static void dw_spi_dma_done(void *arg)
{
	struct dw_spi *dws = arg;

	if (++dws->dma_chan_done != 2)
		return;
	dw_spi_xfer_done(dws);
}

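/*
 * Describe the tx FIFO side to the DMA engine (bursts of 16 items at
 * the transfer's register width), then wrap the whole tx buffer in a
 * single-entry scatterlist and turn it into a slave transaction.
 */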
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	/* Zero-init so that fields we don't set are not stack garbage */
	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dws->dma_width;
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
	dws->tx_sgl.dma_address = dws->tx_dma;
	dws->tx_sgl.length = dws->len;

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				&dws->tx_sgl,
				1,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_done;
	txdesc->callback_param = dws;

	return txdesc;
}

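/* The rx mirror of dw_spi_dma_prepare_tx(): device-to-memory. */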
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	/* Zero-init so that fields we don't set are not stack garbage */
	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dws->dma_width;
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
	dws->rx_sgl.dma_address = dws->rx_dma;
	dws->rx_sgl.length = dws->len;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				&dws->rx_sgl,
				1,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

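/*
 * Program the controller's DMA handshake interface; the chip is kept
 * disabled while DMACR and the watermarks change. Per the usual
 * DW_apb_ssi semantics, DMARDLR = 0xf raises the rx request once the
 * rx FIFO holds 16 or more entries, and DMATDLR = 0x10 raises the tx
 * request while the tx FIFO holds 16 or fewer entries.
 */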
static void dw_spi_dma_setup(struct dw_spi *dws)
{
	u16 dma_ctrl = 0;

	spi_enable_chip(dws, 0);

	dw_writew(dws, DW_SPI_DMARDLR, 0xf);
	dw_writew(dws, DW_SPI_DMATDLR, 0x10);

	if (dws->tx_dma)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (dws->rx_dma)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writew(dws, DW_SPI_DMACR, dma_ctrl);

	spi_enable_chip(dws, 1);
}

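/*
 * Kick off a full-duplex transfer. Both descriptors complete through
 * dw_spi_dma_done(); the rx side is issued first because the master
 * starts clocking data in as soon as tx data reaches the FIFO.
 */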
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* 1. Setup DMA related registers */
	if (cs_change)
		dw_spi_dma_setup(dws);

	dws->dma_chan_done = 0;

	/* 2. Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws);

	/* 3. Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws);

	if (!txdesc || !rxdesc)
		return -ENOMEM;

	/* rx must be started before tx due to spi's full-duplex nature */
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dws->rxchan);

	dmaengine_submit(txdesc);
	dma_async_issue_pending(dws->txchan);

	return 0;
}

static struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init = mid_spi_dma_init,
	.dma_exit = mid_spi_dma_exit,
	.dma_transfer = mid_spi_dma_transfer,
};
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, one 32-bit reg */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI0_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

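/*
 * Read the SPI0 clock divider from the MRST clock control unit, derive
 * the controller's maximum frequency from the 100 MHz base, and hook
 * up the DMA ops when DMA support is compiled in.
 */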
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating frequency */
	clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
	iounmap(clk_reg);

	dws->num_cs = 16;
	dws->fifo_len = 40;	/* FIFO is 40 words deep */

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}