/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
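/*
 * Two controller generations are supported, selected at build time: the
 * original QSD SPI core (CONFIG_SPI_QSD) and the QUP core behind a GSBI
 * wrapper (CONFIG_SPI_QUP). Register offsets and the interrupt layout differ
 * between the two; the #if blocks below provide the per-target definitions.
 */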
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos_params.h>

#define SPI_DRV_NAME "spi_qsd"
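
/*
 * Register-offset helpers: on QSD targets QSD_REG(x) expands to its argument
 * and QUP_REG(x) expands to nothing (and vice versa on QUP targets), so each
 * shared register #define below resolves to the correct offset for the
 * hardware the driver is built for.
 */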
43#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
44
45#define QSD_REG(x) (x)
46#define QUP_REG(x)
47
48#define SPI_FIFO_WORD_CNT 0x0048
49
50#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
51
52#define QSD_REG(x)
53#define QUP_REG(x) (x)
54
#define QUP_CONFIG		0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
56#define QUP_ERROR_FLAGS 0x0308
57#define QUP_ERROR_FLAGS_EN 0x030C
58#define QUP_ERR_MASK 0x3
59#define SPI_OUTPUT_FIFO_WORD_CNT 0x010C
60#define SPI_INPUT_FIFO_WORD_CNT 0x0214
61#define QUP_MX_WRITE_COUNT 0x0150
62#define QUP_MX_WRITE_CNT_CURRENT 0x0154
63
64#define QUP_CONFIG_SPI_MODE 0x0100
65
66#define GSBI_CTRL_REG 0x0
67#define GSBI_SPI_CONFIG 0x30
68#endif
69
70#define SPI_CONFIG QSD_REG(0x0000) QUP_REG(0x0300)
71#define SPI_IO_CONTROL QSD_REG(0x0004) QUP_REG(0x0304)
72#define SPI_IO_MODES QSD_REG(0x0008) QUP_REG(0x0008)
73#define SPI_SW_RESET QSD_REG(0x000C) QUP_REG(0x000C)
74#define SPI_TIME_OUT QSD_REG(0x0010) QUP_REG(0x0010)
75#define SPI_TIME_OUT_CURRENT QSD_REG(0x0014) QUP_REG(0x0014)
76#define SPI_MX_OUTPUT_COUNT QSD_REG(0x0018) QUP_REG(0x0100)
77#define SPI_MX_OUTPUT_CNT_CURRENT QSD_REG(0x001C) QUP_REG(0x0104)
78#define SPI_MX_INPUT_COUNT QSD_REG(0x0020) QUP_REG(0x0200)
79#define SPI_MX_INPUT_CNT_CURRENT QSD_REG(0x0024) QUP_REG(0x0204)
80#define SPI_MX_READ_COUNT QSD_REG(0x0028) QUP_REG(0x0208)
81#define SPI_MX_READ_CNT_CURRENT QSD_REG(0x002C) QUP_REG(0x020C)
82#define SPI_OPERATIONAL QSD_REG(0x0030) QUP_REG(0x0018)
83#define SPI_ERROR_FLAGS QSD_REG(0x0034) QUP_REG(0x001C)
84#define SPI_ERROR_FLAGS_EN QSD_REG(0x0038) QUP_REG(0x0020)
85#define SPI_DEASSERT_WAIT QSD_REG(0x003C) QUP_REG(0x0310)
86#define SPI_OUTPUT_DEBUG QSD_REG(0x0040) QUP_REG(0x0108)
87#define SPI_INPUT_DEBUG QSD_REG(0x0044) QUP_REG(0x0210)
88#define SPI_TEST_CTRL QSD_REG(0x004C) QUP_REG(0x0024)
89#define SPI_OUTPUT_FIFO QSD_REG(0x0100) QUP_REG(0x0110)
90#define SPI_INPUT_FIFO QSD_REG(0x0200) QUP_REG(0x0218)
91#define SPI_STATE QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004)
92
93/* SPI_CONFIG fields */
94#define SPI_CFG_INPUT_FIRST 0x00000200
95#define SPI_NO_INPUT 0x00000080
96#define SPI_NO_OUTPUT 0x00000040
97#define SPI_CFG_LOOPBACK 0x00000100
98#define SPI_CFG_N 0x0000001F
99
100/* SPI_IO_CONTROL fields */
101#define SPI_IO_C_CLK_IDLE_HIGH 0x00000400
102#define SPI_IO_C_MX_CS_MODE 0x00000100
103#define SPI_IO_C_CS_N_POLARITY 0x000000F0
104#define SPI_IO_C_CS_N_POLARITY_0 0x00000010
105#define SPI_IO_C_CS_SELECT 0x0000000C
106#define SPI_IO_C_TRISTATE_CS 0x00000002
107#define SPI_IO_C_NO_TRI_STATE 0x00000001
108
109/* SPI_IO_MODES fields */
110#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN QSD_REG(0x00004000) QUP_REG(0x00010000)
111#define SPI_IO_M_PACK_EN QSD_REG(0x00002000) QUP_REG(0x00008000)
112#define SPI_IO_M_UNPACK_EN QSD_REG(0x00001000) QUP_REG(0x00004000)
113#define SPI_IO_M_INPUT_MODE QSD_REG(0x00000C00) QUP_REG(0x00003000)
114#define SPI_IO_M_OUTPUT_MODE QSD_REG(0x00000300) QUP_REG(0x00000C00)
115#define SPI_IO_M_INPUT_FIFO_SIZE QSD_REG(0x000000C0) QUP_REG(0x00000380)
116#define SPI_IO_M_INPUT_BLOCK_SIZE QSD_REG(0x00000030) QUP_REG(0x00000060)
117#define SPI_IO_M_OUTPUT_FIFO_SIZE QSD_REG(0x0000000C) QUP_REG(0x0000001C)
118#define SPI_IO_M_OUTPUT_BLOCK_SIZE QSD_REG(0x00000003) QUP_REG(0x00000003)
119
120#define INPUT_BLOCK_SZ_SHIFT QSD_REG(4) QUP_REG(5)
121#define INPUT_FIFO_SZ_SHIFT QSD_REG(6) QUP_REG(7)
122#define OUTPUT_BLOCK_SZ_SHIFT QSD_REG(0) QUP_REG(0)
123#define OUTPUT_FIFO_SZ_SHIFT QSD_REG(2) QUP_REG(2)
124#define OUTPUT_MODE_SHIFT QSD_REG(8) QUP_REG(10)
125#define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12)
126
127/* SPI_OPERATIONAL fields */
128#define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800
129#define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400
130#define SPI_OP_INPUT_SERVICE_FLAG 0x00000200
131#define SPI_OP_OUTPUT_SERVICE_FLAG 0x00000100
132#define SPI_OP_INPUT_FIFO_FULL 0x00000080
133#define SPI_OP_OUTPUT_FIFO_FULL 0x00000040
134#define SPI_OP_IP_FIFO_NOT_EMPTY 0x00000020
135#define SPI_OP_OP_FIFO_NOT_EMPTY 0x00000010
136#define SPI_OP_STATE_VALID 0x00000004
137#define SPI_OP_STATE 0x00000003
138
139#define SPI_OP_STATE_CLEAR_BITS 0x2
140enum msm_spi_state {
141 SPI_OP_STATE_RESET = 0x00000000,
142 SPI_OP_STATE_RUN = 0x00000001,
143 SPI_OP_STATE_PAUSE = 0x00000003,
144};
145
146/* SPI_ERROR_FLAGS fields */
147#define SPI_ERR_OUTPUT_OVER_RUN_ERR 0x00000020
148#define SPI_ERR_INPUT_UNDER_RUN_ERR 0x00000010
149#define SPI_ERR_OUTPUT_UNDER_RUN_ERR 0x00000008
150#define SPI_ERR_INPUT_OVER_RUN_ERR 0x00000004
151#define SPI_ERR_CLK_OVER_RUN_ERR 0x00000002
152#define SPI_ERR_CLK_UNDER_RUN_ERR 0x00000001
153
154/* We don't allow transactions larger than 4K-64 or 64K-64 due to
155 mx_input/output_cnt register size */
156#define SPI_MAX_TRANSFERS QSD_REG(0xFC0) QUP_REG(0xFC0)
157#define SPI_MAX_LEN (SPI_MAX_TRANSFERS * dd->bytes_per_word)
158
159#define SPI_NUM_CHIPSELECTS 4
160#define SPI_SUPPORTED_MODES (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
161
162#define SPI_DELAY_THRESHOLD 1
163/* Default timeout is 10 milliseconds */
164#define SPI_DEFAULT_TIMEOUT 10
165/* 250 microseconds */
166#define SPI_TRYLOCK_DELAY 250
167
168/* Data Mover burst size */
169#define DM_BURST_SIZE 16
170/* Data Mover commands should be aligned to 64 bit(8 bytes) */
171#define DM_BYTE_ALIGN 8
172
static char const * const spi_rsrcs[] = {
	"spi_clk",
	"spi_miso",
	"spi_mosi"
};

static char const * const spi_cs_rsrcs[] = {
	"spi_cs",
	"spi_cs1",
	"spi_cs2",
	"spi_cs3",
};

enum msm_spi_mode {
	SPI_FIFO_MODE  = 0x0,  /* 00 */
	SPI_BLOCK_MODE = 0x1,  /* 01 */
	SPI_DMOV_MODE  = 0x2,  /* 10 */
	SPI_MODE_NONE  = 0xFF, /* invalid value */
};

/* Structure for SPI CS GPIOs */
struct spi_cs_gpio {
	int  gpio_num;
	bool valid;
};

/* Structures for Data Mover */
struct spi_dmov_cmd {
	dmov_box box;      /* data aligned to max(dm_burst_size, block_size)
			      (<= fifo_size) */
	dmov_s single_pad; /* data unaligned to max(dm_burst_size, block_size)
			      padded to fit */
	dma_addr_t cmd_ptr;
};
207
208MODULE_LICENSE("GPL v2");
209MODULE_VERSION("0.3");
210MODULE_ALIAS("platform:"SPI_DRV_NAME);
211
212static struct pm_qos_request_list qos_req_list;
213
214#ifdef CONFIG_DEBUG_FS
215/* Used to create debugfs entries */
216static const struct {
217 const char *name;
218 mode_t mode;
219 int offset;
220} debugfs_spi_regs[] = {
221 {"config", S_IRUGO | S_IWUSR, SPI_CONFIG},
222 {"io_control", S_IRUGO | S_IWUSR, SPI_IO_CONTROL},
223 {"io_modes", S_IRUGO | S_IWUSR, SPI_IO_MODES},
224 {"sw_reset", S_IWUSR, SPI_SW_RESET},
225 {"time_out", S_IRUGO | S_IWUSR, SPI_TIME_OUT},
226 {"time_out_current", S_IRUGO, SPI_TIME_OUT_CURRENT},
227 {"mx_output_count", S_IRUGO | S_IWUSR, SPI_MX_OUTPUT_COUNT},
228 {"mx_output_cnt_current", S_IRUGO, SPI_MX_OUTPUT_CNT_CURRENT},
229 {"mx_input_count", S_IRUGO | S_IWUSR, SPI_MX_INPUT_COUNT},
230 {"mx_input_cnt_current", S_IRUGO, SPI_MX_INPUT_CNT_CURRENT},
231 {"mx_read_count", S_IRUGO | S_IWUSR, SPI_MX_READ_COUNT},
232 {"mx_read_cnt_current", S_IRUGO, SPI_MX_READ_CNT_CURRENT},
233 {"operational", S_IRUGO | S_IWUSR, SPI_OPERATIONAL},
234 {"error_flags", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS},
235 {"error_flags_en", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS_EN},
236 {"deassert_wait", S_IRUGO | S_IWUSR, SPI_DEASSERT_WAIT},
237 {"output_debug", S_IRUGO, SPI_OUTPUT_DEBUG},
238 {"input_debug", S_IRUGO, SPI_INPUT_DEBUG},
239 {"test_ctrl", S_IRUGO | S_IWUSR, SPI_TEST_CTRL},
240 {"output_fifo", S_IWUSR, SPI_OUTPUT_FIFO},
241 {"input_fifo" , S_IRUSR, SPI_INPUT_FIFO},
242 {"spi_state", S_IRUGO | S_IWUSR, SPI_STATE},
243#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
244 {"fifo_word_cnt", S_IRUGO, SPI_FIFO_WORD_CNT},
245#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
246 {"qup_config", S_IRUGO | S_IWUSR, QUP_CONFIG},
247 {"qup_error_flags", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS},
248 {"qup_error_flags_en", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS_EN},
249 {"mx_write_cnt", S_IRUGO | S_IWUSR, QUP_MX_WRITE_COUNT},
250 {"mx_write_cnt_current", S_IRUGO, QUP_MX_WRITE_CNT_CURRENT},
251 {"output_fifo_word_cnt", S_IRUGO, SPI_OUTPUT_FIFO_WORD_CNT},
252 {"input_fifo_word_cnt", S_IRUGO, SPI_INPUT_FIFO_WORD_CNT},
253#endif
254};
255#endif
256
257struct msm_spi {
258 u8 *read_buf;
259 const u8 *write_buf;
260 void __iomem *base;
261 void __iomem *gsbi_base;
262 struct device *dev;
263 spinlock_t queue_lock;
264 struct mutex core_lock;
265 struct list_head queue;
266 struct workqueue_struct *workqueue;
267 struct work_struct work_data;
268 struct spi_message *cur_msg;
269 struct spi_transfer *cur_transfer;
270 struct completion transfer_complete;
271 struct clk *clk;
272 struct clk *pclk;
273 unsigned long mem_phys_addr;
274 size_t mem_size;
275 unsigned long gsbi_mem_phys_addr;
276 size_t gsbi_mem_size;
277 int input_fifo_size;
278 int output_fifo_size;
279 u32 rx_bytes_remaining;
280 u32 tx_bytes_remaining;
281 u32 clock_speed;
	int                      irq_in;
	int                      read_xfr_cnt;
	int                      write_xfr_cnt;
	int                      write_len;
	int                      read_len;
#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
	int                      irq_out;
	int                      irq_err;
#endif
291 int bytes_per_word;
292 bool suspended;
293 bool transfer_pending;
294 wait_queue_head_t continue_suspend;
295 /* DMA data */
296 enum msm_spi_mode mode;
297 bool use_dma;
298 int tx_dma_chan;
299 int tx_dma_crci;
300 int rx_dma_chan;
301 int rx_dma_crci;
302 /* Data Mover Commands */
303 struct spi_dmov_cmd *tx_dmov_cmd;
304 struct spi_dmov_cmd *rx_dmov_cmd;
305 /* Physical address of the tx dmov box command */
306 dma_addr_t tx_dmov_cmd_dma;
307 dma_addr_t rx_dmov_cmd_dma;
308 struct msm_dmov_cmd tx_hdr;
309 struct msm_dmov_cmd rx_hdr;
310 int input_block_size;
311 int output_block_size;
312 int burst_size;
313 atomic_t rx_irq_called;
314 /* Used to pad messages unaligned to block size */
315 u8 *tx_padding;
316 dma_addr_t tx_padding_dma;
317 u8 *rx_padding;
318 dma_addr_t rx_padding_dma;
319 u32 unaligned_len;
320 /* DMA statistics */
321 int stat_dmov_tx_err;
322 int stat_dmov_rx_err;
323 int stat_rx;
324 int stat_dmov_rx;
325 int stat_tx;
326 int stat_dmov_tx;
327#ifdef CONFIG_DEBUG_FS
328 struct dentry *dent_spi;
329 struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
330#endif
331 struct msm_spi_platform_data *pdata; /* Platform data */
332 /* Remote Spinlock Data */
333 bool use_rlock;
334 remote_mutex_t r_lock;
335 uint32_t pm_lat;
336 /* When set indicates multiple transfers in a single message */
337 bool multi_xfr;
338 bool done;
339 u32 cur_msg_len;
340 /* Used in FIFO mode to keep track of the transfer being processed */
341 struct spi_transfer *cur_tx_transfer;
342 struct spi_transfer *cur_rx_transfer;
343 /* Temporary buffer used for WR-WR or WR-RD transfers */
344 u8 *temp_buf;
	/* GPIO pin numbers for SPI clk, miso and mosi */
	int spi_gpios[ARRAY_SIZE(spi_rsrcs)];
	/* SPI CS GPIOs for each slave */
	struct spi_cs_gpio cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
};
350
351/* Forward declaration */
352static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
353static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
354static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
355static inline int msm_spi_set_state(struct msm_spi *dd,
356 enum msm_spi_state state);
357static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
358static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
359
360#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
361/* Interrupt Handling */
362static inline int msm_spi_get_irq_data(struct msm_spi *dd,
363 struct platform_device *pdev)
364{
365 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
366 dd->irq_out = platform_get_irq_byname(pdev, "spi_irq_out");
367 dd->irq_err = platform_get_irq_byname(pdev, "spi_irq_err");
368 if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
369 return -1;
370 return 0;
371}
372
373static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
374 struct platform_device *pdev)
375{
376 return 0;
377}
378
379static inline int msm_spi_request_gsbi(struct msm_spi *dd) { return 0; }
380static inline void msm_spi_release_gsbi(struct msm_spi *dd) {}
381static inline void msm_spi_init_gsbi(struct msm_spi *dd) {}
382
383static inline void msm_spi_disable_irqs(struct msm_spi *dd)
384{
385 disable_irq(dd->irq_in);
386 disable_irq(dd->irq_out);
387 disable_irq(dd->irq_err);
388}
389
390static inline void msm_spi_enable_irqs(struct msm_spi *dd)
391{
392 enable_irq(dd->irq_in);
393 enable_irq(dd->irq_out);
394 enable_irq(dd->irq_err);
395}
396
397static inline int msm_spi_request_irq(struct msm_spi *dd,
398 const char *name,
399 struct spi_master *master)
400{
401 int rc;
402 rc = request_irq(dd->irq_in, msm_spi_input_irq, IRQF_TRIGGER_RISING,
403 name, dd);
404 if (rc)
405 goto error_irq1;
406 rc = request_irq(dd->irq_out, msm_spi_output_irq, IRQF_TRIGGER_RISING,
407 name, dd);
408 if (rc)
409 goto error_irq2;
410 rc = request_irq(dd->irq_err, msm_spi_error_irq, IRQF_TRIGGER_RISING,
411 name, master);
412 if (rc)
413 goto error_irq3;
414 return 0;
415
416error_irq3:
417 free_irq(dd->irq_out, dd);
418error_irq2:
419 free_irq(dd->irq_in, dd);
420error_irq1:
421 return rc;
422}
423
424static inline void msm_spi_free_irq(struct msm_spi *dd,
425 struct spi_master *master)
426{
427 free_irq(dd->irq_err, master);
428 free_irq(dd->irq_out, dd);
429 free_irq(dd->irq_in, dd);
430}
431
432static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
433static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
434static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
435
436static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
437static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
438{
439 msm_spi_write_word_to_fifo(dd);
440}
441static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
442
443static inline void msm_spi_complete(struct msm_spi *dd)
444{
445 complete(&dd->transfer_complete);
446}
447
448static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
449{
450 writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
451}
452
453static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
454{
455 writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
456}
457
458#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
459
460/* Interrupt Handling */
/* In QUP the same interrupt line is used for input, output and error */
462static inline int msm_spi_get_irq_data(struct msm_spi *dd,
463 struct platform_device *pdev)
464{
465 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
466 if (dd->irq_in < 0)
467 return -1;
468 return 0;
469}
470
471static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
472 struct platform_device *pdev)
473{
474 struct resource *resource;
475
476 resource = platform_get_resource_byname(pdev,
477 IORESOURCE_MEM, "gsbi_base");
478 if (!resource)
479 return -ENXIO;
480 dd->gsbi_mem_phys_addr = resource->start;
481 dd->gsbi_mem_size = resource_size(resource);
482
483 return 0;
484}
485
486static inline void msm_spi_release_gsbi(struct msm_spi *dd)
487{
488 iounmap(dd->gsbi_base);
489 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
490}
491
492static inline int msm_spi_request_gsbi(struct msm_spi *dd)
493{
494 if (!request_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size,
495 SPI_DRV_NAME)) {
496 return -ENXIO;
497 }
498 dd->gsbi_base = ioremap(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
499 if (!dd->gsbi_base) {
500 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
501 return -ENXIO;
502 }
503 return 0;
504}
505
506static inline void msm_spi_init_gsbi(struct msm_spi *dd)
507{
508 /* Set GSBI to SPI mode, and CRCI_MUX_CTRL to SPI CRCI ports */
509 writel_relaxed(GSBI_SPI_CONFIG, dd->gsbi_base + GSBI_CTRL_REG);
510}
511
/* Figure out which IRQ occurred and call the relevant handlers */
513static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
514{
515 u32 op, ret = IRQ_NONE;
516 struct msm_spi *dd = dev_id;
517
518 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
519 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
520 struct spi_master *master = dev_get_drvdata(dd->dev);
521 ret |= msm_spi_error_irq(irq, master);
522 }
523
524 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
525 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
526 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
527 dd->base + SPI_OPERATIONAL);
528 /*
529 * Ensure service flag was cleared before further
530 * processing of interrupt.
531 */
532 mb();
533 ret |= msm_spi_input_irq(irq, dev_id);
534 }
535
536 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
537 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
538 dd->base + SPI_OPERATIONAL);
539 /*
540 * Ensure service flag was cleared before further
541 * processing of interrupt.
542 */
543 mb();
544 ret |= msm_spi_output_irq(irq, dev_id);
545 }
546
547 if (dd->done) {
548 complete(&dd->transfer_complete);
549 dd->done = 0;
550 }
551 return ret;
552}
553
554static inline int msm_spi_request_irq(struct msm_spi *dd,
555 const char *name,
556 struct spi_master *master)
557{
558 return request_irq(dd->irq_in, msm_spi_qup_irq, IRQF_TRIGGER_HIGH,
559 name, dd);
560}
561
562static inline void msm_spi_free_irq(struct msm_spi *dd,
563 struct spi_master *master)
564{
565 free_irq(dd->irq_in, dd);
566}
567
568static inline void msm_spi_free_output_irq(struct msm_spi *dd) { }
569static inline void msm_spi_free_error_irq(struct msm_spi *dd,
570 struct spi_master *master) { }
571
572static inline void msm_spi_disable_irqs(struct msm_spi *dd)
573{
574 disable_irq(dd->irq_in);
575}
576
577static inline void msm_spi_enable_irqs(struct msm_spi *dd)
578{
579 enable_irq(dd->irq_in);
580}
581
582static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
583{
584 *spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
585}
586
587static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
588{
589 writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
590}
591
592static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
593
594/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
595static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
596{
597 u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
598
599 msm_spi_add_configs(dd, &qup_config, bpw-1);
600 writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
601 dd->base + QUP_CONFIG);
602}
603
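/*
 * In FIFO mode the QUP is stepped into RUN and then PAUSE so that TX words
 * can be preloaded into the output FIFO; msm_spi_process_transfer() moves
 * the core to RUN only after the first word has been written.
 */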
604static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
605{
606 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
607 return -1;
608 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
609 return -1;
610 return 0;
611}
612
613static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
614{
615 if (read_count <= dd->input_fifo_size)
616 msm_spi_write_rmn_to_fifo(dd);
617 else
618 msm_spi_write_word_to_fifo(dd);
619}
620
621static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
622{
623 writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
624}
625
626static inline void msm_spi_complete(struct msm_spi *dd)
627{
628 dd->done = 1;
629}
630
631static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
632{
633 writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
634}
635
636static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
637{
638 writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
639}
640
641#endif
642
643static inline int msm_spi_request_gpios(struct msm_spi *dd)
644{
645 int i;
646 int result = 0;
647
648 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
649 if (dd->spi_gpios[i] >= 0) {
650 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
651 if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
655 goto error;
656 }
657 }
658 }
659 return 0;
660
661error:
662 for (; --i >= 0;) {
663 if (dd->spi_gpios[i] >= 0)
664 gpio_free(dd->spi_gpios[i]);
665 }
666 return result;
667}
668
669static inline void msm_spi_free_gpios(struct msm_spi *dd)
670{
671 int i;
672
673 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
674 if (dd->spi_gpios[i] >= 0)
675 gpio_free(dd->spi_gpios[i]);
676 }

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}
685
686static void msm_spi_clock_set(struct msm_spi *dd, int speed)
687{
688 int rc;
689
690 rc = clk_set_rate(dd->clk, speed);
691 if (!rc)
692 dd->clock_speed = speed;
693}
694
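/*
 * Decode the FIFO geometry advertised in SPI_IO_MODES: 'block' selects the
 * block size (1, 4 or 8 words) and 'mult' selects how many such blocks
 * (2, 4, 8 or 16) make up the FIFO. fifo_size is returned in words,
 * block_size in bytes.
 */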
695static int msm_spi_calculate_size(int *fifo_size,
696 int *block_size,
697 int block,
698 int mult)
699{
700 int words;
701
702 switch (block) {
703 case 0:
704 words = 1; /* 4 bytes */
705 break;
706 case 1:
707 words = 4; /* 16 bytes */
708 break;
709 case 2:
710 words = 8; /* 32 bytes */
711 break;
712 default:
713 return -1;
714 }
715 switch (mult) {
716 case 0:
717 *fifo_size = words * 2;
718 break;
719 case 1:
720 *fifo_size = words * 4;
721 break;
722 case 2:
723 *fifo_size = words * 8;
724 break;
725 case 3:
726 *fifo_size = words * 16;
727 break;
728 default:
729 return -1;
730 }
731 *block_size = words * sizeof(u32); /* in bytes */
732 return 0;
733}
734
735static void get_next_transfer(struct msm_spi *dd)
736{
737 struct spi_transfer *t = dd->cur_transfer;
738
739 if (t->transfer_list.next != &dd->cur_msg->transfers) {
740 dd->cur_transfer = list_entry(t->transfer_list.next,
741 struct spi_transfer,
742 transfer_list);
743 dd->write_buf = dd->cur_transfer->tx_buf;
744 dd->read_buf = dd->cur_transfer->rx_buf;
745 }
746}
747
748static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
749{
750 u32 spi_iom;
751 int block;
752 int mult;
753
754 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
755
756 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
757 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
758 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
759 block, mult)) {
760 goto fifo_size_err;
761 }
762
763 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
764 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
765 if (msm_spi_calculate_size(&dd->output_fifo_size,
766 &dd->output_block_size, block, mult)) {
767 goto fifo_size_err;
768 }
769 /* DM mode is not available for this block size */
770 if (dd->input_block_size == 4 || dd->output_block_size == 4)
771 dd->use_dma = 0;
772
773 /* DM mode is currently unsupported for different block sizes */
774 if (dd->input_block_size != dd->output_block_size)
775 dd->use_dma = 0;
776
777 if (dd->use_dma)
778 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
779
780 return;
781
782fifo_size_err:
783 dd->use_dma = 0;
784 printk(KERN_WARNING "%s: invalid FIFO size, SPI_IO_MODES=0x%x\n",
785 __func__, spi_iom);
786 return;
787}
788
789static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
790{
791 u32 data_in;
792 int i;
793 int shift;
794
795 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
796 if (dd->read_buf) {
797 for (i = 0; (i < dd->bytes_per_word) &&
798 dd->rx_bytes_remaining; i++) {
799 /* The data format depends on bytes_per_word:
800 4 bytes: 0x12345678
801 3 bytes: 0x00123456
802 2 bytes: 0x00001234
803 1 byte : 0x00000012
804 */
805 shift = 8 * (dd->bytes_per_word - i - 1);
806 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
807 dd->rx_bytes_remaining--;
808 }
809 } else {
810 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
811 dd->rx_bytes_remaining -= dd->bytes_per_word;
812 else
813 dd->rx_bytes_remaining = 0;
814 }
815 dd->read_xfr_cnt++;
816 if (dd->multi_xfr) {
817 if (!dd->rx_bytes_remaining)
818 dd->read_xfr_cnt = 0;
819 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
820 dd->read_len) {
821 struct spi_transfer *t = dd->cur_rx_transfer;
822 if (t->transfer_list.next != &dd->cur_msg->transfers) {
823 t = list_entry(t->transfer_list.next,
824 struct spi_transfer,
825 transfer_list);
826 dd->read_buf = t->rx_buf;
827 dd->read_len = t->len;
828 dd->read_xfr_cnt = 0;
829 dd->cur_rx_transfer = t;
830 }
831 }
832 }
833}
834
835static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
836{
837 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
838
839 return spi_op & SPI_OP_STATE_VALID;
840}
841
842static inline int msm_spi_wait_valid(struct msm_spi *dd)
843{
844 unsigned long delay = 0;
845 unsigned long timeout = 0;
846
847 if (dd->clock_speed == 0)
848 return -EINVAL;
849 /*
850 * Based on the SPI clock speed, sufficient time
851 * should be given for the SPI state transition
852 * to occur
853 */
854 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
855 /*
856 * For small delay values, the default timeout would
857 * be one jiffy
858 */
859 if (delay < SPI_DELAY_THRESHOLD)
860 delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
875 /*
876 * For smaller values of delay, context switch time
877 * would negate the usage of usleep
878 */
879 if (delay > 20)
880 usleep(delay);
881 else if (delay)
882 udelay(delay);
883 }
884 return 0;
885}
886
887static inline int msm_spi_set_state(struct msm_spi *dd,
888 enum msm_spi_state state)
889{
890 enum msm_spi_state cur_state;
891 if (msm_spi_wait_valid(dd))
892 return -1;
893 cur_state = readl_relaxed(dd->base + SPI_STATE);
894 /* Per spec:
895 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
896 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
897 (state == SPI_OP_STATE_RESET)) {
898 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
899 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
900 } else {
901 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
902 dd->base + SPI_STATE);
903 }
904 if (msm_spi_wait_valid(dd))
905 return -1;
906
907 return 0;
908}
909
910static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
911{
912 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
913
914 if (n != (*config & SPI_CFG_N))
915 *config = (*config & ~SPI_CFG_N) | n;
916
917 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
918 if (dd->read_buf == NULL)
919 *config |= SPI_NO_INPUT;
920 if (dd->write_buf == NULL)
921 *config |= SPI_NO_OUTPUT;
922 }
923}
924
925static void msm_spi_set_config(struct msm_spi *dd, int bpw)
926{
927 u32 spi_config;
928
929 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
930
931 if (dd->cur_msg->spi->mode & SPI_CPHA)
932 spi_config &= ~SPI_CFG_INPUT_FIRST;
933 else
934 spi_config |= SPI_CFG_INPUT_FIRST;
935 if (dd->cur_msg->spi->mode & SPI_LOOP)
936 spi_config |= SPI_CFG_LOOPBACK;
937 else
938 spi_config &= ~SPI_CFG_LOOPBACK;
939 msm_spi_add_configs(dd, &spi_config, bpw-1);
940 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
941 msm_spi_set_qup_config(dd, bpw);
942}
943
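/*
 * Build the data mover command list for the current chunk: the part that is
 * a whole number of bursts is transferred through the 'box' command, while
 * any unaligned tail is bounced through the pre-allocated tx/rx padding
 * buffers via the 'single_pad' command.
 */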
944static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
945{
946 dmov_box *box;
947 int bytes_to_send, num_rows, bytes_sent;
948 u32 num_transfers;
949
950 atomic_set(&dd->rx_irq_called, 0);
951 if (dd->write_len && !dd->read_len) {
952 /* WR-WR transfer */
953 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
954 dd->write_buf = dd->temp_buf;
955 } else {
956 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
957 /* For WR-RD transfer, bytes_sent can be negative */
958 if (bytes_sent < 0)
959 bytes_sent = 0;
960 }
961
962 /* We'll send in chunks of SPI_MAX_LEN if larger */
963 bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
964 SPI_MAX_LEN : dd->tx_bytes_remaining;
965 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
966 dd->unaligned_len = bytes_to_send % dd->burst_size;
967 num_rows = bytes_to_send / dd->burst_size;
968
969 dd->mode = SPI_DMOV_MODE;
970
971 if (num_rows) {
972 /* src in 16 MSB, dst in 16 LSB */
973 box = &dd->tx_dmov_cmd->box;
974 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
975 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
976 box->num_rows = (num_rows << 16) | num_rows;
977 box->row_offset = (dd->burst_size << 16) | 0;
978
979 box = &dd->rx_dmov_cmd->box;
980 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
981 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
982 box->num_rows = (num_rows << 16) | num_rows;
983 box->row_offset = (0 << 16) | dd->burst_size;
984
985 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
986 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
987 offsetof(struct spi_dmov_cmd, box));
988 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
989 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
990 offsetof(struct spi_dmov_cmd, box));
991 } else {
992 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
993 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
994 offsetof(struct spi_dmov_cmd, single_pad));
995 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
996 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
997 offsetof(struct spi_dmov_cmd, single_pad));
998 }
999
1000 if (!dd->unaligned_len) {
1001 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
1002 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
1003 } else {
1004 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
1005 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
1006 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
1007
1008 if ((dd->multi_xfr) && (dd->read_len <= 0))
1009 offset = dd->cur_msg_len - dd->unaligned_len;
1010
1011 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
1012 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
1013
1014 memset(dd->tx_padding, 0, dd->burst_size);
1015 memset(dd->rx_padding, 0, dd->burst_size);
1016 if (dd->write_buf)
1017 memcpy(dd->tx_padding, dd->write_buf + offset,
1018 dd->unaligned_len);
1019
1020 tx_cmd->src = dd->tx_padding_dma;
1021 rx_cmd->dst = dd->rx_padding_dma;
1022 tx_cmd->len = rx_cmd->len = dd->burst_size;
1023 }
1024 /* This also takes care of the padding dummy buf
1025 Since this is set to the correct length, the
1026 dummy bytes won't be actually sent */
1027 if (dd->multi_xfr) {
1028 u32 write_transfers = 0;
1029 u32 read_transfers = 0;
1030
1031 if (dd->write_len > 0) {
1032 write_transfers = DIV_ROUND_UP(dd->write_len,
1033 dd->bytes_per_word);
1034 writel_relaxed(write_transfers,
1035 dd->base + SPI_MX_OUTPUT_COUNT);
1036 }
1037 if (dd->read_len > 0) {
1038 /*
1039 * The read following a write transfer must take
1040 * into account, that the bytes pertaining to
1041 * the write transfer needs to be discarded,
1042 * before the actual read begins.
1043 */
1044 read_transfers = DIV_ROUND_UP(dd->read_len +
1045 dd->write_len,
1046 dd->bytes_per_word);
1047 writel_relaxed(read_transfers,
1048 dd->base + SPI_MX_INPUT_COUNT);
1049 }
1050 } else {
1051 if (dd->write_buf)
1052 writel_relaxed(num_transfers,
1053 dd->base + SPI_MX_OUTPUT_COUNT);
1054 if (dd->read_buf)
1055 writel_relaxed(num_transfers,
1056 dd->base + SPI_MX_INPUT_COUNT);
1057 }
1058}
1059
1060static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
1061{
1062 dma_coherent_pre_ops();
1063 if (dd->write_buf)
1064 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
1065 if (dd->read_buf)
1066 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
1067}
1068
/* The SPI core can send a maximum of 4K transfers because of a HW problem
   with infinite mode.
   Therefore, we send several chunks of 3K or less (depending on how
   much is left).
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished.
*/
1076static int msm_spi_dm_send_next(struct msm_spi *dd)
1077{
1078 /* By now we should have sent all the bytes in FIFO mode,
1079 * However to make things right, we'll check anyway.
1080 */
1081 if (dd->mode != SPI_DMOV_MODE)
1082 return 0;
1083
1084 /* We need to send more chunks, if we sent max last time */
1085 if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
1086 dd->tx_bytes_remaining -= SPI_MAX_LEN;
1087 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1088 return 0;
1089 dd->read_len = dd->write_len = 0;
1090 msm_spi_setup_dm_transfer(dd);
1091 msm_spi_enqueue_dm_commands(dd);
1092 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1093 return 0;
1094 return 1;
1095 } else if (dd->read_len && dd->write_len) {
1096 dd->tx_bytes_remaining -= dd->cur_transfer->len;
1097 if (list_is_last(&dd->cur_transfer->transfer_list,
1098 &dd->cur_msg->transfers))
1099 return 0;
1100 get_next_transfer(dd);
1101 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
1102 return 0;
1103 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
1104 dd->read_buf = dd->temp_buf;
1105 dd->read_len = dd->write_len = -1;
1106 msm_spi_setup_dm_transfer(dd);
1107 msm_spi_enqueue_dm_commands(dd);
1108 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1109 return 0;
1110 return 1;
1111 }
1112 return 0;
1113}
1114
1115static inline void msm_spi_ack_transfer(struct msm_spi *dd)
1116{
1117 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
1118 SPI_OP_MAX_OUTPUT_DONE_FLAG,
1119 dd->base + SPI_OPERATIONAL);
1120 /* Ensure done flag was cleared before proceeding further */
1121 mb();
1122}
1123
1124static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
1125{
1126 struct msm_spi *dd = dev_id;
1127
1128 dd->stat_rx++;
1129
1130 if (dd->mode == SPI_MODE_NONE)
1131 return IRQ_HANDLED;
1132
1133 if (dd->mode == SPI_DMOV_MODE) {
1134 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1135 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
1136 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
1137 msm_spi_ack_transfer(dd);
1138 if (dd->unaligned_len == 0) {
1139 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1140 return IRQ_HANDLED;
1141 }
1142 msm_spi_complete(dd);
1143 return IRQ_HANDLED;
1144 }
1145 return IRQ_NONE;
1146 }
1147
1148 if (dd->mode == SPI_FIFO_MODE) {
1149 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
1150 SPI_OP_IP_FIFO_NOT_EMPTY) &&
1151 (dd->rx_bytes_remaining > 0)) {
1152 msm_spi_read_word_from_fifo(dd);
1153 }
1154 if (dd->rx_bytes_remaining == 0)
1155 msm_spi_complete(dd);
1156 }
1157
1158 return IRQ_HANDLED;
1159}
1160
1161static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1162{
1163 u32 word;
1164 u8 byte;
1165 int i;
1166
1167 word = 0;
1168 if (dd->write_buf) {
1169 for (i = 0; (i < dd->bytes_per_word) &&
1170 dd->tx_bytes_remaining; i++) {
1171 dd->tx_bytes_remaining--;
1172 byte = *dd->write_buf++;
1173 word |= (byte << (BITS_PER_BYTE * (3 - i)));
1174 }
1175 } else
1176 if (dd->tx_bytes_remaining > dd->bytes_per_word)
1177 dd->tx_bytes_remaining -= dd->bytes_per_word;
1178 else
1179 dd->tx_bytes_remaining = 0;
1180 dd->write_xfr_cnt++;
1181 if (dd->multi_xfr) {
1182 if (!dd->tx_bytes_remaining)
1183 dd->write_xfr_cnt = 0;
1184 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
1185 dd->write_len) {
1186 struct spi_transfer *t = dd->cur_tx_transfer;
1187 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1188 t = list_entry(t->transfer_list.next,
1189 struct spi_transfer,
1190 transfer_list);
1191 dd->write_buf = t->tx_buf;
1192 dd->write_len = t->len;
1193 dd->write_xfr_cnt = 0;
1194 dd->cur_tx_transfer = t;
1195 }
1196 }
1197 }
1198 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1199}
1200
1201static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1202{
1203 int count = 0;
1204
1205 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
1206 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
1207 SPI_OP_OUTPUT_FIFO_FULL)) {
1208 msm_spi_write_word_to_fifo(dd);
1209 count++;
1210 }
1211}
1212
1213static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1214{
1215 struct msm_spi *dd = dev_id;
1216
1217 dd->stat_tx++;
1218
1219 if (dd->mode == SPI_MODE_NONE)
1220 return IRQ_HANDLED;
1221
1222 if (dd->mode == SPI_DMOV_MODE) {
1223 /* TX_ONLY transaction is handled here
1224 This is the only place we send complete at tx and not rx */
1225 if (dd->read_buf == NULL &&
1226 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1227 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1228 msm_spi_ack_transfer(dd);
1229 msm_spi_complete(dd);
1230 return IRQ_HANDLED;
1231 }
1232 return IRQ_NONE;
1233 }
1234
1235 /* Output FIFO is empty. Transmit any outstanding write data. */
1236 if (dd->mode == SPI_FIFO_MODE)
1237 msm_spi_write_rmn_to_fifo(dd);
1238
1239 return IRQ_HANDLED;
1240}
1241
1242static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1243{
1244 struct spi_master *master = dev_id;
1245 struct msm_spi *dd = spi_master_get_devdata(master);
1246 u32 spi_err;
1247
1248 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1249 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1250 dev_warn(master->dev.parent, "SPI output overrun error\n");
1251 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1252 dev_warn(master->dev.parent, "SPI input underrun error\n");
1253 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1254 dev_warn(master->dev.parent, "SPI output underrun error\n");
1255 msm_spi_get_clk_err(dd, &spi_err);
1256 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1257 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1258 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1259 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1260 msm_spi_clear_error_flags(dd);
1261 msm_spi_ack_clk_err(dd);
1262 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1263 mb();
1264 return IRQ_HANDLED;
1265}
1266
1267static int msm_spi_map_dma_buffers(struct msm_spi *dd)
1268{
1269 struct device *dev;
1270 struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
1273 unsigned tx_len, rx_len;
1274 int ret = -EINVAL;
1275
1276 dev = &dd->cur_msg->spi->dev;
1277 first_xfr = dd->cur_transfer;
1278 tx_buf = (void *)first_xfr->tx_buf;
1279 rx_buf = first_xfr->rx_buf;
1280 tx_len = rx_len = first_xfr->len;
1281
1282 /*
1283 * For WR-WR and WR-RD transfers, we allocate our own temporary
1284 * buffer and copy the data to/from the client buffers.
1285 */
1286 if (dd->multi_xfr) {
1287 dd->temp_buf = kzalloc(dd->cur_msg_len,
1288 GFP_KERNEL | __GFP_DMA);
1289 if (!dd->temp_buf)
1290 return -ENOMEM;
1291 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1292 struct spi_transfer, transfer_list);
1293
1294 if (dd->write_len && !dd->read_len) {
1295 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1296 goto error;
1297
1298 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1299 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1300 nxt_xfr->len);
1301 tx_buf = dd->temp_buf;
1302 tx_len = dd->cur_msg_len;
1303 } else {
1304 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1305 goto error;
1306
1307 rx_buf = dd->temp_buf;
1308 rx_len = dd->cur_msg_len;
1309 }
1310 }
1311 if (tx_buf != NULL) {
1312 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1313 tx_len, DMA_TO_DEVICE);
1314 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1315 dev_err(dev, "dma %cX %d bytes error\n",
1316 'T', tx_len);
1317 ret = -ENOMEM;
1318 goto error;
1319 }
1320 }
1321 if (rx_buf != NULL) {
1322 dma_addr_t dma_handle;
1323 dma_handle = dma_map_single(dev, rx_buf,
1324 rx_len, DMA_FROM_DEVICE);
1325 if (dma_mapping_error(NULL, dma_handle)) {
1326 dev_err(dev, "dma %cX %d bytes error\n",
1327 'R', rx_len);
1328 if (tx_buf != NULL)
1329 dma_unmap_single(NULL, first_xfr->tx_dma,
1330 tx_len, DMA_TO_DEVICE);
1331 ret = -ENOMEM;
1332 goto error;
1333 }
1334 if (dd->multi_xfr)
1335 nxt_xfr->rx_dma = dma_handle;
1336 else
1337 first_xfr->rx_dma = dma_handle;
1338 }
1339 return 0;
1340
1341error:
1342 kfree(dd->temp_buf);
1343 dd->temp_buf = NULL;
1344 return ret;
1345}
1346
1347static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
1348{
1349 struct device *dev;
1350 u32 offset;
1351
1352 dev = &dd->cur_msg->spi->dev;
1353 if (dd->cur_msg->is_dma_mapped)
1354 goto unmap_end;
1355
1356 if (dd->multi_xfr) {
1357 if (dd->write_len && !dd->read_len) {
1358 dma_unmap_single(dev,
1359 dd->cur_transfer->tx_dma,
1360 dd->cur_msg_len,
1361 DMA_TO_DEVICE);
1362 } else {
1363 struct spi_transfer *prev_xfr;
1364 prev_xfr = list_entry(
1365 dd->cur_transfer->transfer_list.prev,
1366 struct spi_transfer,
1367 transfer_list);
1368 if (dd->cur_transfer->rx_buf) {
1369 dma_unmap_single(dev,
1370 dd->cur_transfer->rx_dma,
1371 dd->cur_msg_len,
1372 DMA_FROM_DEVICE);
1373 }
1374 if (prev_xfr->tx_buf) {
1375 dma_unmap_single(dev,
1376 prev_xfr->tx_dma,
1377 prev_xfr->len,
1378 DMA_TO_DEVICE);
1379 }
1380 if (dd->unaligned_len && dd->read_buf) {
1381 offset = dd->cur_msg_len - dd->unaligned_len;
1382 dma_coherent_post_ops();
1383 memcpy(dd->read_buf + offset, dd->rx_padding,
1384 dd->unaligned_len);
1385 memcpy(dd->cur_transfer->rx_buf,
1386 dd->read_buf + prev_xfr->len,
1387 dd->cur_transfer->len);
1388 }
1389 }
1390 kfree(dd->temp_buf);
1391 dd->temp_buf = NULL;
1392 return;
1393 } else {
1394 if (dd->cur_transfer->rx_buf)
1395 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1396 dd->cur_transfer->len,
1397 DMA_FROM_DEVICE);
1398 if (dd->cur_transfer->tx_buf)
1399 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1400 dd->cur_transfer->len,
1401 DMA_TO_DEVICE);
1402 }
1403
1404unmap_end:
1405 /* If we padded the transfer, we copy it from the padding buf */
1406 if (dd->unaligned_len && dd->read_buf) {
1407 offset = dd->cur_transfer->len - dd->unaligned_len;
1408 dma_coherent_post_ops();
1409 memcpy(dd->read_buf + offset, dd->rx_padding,
1410 dd->unaligned_len);
1411 }
1412}
1413
1414/**
1415 * msm_use_dm - decides whether to use data mover for this
1416 * transfer
1417 * @dd: device
1418 * @tr: transfer
1419 *
1420 * Start using DM if:
1421 * 1. Transfer is longer than 3*block size.
1422 * 2. Buffers should be aligned to cache line.
1423 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
1424 */
1425static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
1426 u8 bpw)
1427{
1428 u32 cache_line = dma_get_cache_alignment();
1429
1430 if (!dd->use_dma)
1431 return 0;
1432
1433 if (dd->cur_msg_len < 3*dd->input_block_size)
1434 return 0;
1435
1436 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
1437 return 0;
1438
1439 if (tr->tx_buf) {
1440 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1441 return 0;
1442 }
1443 if (tr->rx_buf) {
1444 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1445 return 0;
1446 }
1447
	if (tr->cs_change &&
	   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1450 return 0;
1451 return 1;
1452}
1453
1454static void msm_spi_process_transfer(struct msm_spi *dd)
1455{
1456 u8 bpw;
1457 u32 spi_ioc;
1458 u32 spi_iom;
1459 u32 spi_ioc_orig;
1460 u32 max_speed;
1461 u32 chip_select;
1462 u32 read_count;
1463 u32 timeout;
1464 u32 int_loopback = 0;
1465
1466 dd->tx_bytes_remaining = dd->cur_msg_len;
1467 dd->rx_bytes_remaining = dd->cur_msg_len;
1468 dd->read_buf = dd->cur_transfer->rx_buf;
1469 dd->write_buf = dd->cur_transfer->tx_buf;
1470 init_completion(&dd->transfer_complete);
1471 if (dd->cur_transfer->bits_per_word)
1472 bpw = dd->cur_transfer->bits_per_word;
1473 else
1474 if (dd->cur_msg->spi->bits_per_word)
1475 bpw = dd->cur_msg->spi->bits_per_word;
1476 else
1477 bpw = 8;
1478 dd->bytes_per_word = (bpw + 7) / 8;
1479
1480 if (dd->cur_transfer->speed_hz)
1481 max_speed = dd->cur_transfer->speed_hz;
1482 else
1483 max_speed = dd->cur_msg->spi->max_speed_hz;
1484 if (!dd->clock_speed || max_speed != dd->clock_speed)
1485 msm_spi_clock_set(dd, max_speed);
1486
1487 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1488 if (dd->cur_msg->spi->mode & SPI_LOOP)
1489 int_loopback = 1;
1490 if (int_loopback && dd->multi_xfr &&
1491 (read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			printk(KERN_WARNING
			"%s: Internal Loopback does not support > fifo size"
			" for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			printk(KERN_WARNING
			"%s: Internal Loopback does not support > fifo size"
			" for write-then-write transactions\n",
			__func__);
1502 return;
1503 }
1504 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1505 dd->mode = SPI_FIFO_MODE;
1506 if (dd->multi_xfr) {
1507 dd->read_len = dd->cur_transfer->len;
1508 dd->write_len = dd->cur_transfer->len;
1509 }
1510 /* read_count cannot exceed fifo_size, and only one READ COUNT
1511 interrupt is generated per transaction, so for transactions
1512 larger than fifo size READ COUNT must be disabled.
1513 For those transactions we usually move to Data Mover mode.
1514 */
1515 if (read_count <= dd->input_fifo_size) {
1516 writel_relaxed(read_count,
1517 dd->base + SPI_MX_READ_COUNT);
1518 msm_spi_set_write_count(dd, read_count);
1519 } else {
1520 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1521 msm_spi_set_write_count(dd, 0);
1522 }
1523 } else {
1524 dd->mode = SPI_DMOV_MODE;
1525 if (dd->write_len && dd->read_len) {
1526 dd->tx_bytes_remaining = dd->write_len;
1527 dd->rx_bytes_remaining = dd->read_len;
1528 }
1529 }
1530
1531 /* Write mode - fifo or data mover*/
1532 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1533 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1534 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1535 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1536 /* Turn on packing for data mover */
1537 if (dd->mode == SPI_DMOV_MODE)
1538 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1539 else
1540 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1541 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1542
1543 msm_spi_set_config(dd, bpw);
1544
1545 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1546 spi_ioc_orig = spi_ioc;
1547 if (dd->cur_msg->spi->mode & SPI_CPOL)
1548 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1549 else
1550 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1551 chip_select = dd->cur_msg->spi->chip_select << 2;
1552 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1553 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1554 if (!dd->cur_transfer->cs_change)
1555 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1556 if (spi_ioc != spi_ioc_orig)
1557 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1558
1559 if (dd->mode == SPI_DMOV_MODE) {
1560 msm_spi_setup_dm_transfer(dd);
1561 msm_spi_enqueue_dm_commands(dd);
1562 }
1563 /* The output fifo interrupt handler will handle all writes after
1564 the first. Restricting this to one write avoids contention
1565 issues and race conditions between this thread and the int handler
1566 */
1567 else if (dd->mode == SPI_FIFO_MODE) {
1568 if (msm_spi_prepare_for_write(dd))
1569 goto transfer_end;
1570 msm_spi_start_write(dd, read_count);
1571 }
1572
1573 /* Only enter the RUN state after the first word is written into
1574 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1575 might fire before the first word is written resulting in a
1576 possible race condition.
1577 */
1578 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1579 goto transfer_end;
1580
1581 timeout = 100 * msecs_to_jiffies(
1582 DIV_ROUND_UP(dd->cur_msg_len * 8,
1583 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1584
1585 /* Assume success, this might change later upon transaction result */
1586 dd->cur_msg->status = 0;
1587 do {
1588 if (!wait_for_completion_timeout(&dd->transfer_complete,
1589 timeout)) {
1590 dev_err(dd->dev, "%s: SPI transaction "
1591 "timeout\n", __func__);
1592 dd->cur_msg->status = -EIO;
1593 if (dd->mode == SPI_DMOV_MODE) {
1594 msm_dmov_flush(dd->tx_dma_chan);
1595 msm_dmov_flush(dd->rx_dma_chan);
1596 }
1597 break;
1598 }
1599 } while (msm_spi_dm_send_next(dd));
1600
1601transfer_end:
1602 if (dd->mode == SPI_DMOV_MODE)
1603 msm_spi_unmap_dma_buffers(dd);
1604 dd->mode = SPI_MODE_NONE;
1605
1606 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1607 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1608 dd->base + SPI_IO_CONTROL);
1609}
1610
1611static void get_transfer_length(struct msm_spi *dd)
1612{
1613 struct spi_transfer *tr;
1614 int num_xfrs = 0;
1615 int readlen = 0;
1616 int writelen = 0;
1617
1618 dd->cur_msg_len = 0;
1619 dd->multi_xfr = 0;
1620 dd->read_len = dd->write_len = 0;
1621
1622 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1623 if (tr->tx_buf)
1624 writelen += tr->len;
1625 if (tr->rx_buf)
1626 readlen += tr->len;
1627 dd->cur_msg_len += tr->len;
1628 num_xfrs++;
1629 }
1630
1631 if (num_xfrs == 2) {
1632 struct spi_transfer *first_xfr = dd->cur_transfer;
1633
1634 dd->multi_xfr = 1;
1635 tr = list_entry(first_xfr->transfer_list.next,
1636 struct spi_transfer,
1637 transfer_list);
1638 /*
1639 * We update dd->read_len and dd->write_len only
1640 * for WR-WR and WR-RD transfers.
1641 */
1642 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1643 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1644 ((!tr->tx_buf) && (tr->rx_buf))) {
1645 dd->read_len = readlen;
1646 dd->write_len = writelen;
1647 }
1648 }
1649 } else if (num_xfrs > 1)
1650 dd->multi_xfr = 1;
1651}
1652
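/*
 * Group consecutive transfers that share the same cs_change setting so they
 * can be processed back-to-back as one FIFO-mode transaction; returns the
 * number of transfers grouped and accumulates their total length in
 * dd->cur_msg_len.
 */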
1653static inline int combine_transfers(struct msm_spi *dd)
1654{
1655 struct spi_transfer *t = dd->cur_transfer;
1656 struct spi_transfer *nxt;
1657 int xfrs_grped = 1;
1658
1659 dd->cur_msg_len = dd->cur_transfer->len;
1660 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1661 nxt = list_entry(t->transfer_list.next,
1662 struct spi_transfer,
1663 transfer_list);
1664 if (t->cs_change != nxt->cs_change)
1665 return xfrs_grped;
1666 dd->cur_msg_len += nxt->len;
1667 xfrs_grped++;
1668 t = nxt;
1669 }
1670 return xfrs_grped;
1671}
1672
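/*
 * Top-level message handler: requests the chip-select GPIO for the addressed
 * slave (when one is configured and the controller is not in loopback mode),
 * then either walks grouped transfers in FIFO mode or runs a single, WR-WR
 * or WR-RD transfer, mapping DMA buffers when the data mover is used.
 */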
1673static void msm_spi_process_message(struct msm_spi *dd)
1674{
1675 int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
1681 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1682 (!(dd->cs_gpios[cs_num].valid)) &&
1683 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1684 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1685 spi_cs_rsrcs[cs_num]);
1686 if (rc) {
1687 dev_err(dd->dev, "gpio_request for pin %d failed with "
1688 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1689 rc);
1690 return;
1691 }
1692 dd->cs_gpios[cs_num].valid = 1;
1693 }

	dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
					    struct spi_transfer,
					    transfer_list);
1698 get_transfer_length(dd);
1699 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1700 /* Handling of multi-transfers. FIFO mode is used by default */
1701 list_for_each_entry(dd->cur_transfer,
1702 &dd->cur_msg->transfers,
1703 transfer_list) {
1704 if (!dd->cur_transfer->len)
				goto error;
			if (xfrs_grped) {
1707 xfrs_grped--;
1708 continue;
1709 } else {
1710 dd->read_len = dd->write_len = 0;
1711 xfrs_grped = combine_transfers(dd);
1712 }
1713 dd->cur_tx_transfer = dd->cur_transfer;
1714 dd->cur_rx_transfer = dd->cur_transfer;
1715 msm_spi_process_transfer(dd);
1716 xfrs_grped--;
1717 }
1718 } else {
1719 /* Handling of a single transfer or WR-WR or WR-RD transfers */
1720 if ((!dd->cur_msg->is_dma_mapped) &&
1721 (msm_use_dm(dd, dd->cur_transfer,
1722 dd->cur_transfer->bits_per_word))) {
1723 /* Mapping of DMA buffers */
1724 int ret = msm_spi_map_dma_buffers(dd);
1725 if (ret < 0) {
1726 dd->cur_msg->status = ret;
				goto error;
			}
1729 }
1730 dd->cur_tx_transfer = dd->cur_rx_transfer = dd->cur_transfer;
1731 msm_spi_process_transfer(dd);
1732 }

	return;
1735
1736error:
1737 if (dd->cs_gpios[cs_num].valid) {
1738 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1739 dd->cs_gpios[cs_num].valid = 0;
1740 }
}
1742
1743/* workqueue - pull messages from queue & process */
1744static void msm_spi_workq(struct work_struct *work)
1745{
1746 struct msm_spi *dd =
1747 container_of(work, struct msm_spi, work_data);
1748 unsigned long flags;
1749 u32 status_error = 0;
1750
1751 mutex_lock(&dd->core_lock);
1752
1753 /* Don't allow power collapse until we release mutex */
1754 if (pm_qos_request_active(&qos_req_list))
1755 pm_qos_update_request(&qos_req_list,
1756 dd->pm_lat);
1757 if (dd->use_rlock)
1758 remote_mutex_lock(&dd->r_lock);
1759
1760 clk_enable(dd->clk);
1761 clk_enable(dd->pclk);
1762 msm_spi_enable_irqs(dd);
1763
1764 if (!msm_spi_is_valid_state(dd)) {
1765 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1766 __func__);
1767 status_error = 1;
1768 }
1769
1770 spin_lock_irqsave(&dd->queue_lock, flags);
1771 while (!list_empty(&dd->queue)) {
1772 dd->cur_msg = list_entry(dd->queue.next,
1773 struct spi_message, queue);
1774 list_del_init(&dd->cur_msg->queue);
1775 spin_unlock_irqrestore(&dd->queue_lock, flags);
1776 if (status_error)
1777 dd->cur_msg->status = -EIO;
1778 else
1779 msm_spi_process_message(dd);
1780 if (dd->cur_msg->complete)
1781 dd->cur_msg->complete(dd->cur_msg->context);
1782 spin_lock_irqsave(&dd->queue_lock, flags);
1783 }
1784 dd->transfer_pending = 0;
1785 spin_unlock_irqrestore(&dd->queue_lock, flags);
1786
1787 msm_spi_disable_irqs(dd);
1788 clk_disable(dd->clk);
1789 clk_disable(dd->pclk);
1790
1791 if (dd->use_rlock)
1792 remote_mutex_unlock(&dd->r_lock);
1793
1794 if (pm_qos_request_active(&qos_req_list))
1795 pm_qos_update_request(&qos_req_list,
1796 PM_QOS_DEFAULT_VALUE);
1797
1798 mutex_unlock(&dd->core_lock);
1799 /* If needed, this can be done after the current message is complete,
1800 and work can be continued upon resume. No motivation for now. */
1801 if (dd->suspended)
1802 wake_up_interruptible(&dd->continue_suspend);
1803}
1804
static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
        struct msm_spi *dd;
        unsigned long flags;
        struct spi_transfer *tr;

        dd = spi_master_get_devdata(spi->master);
        if (dd->suspended)
                return -EBUSY;

        if (list_empty(&msg->transfers) || !msg->complete)
                return -EINVAL;

        list_for_each_entry(tr, &msg->transfers, transfer_list) {
                /* Check message parameters */
                if (tr->speed_hz > dd->pdata->max_clock_speed ||
                    (tr->bits_per_word &&
                     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
                    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
                        dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
                                "tx=%p, rx=%p\n",
                                tr->speed_hz, tr->bits_per_word,
                                tr->tx_buf, tr->rx_buf);
                        return -EINVAL;
                }
        }

        spin_lock_irqsave(&dd->queue_lock, flags);
        if (dd->suspended) {
                spin_unlock_irqrestore(&dd->queue_lock, flags);
                return -EBUSY;
        }
        dd->transfer_pending = 1;
        list_add_tail(&msg->queue, &dd->queue);
        spin_unlock_irqrestore(&dd->queue_lock, flags);
        queue_work(dd->workqueue, &dd->work_data);
        return 0;
}

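/*
 * Per-device setup: programs chip-select polarity (SPI_CS_HIGH) and clock
 * idle level (SPI_CPOL) into SPI_IO_CONTROL, and loopback (SPI_LOOP) and
 * clock phase (SPI_CPHA -> SPI_CFG_INPUT_FIRST) into SPI_CONFIG. The clocks
 * are enabled only around the register accesses, and the remote mutex is
 * held when the core is shared with a remote processor.
 */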
static int msm_spi_setup(struct spi_device *spi)
{
        struct msm_spi *dd;
        int rc = 0;
        u32 spi_ioc;
        u32 spi_config;
        u32 mask;

        if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
                dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
                        __func__, spi->bits_per_word);
                rc = -EINVAL;
        }
        if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
                dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
                        __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
                rc = -EINVAL;
        }

        if (rc)
                goto err_setup_exit;

        dd = spi_master_get_devdata(spi->master);

        mutex_lock(&dd->core_lock);
        if (dd->suspended) {
                mutex_unlock(&dd->core_lock);
                return -EBUSY;
        }

        if (dd->use_rlock)
                remote_mutex_lock(&dd->r_lock);

        clk_enable(dd->clk);
        clk_enable(dd->pclk);

        spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
        mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
        if (spi->mode & SPI_CS_HIGH)
                spi_ioc |= mask;
        else
                spi_ioc &= ~mask;
        if (spi->mode & SPI_CPOL)
                spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
        else
                spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

        writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

        spi_config = readl_relaxed(dd->base + SPI_CONFIG);
        if (spi->mode & SPI_LOOP)
                spi_config |= SPI_CFG_LOOPBACK;
        else
                spi_config &= ~SPI_CFG_LOOPBACK;
        if (spi->mode & SPI_CPHA)
                spi_config &= ~SPI_CFG_INPUT_FIRST;
        else
                spi_config |= SPI_CFG_INPUT_FIRST;
        writel_relaxed(spi_config, dd->base + SPI_CONFIG);

        /* Ensure previous write completed before disabling the clocks */
        mb();
        clk_disable(dd->clk);
        clk_disable(dd->pclk);

        if (dd->use_rlock)
                remote_mutex_unlock(&dd->r_lock);
        mutex_unlock(&dd->core_lock);

err_setup_exit:
        return rc;
}

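/*
 * When CONFIG_DEBUG_FS is enabled, spi_debugfs_init() creates one debugfs
 * file per entry in debugfs_spi_regs under a directory named after the
 * device (typically below /sys/kernel/debug); reads and writes on those
 * files go straight to the mapped controller registers via readl_relaxed()
 * and writel_relaxed().
 */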
#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
        writel_relaxed(val, data);
        /* Ensure the previous write completed. */
        mb();
        return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
        *val = readl_relaxed(data);
        /* Ensure the previous read completed. */
        mb();
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
                        debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
        dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
        if (dd->dent_spi) {
                int i;
                for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
                        dd->debugfs_spi_regs[i] =
                                debugfs_create_file(
                                        debugfs_spi_regs[i].name,
                                        debugfs_spi_regs[i].mode,
                                        dd->dent_spi,
                                        dd->base + debugfs_spi_regs[i].offset,
                                        &fops_iomem_x32);
                }
        }
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
        if (dd->dent_spi) {
                int i;
                debugfs_remove_recursive(dd->dent_spi);
                dd->dent_spi = NULL;
                for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
                        dd->debugfs_spi_regs[i] = NULL;
        }
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct msm_spi *dd = spi_master_get_devdata(master);

        return snprintf(buf, PAGE_SIZE,
                        "Device %s\n"
                        "rx fifo_size = %d spi words\n"
                        "tx fifo_size = %d spi words\n"
                        "use_dma ? %s\n"
                        "rx block size = %d bytes\n"
                        "tx block size = %d bytes\n"
                        "burst size = %d bytes\n"
                        "DMA configuration:\n"
                        "tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
                        "--statistics--\n"
                        "Rx isrs = %d\n"
                        "Tx isrs = %d\n"
                        "DMA error = %d\n"
                        "--debug--\n"
                        "NA yet\n",
                        dev_name(dev),
                        dd->input_fifo_size,
                        dd->output_fifo_size,
                        dd->use_dma ? "yes" : "no",
                        dd->input_block_size,
                        dd->output_block_size,
                        dd->burst_size,
                        dd->tx_dma_chan,
                        dd->rx_dma_chan,
                        dd->tx_dma_crci,
                        dd->rx_dma_crci,
                        dd->stat_rx + dd->stat_dmov_rx,
                        dd->stat_tx + dd->stat_dmov_tx,
                        dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
                        );
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct msm_spi *dd = spi_master_get_devdata(master);
        dd->stat_rx = 0;
        dd->stat_tx = 0;
        dd->stat_dmov_rx = 0;
        dd->stat_dmov_tx = 0;
        dd->stat_dmov_rx_err = 0;
        dd->stat_dmov_tx_err = 0;
        return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
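/*
 * Reading the "stats" attribute (e.g. via sysfs under the platform device)
 * dumps the FIFO/DMA geometry and the interrupt/error counters collected
 * above; writing any value to it clears the counters.
 */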

static struct attribute *dev_attrs[] = {
        &dev_attr_stats.attr,
        NULL,
};

static struct attribute_group dev_attr_grp = {
        .attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (the Data Mover's IRQ) with the DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
                                      unsigned int result,
                                      struct msm_dmov_errdata *err)
{
        struct msm_spi *dd;

        if (!(result & DMOV_RSLT_VALID)) {
                pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n",
                       result, cmd);
                return;
        }
        /* restore original context */
        dd = container_of(cmd, struct msm_spi, tx_hdr);
        if (result & DMOV_RSLT_DONE)
                dd->stat_dmov_tx++;
        else {
                /* Error or flush */
                if (result & DMOV_RSLT_ERROR) {
                        dev_err(dd->dev, "DMA error (0x%08x)\n", result);
                        dd->stat_dmov_tx_err++;
                }
                if (result & DMOV_RSLT_FLUSH) {
                        /*
                         * Flushing normally happens during device removal,
                         * while we are waiting for outstanding DMA commands
                         * to be flushed.
                         */
                        dev_info(dd->dev,
                                 "DMA channel flushed (0x%08x)\n", result);
                }
                if (err)
                        dev_err(dd->dev,
                                "Flush data(%08x %08x %08x %08x %08x %08x)\n",
                                err->flush[0], err->flush[1], err->flush[2],
                                err->flush[3], err->flush[4], err->flush[5]);
                dd->cur_msg->status = -EIO;
                complete(&dd->transfer_complete);
        }
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (the Data Mover's IRQ) with the DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
                                      unsigned int result,
                                      struct msm_dmov_errdata *err)
{
        struct msm_spi *dd;

        if (!(result & DMOV_RSLT_VALID)) {
                pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
                       result, cmd);
                return;
        }
        /* restore original context */
        dd = container_of(cmd, struct msm_spi, rx_hdr);
        if (result & DMOV_RSLT_DONE) {
                dd->stat_dmov_rx++;
                if (atomic_inc_return(&dd->rx_irq_called) == 1)
                        return;
                complete(&dd->transfer_complete);
        } else {
                /* Error or flush */
                if (result & DMOV_RSLT_ERROR) {
                        dev_err(dd->dev, "DMA error(0x%08x)\n", result);
                        dd->stat_dmov_rx_err++;
                }
                if (result & DMOV_RSLT_FLUSH) {
                        dev_info(dd->dev,
                                 "DMA channel flushed(0x%08x)\n", result);
                }
                if (err)
                        dev_err(dd->dev,
                                "Flush data(%08x %08x %08x %08x %08x %08x)\n",
                                err->flush[0], err->flush[1], err->flush[2],
                                err->flush[3], err->flush[4], err->flush[5]);
                dd->cur_msg->status = -EIO;
                complete(&dd->transfer_complete);
        }
}

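/*
 * Size of the single coherent allocation used for the DataMover bookkeeping:
 * one spi_dmov_cmd rounded up to DM_BYTE_ALIGN plus one burst-size padding
 * buffer rounded up to a cache line, multiplied by two to cover the TX and
 * RX halves.
 */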
static inline u32 get_chunk_size(struct msm_spi *dd)
{
        u32 cache_line = dma_get_cache_alignment();

        return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
                roundup(dd->burst_size, cache_line)) * 2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
        int limit = 0;

        if (!dd->use_dma)
                return;

        while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
                msm_dmov_flush(dd->tx_dma_chan);
                msm_dmov_flush(dd->rx_dma_chan);
                msleep(10);
        }

        dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
                          dd->tx_dmov_cmd_dma);
        dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
        dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
        dmov_box *box;
        u32 cache_line = dma_get_cache_alignment();

        /* Allocate everything as one chunk, since the total is smaller than
         * a page */

        /* We pass a NULL device: using the real device would require a
         * coherent_dma_mask in its definition, and we are okay with using
         * the system pool */
        dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
                                             &dd->tx_dmov_cmd_dma, GFP_KERNEL);
        if (dd->tx_dmov_cmd == NULL)
                return -ENOMEM;

        /* DMA addresses should be 64-bit aligned */
        dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
                ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
        dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
                sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

        /* Buffers should be aligned to cache line */
        dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
        dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
                sizeof(struct spi_dmov_cmd), cache_line);
        dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
                cache_line);
        dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
                cache_line);
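        /*
         * Resulting layout of the coherent chunk (same order in the CPU and
         * DMA views):
         *
         *   [tx_dmov_cmd][rx_dmov_cmd][tx_padding][rx_padding]
         *    DM-aligned   DM-aligned   cache-line  cache-line
         *                               aligned     aligned
         */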

        /* Setup DM commands */
        box = &(dd->rx_dmov_cmd->box);
        box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
        box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
        dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
                DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
                offsetof(struct spi_dmov_cmd, cmd_ptr));
        dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

        box = &(dd->tx_dmov_cmd->box);
        box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
        box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
        dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
                DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
                offsetof(struct spi_dmov_cmd, cmd_ptr));
        dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

        dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
                CMD_DST_CRCI(dd->tx_dma_crci);
        dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
                SPI_OUTPUT_FIFO;
        dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
                CMD_SRC_CRCI(dd->rx_dma_crci);
        dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
                SPI_INPUT_FIFO;

        /* Clear remaining activities on channel */
        msm_dmov_flush(dd->tx_dma_chan);
        msm_dmov_flush(dd->rx_dma_chan);

        return 0;
}

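/*
 * Probe order: allocate the spi_master, pick up the IRQ, register-memory and
 * GSBI resources, optionally the DataMover channels and CRCIs, request the
 * SPI and chip-select GPIOs, create the workqueue, map the registers, set up
 * the remote mutex and pm_qos request when the core is shared, configure the
 * clocks, reset and initialize the hardware, then register the master and
 * create the sysfs/debugfs entries. Each error label below unwinds exactly
 * the steps that completed before it.
 */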
static int __init msm_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct msm_spi *dd;
        struct resource *resource;
        int rc = -ENXIO;
        int locked = 0;
        int i = 0;
        int clk_enabled = 0;
        int pclk_enabled = 0;
        struct msm_spi_platform_data *pdata = pdev->dev.platform_data;

        master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
        if (!master) {
                rc = -ENOMEM;
                dev_err(&pdev->dev, "master allocation failed\n");
                goto err_probe_exit;
        }

        master->bus_num = pdev->id;
        master->mode_bits = SPI_SUPPORTED_MODES;
        master->num_chipselect = SPI_NUM_CHIPSELECTS;
        master->setup = msm_spi_setup;
        master->transfer = msm_spi_transfer;
        platform_set_drvdata(pdev, master);
        dd = spi_master_get_devdata(master);

        dd->pdata = pdata;
        rc = msm_spi_get_irq_data(dd, pdev);
        if (rc)
                goto err_probe_res;
        resource = platform_get_resource_byname(pdev,
                                                 IORESOURCE_MEM, "spi_base");
        if (!resource) {
                rc = -ENXIO;
                goto err_probe_res;
        }
        dd->mem_phys_addr = resource->start;
        dd->mem_size = resource_size(resource);

        rc = msm_spi_get_gsbi_resource(dd, pdev);
        if (rc)
                goto err_probe_res2;

        if (pdata) {
                if (pdata->dma_config) {
                        rc = pdata->dma_config();
                        if (rc) {
                                dev_warn(&pdev->dev,
                                         "%s: DM mode not supported\n",
                                         __func__);
                                dd->use_dma = 0;
                                goto skip_dma_resources;
                        }
                }
                resource = platform_get_resource_byname(pdev,
                                                        IORESOURCE_DMA,
                                                        "spidm_channels");
                if (resource) {
                        dd->rx_dma_chan = resource->start;
                        dd->tx_dma_chan = resource->end;

                        resource = platform_get_resource_byname(pdev,
                                                                IORESOURCE_DMA,
                                                                "spidm_crci");
                        if (!resource) {
                                rc = -ENXIO;
                                goto err_probe_res;
                        }
                        dd->rx_dma_crci = resource->start;
                        dd->tx_dma_crci = resource->end;
                        dd->use_dma = 1;
                        master->dma_alignment = dma_get_cache_alignment();
                }

skip_dma_resources:
                if (pdata->gpio_config) {
                        rc = pdata->gpio_config();
                        if (rc) {
                                dev_err(&pdev->dev,
                                        "%s: error configuring GPIOs\n",
                                        __func__);
                                goto err_probe_gpio;
                        }
                }
        }

        for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
                resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
                                                        spi_rsrcs[i]);
                dd->spi_gpios[i] = resource ? resource->start : -1;
        }

        for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
                resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
                                                        spi_cs_rsrcs[i]);
                dd->cs_gpios[i].gpio_num = resource ? resource->start : -1;
                dd->cs_gpios[i].valid = 0;
        }

        rc = msm_spi_request_gpios(dd);
        if (rc)
                goto err_probe_gpio;

        spin_lock_init(&dd->queue_lock);
        mutex_init(&dd->core_lock);
        INIT_LIST_HEAD(&dd->queue);
        INIT_WORK(&dd->work_data, msm_spi_workq);
        init_waitqueue_head(&dd->continue_suspend);
        dd->workqueue = create_singlethread_workqueue(
                dev_name(master->dev.parent));
        if (!dd->workqueue) {
                rc = -ENOMEM;
                goto err_probe_workq;
        }

        if (!request_mem_region(dd->mem_phys_addr, dd->mem_size,
                                SPI_DRV_NAME)) {
                rc = -ENXIO;
                goto err_probe_reqmem;
        }

        dd->base = ioremap(dd->mem_phys_addr, dd->mem_size);
        if (!dd->base) {
                rc = -ENOMEM;
                goto err_probe_ioremap;
        }
        rc = msm_spi_request_gsbi(dd);
        if (rc)
                goto err_probe_ioremap2;
        if (pdata && pdata->rsl_id) {
                struct remote_mutex_id rmid;
                rmid.r_spinlock_id = pdata->rsl_id;
                rmid.delay_us = SPI_TRYLOCK_DELAY;

                rc = remote_mutex_init(&dd->r_lock, &rmid);
                if (rc) {
                        dev_err(&pdev->dev, "%s: unable to init remote_mutex "
                                "(%s), (rc=%d)\n", __func__,
                                rmid.r_spinlock_id, rc);
                        goto err_probe_rlock_init;
                }
                dd->use_rlock = 1;
                dd->pm_lat = pdata->pm_lat;
                pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
                                   PM_QOS_DEFAULT_VALUE);
        }
        mutex_lock(&dd->core_lock);
        if (dd->use_rlock)
                remote_mutex_lock(&dd->r_lock);
        locked = 1;

        dd->dev = &pdev->dev;
        dd->clk = clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(dd->clk)) {
                dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
                rc = PTR_ERR(dd->clk);
                goto err_probe_clk_get;
        }

        dd->pclk = clk_get(&pdev->dev, "iface_clk");
        if (IS_ERR(dd->pclk)) {
                dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
                rc = PTR_ERR(dd->pclk);
                goto err_probe_pclk_get;
        }

        if (pdata && pdata->max_clock_speed)
                msm_spi_clock_set(dd, dd->pdata->max_clock_speed);

        rc = clk_enable(dd->clk);
        if (rc) {
                dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
                        __func__);
                goto err_probe_clk_enable;
        }
        clk_enabled = 1;

        rc = clk_enable(dd->pclk);
        if (rc) {
                dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
                        __func__);
                goto err_probe_pclk_enable;
        }
        pclk_enabled = 1;
        msm_spi_init_gsbi(dd);
        msm_spi_calculate_fifo_size(dd);
        if (dd->use_dma) {
                rc = msm_spi_init_dma(dd);
                if (rc)
                        goto err_probe_dma;
        }

        /* Initialize registers */
        writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
        msm_spi_set_state(dd, SPI_OP_STATE_RESET);

        writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
        writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
        writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
        /*
         * The SPI core generates a bogus input overrun error on some targets,
         * when a transition from run to reset state occurs and if the FIFO has
         * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
         * bit.
         */
        msm_spi_enable_error_flags(dd);

        writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
        rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
        if (rc)
                goto err_probe_state;

        clk_disable(dd->clk);
        clk_disable(dd->pclk);
        clk_enabled = 0;
        pclk_enabled = 0;

        dd->suspended = 0;
        dd->transfer_pending = 0;
        dd->multi_xfr = 0;
        dd->mode = SPI_MODE_NONE;

        rc = msm_spi_request_irq(dd, pdev->name, master);
        if (rc)
                goto err_probe_irq;

        msm_spi_disable_irqs(dd);
        if (dd->use_rlock)
                remote_mutex_unlock(&dd->r_lock);

        mutex_unlock(&dd->core_lock);
        locked = 0;

        rc = spi_register_master(master);
        if (rc)
                goto err_probe_reg_master;

        rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
        if (rc) {
                dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
                goto err_attrs;
        }

        spi_debugfs_init(dd);

        return 0;

err_attrs:
err_probe_reg_master:
        msm_spi_free_irq(dd, master);
err_probe_irq:
err_probe_state:
        msm_spi_teardown_dma(dd);
err_probe_dma:
        if (pclk_enabled)
                clk_disable(dd->pclk);
err_probe_pclk_enable:
        if (clk_enabled)
                clk_disable(dd->clk);
err_probe_clk_enable:
        clk_put(dd->pclk);
err_probe_pclk_get:
        clk_put(dd->clk);
err_probe_clk_get:
        if (locked) {
                if (dd->use_rlock)
                        remote_mutex_unlock(&dd->r_lock);
                mutex_unlock(&dd->core_lock);
        }
err_probe_rlock_init:
        msm_spi_release_gsbi(dd);
err_probe_ioremap2:
        iounmap(dd->base);
err_probe_ioremap:
        release_mem_region(dd->mem_phys_addr, dd->mem_size);
err_probe_reqmem:
        destroy_workqueue(dd->workqueue);
err_probe_workq:
        msm_spi_free_gpios(dd);
err_probe_gpio:
        if (pdata && pdata->gpio_release)
                pdata->gpio_release();
err_probe_res2:
err_probe_res:
        spi_master_put(master);
err_probe_exit:
        return rc;
}

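/*
 * Suspend marks the controller as suspended (so msm_spi_transfer() rejects
 * new messages with -EBUSY), waits for any pending transfer to drain, and
 * releases the SPI GPIOs; resume re-requests the GPIOs and clears the flag.
 */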
#ifdef CONFIG_PM
static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd;
        unsigned long flags;

        if (!master)
                goto suspend_exit;
        dd = spi_master_get_devdata(master);
        if (!dd)
                goto suspend_exit;

        /* Make sure nothing is added to the queue while we're suspending */
        spin_lock_irqsave(&dd->queue_lock, flags);
        dd->suspended = 1;
        spin_unlock_irqrestore(&dd->queue_lock, flags);

        /* Wait for pending transactions to finish (or for the wait to be
         * interrupted) */
        wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
        msm_spi_free_gpios(dd);

suspend_exit:
        return 0;
}

static int msm_spi_resume(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd;

        if (!master)
                goto resume_exit;
        dd = spi_master_get_devdata(master);
        if (!dd)
                goto resume_exit;

        BUG_ON(msm_spi_request_gpios(dd) != 0);
        dd->suspended = 0;
resume_exit:
        return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#endif /* CONFIG_PM */

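/*
 * Removal undoes probe in reverse: drop the pm_qos request, tear down
 * debugfs/sysfs, free the IRQ and DMA resources, release the GPIOs, register
 * mapping and GSBI, put the clocks, destroy the workqueue, and finally
 * unregister and put the spi_master.
 */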
static int __devexit msm_spi_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct msm_spi *dd = spi_master_get_devdata(master);
        struct msm_spi_platform_data *pdata = pdev->dev.platform_data;

        pm_qos_remove_request(&qos_req_list);
        spi_debugfs_exit(dd);
        sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

        msm_spi_free_irq(dd, master);
        msm_spi_teardown_dma(dd);

        if (pdata && pdata->gpio_release)
                pdata->gpio_release();

        msm_spi_free_gpios(dd);
        iounmap(dd->base);
        release_mem_region(dd->mem_phys_addr, dd->mem_size);
        msm_spi_release_gsbi(dd);
        clk_put(dd->clk);
        clk_put(dd->pclk);
        destroy_workqueue(dd->workqueue);
        platform_set_drvdata(pdev, NULL);
        spi_unregister_master(master);
        spi_master_put(master);

        return 0;
}

static struct platform_driver msm_spi_driver = {
        .driver         = {
                .name   = SPI_DRV_NAME,
                .owner  = THIS_MODULE,
        },
        .suspend        = msm_spi_suspend,
        .resume         = msm_spi_resume,
        .remove         = __devexit_p(msm_spi_remove),
};

static int __init msm_spi_init(void)
{
        return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
        platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);