1/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/list.h>
22#include <linux/irq.h>
23#include <linux/platform_device.h>
24#include <linux/spi/spi.h>
25#include <linux/interrupt.h>
26#include <linux/err.h>
27#include <linux/clk.h>
28#include <linux/delay.h>
29#include <linux/workqueue.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <mach/msm_spi.h>
33#include <linux/dma-mapping.h>
34#include <linux/sched.h>
35#include <mach/dma.h>
36#include <asm/atomic.h>
37#include <linux/mutex.h>
38#include <linux/gpio.h>
39#include <linux/remote_spinlock.h>
40#include <linux/pm_qos_params.h>
41
42#define SPI_DRV_NAME "spi_qsd"
43#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
44
45#define QSD_REG(x) (x)
46#define QUP_REG(x)
47
48#define SPI_FIFO_WORD_CNT 0x0048
49
50#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
51
52#define QSD_REG(x)
53#define QUP_REG(x) (x)
54
55#define QUP_CONFIG 0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
56#define QUP_ERROR_FLAGS 0x0308
57#define QUP_ERROR_FLAGS_EN 0x030C
58#define QUP_ERR_MASK 0x3
59#define SPI_OUTPUT_FIFO_WORD_CNT 0x010C
60#define SPI_INPUT_FIFO_WORD_CNT 0x0214
61#define QUP_MX_WRITE_COUNT 0x0150
62#define QUP_MX_WRITE_CNT_CURRENT 0x0154
63
64#define QUP_CONFIG_SPI_MODE 0x0100
65
66#define GSBI_CTRL_REG 0x0
67#define GSBI_SPI_CONFIG 0x30
68#endif
69
70#define SPI_CONFIG QSD_REG(0x0000) QUP_REG(0x0300)
71#define SPI_IO_CONTROL QSD_REG(0x0004) QUP_REG(0x0304)
72#define SPI_IO_MODES QSD_REG(0x0008) QUP_REG(0x0008)
73#define SPI_SW_RESET QSD_REG(0x000C) QUP_REG(0x000C)
74#define SPI_TIME_OUT QSD_REG(0x0010) QUP_REG(0x0010)
75#define SPI_TIME_OUT_CURRENT QSD_REG(0x0014) QUP_REG(0x0014)
76#define SPI_MX_OUTPUT_COUNT QSD_REG(0x0018) QUP_REG(0x0100)
77#define SPI_MX_OUTPUT_CNT_CURRENT QSD_REG(0x001C) QUP_REG(0x0104)
78#define SPI_MX_INPUT_COUNT QSD_REG(0x0020) QUP_REG(0x0200)
79#define SPI_MX_INPUT_CNT_CURRENT QSD_REG(0x0024) QUP_REG(0x0204)
80#define SPI_MX_READ_COUNT QSD_REG(0x0028) QUP_REG(0x0208)
81#define SPI_MX_READ_CNT_CURRENT QSD_REG(0x002C) QUP_REG(0x020C)
82#define SPI_OPERATIONAL QSD_REG(0x0030) QUP_REG(0x0018)
83#define SPI_ERROR_FLAGS QSD_REG(0x0034) QUP_REG(0x001C)
84#define SPI_ERROR_FLAGS_EN QSD_REG(0x0038) QUP_REG(0x0020)
85#define SPI_DEASSERT_WAIT QSD_REG(0x003C) QUP_REG(0x0310)
86#define SPI_OUTPUT_DEBUG QSD_REG(0x0040) QUP_REG(0x0108)
87#define SPI_INPUT_DEBUG QSD_REG(0x0044) QUP_REG(0x0210)
88#define SPI_TEST_CTRL QSD_REG(0x004C) QUP_REG(0x0024)
89#define SPI_OUTPUT_FIFO QSD_REG(0x0100) QUP_REG(0x0110)
90#define SPI_INPUT_FIFO QSD_REG(0x0200) QUP_REG(0x0218)
91#define SPI_STATE QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004)
92
93/* SPI_CONFIG fields */
94#define SPI_CFG_INPUT_FIRST 0x00000200
95#define SPI_NO_INPUT 0x00000080
96#define SPI_NO_OUTPUT 0x00000040
97#define SPI_CFG_LOOPBACK 0x00000100
98#define SPI_CFG_N 0x0000001F
99
100/* SPI_IO_CONTROL fields */
101#define SPI_IO_C_CLK_IDLE_HIGH 0x00000400
102#define SPI_IO_C_MX_CS_MODE 0x00000100
103#define SPI_IO_C_CS_N_POLARITY 0x000000F0
104#define SPI_IO_C_CS_N_POLARITY_0 0x00000010
105#define SPI_IO_C_CS_SELECT 0x0000000C
106#define SPI_IO_C_TRISTATE_CS 0x00000002
107#define SPI_IO_C_NO_TRI_STATE 0x00000001
108
109/* SPI_IO_MODES fields */
110#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN QSD_REG(0x00004000) QUP_REG(0x00010000)
111#define SPI_IO_M_PACK_EN QSD_REG(0x00002000) QUP_REG(0x00008000)
112#define SPI_IO_M_UNPACK_EN QSD_REG(0x00001000) QUP_REG(0x00004000)
113#define SPI_IO_M_INPUT_MODE QSD_REG(0x00000C00) QUP_REG(0x00003000)
114#define SPI_IO_M_OUTPUT_MODE QSD_REG(0x00000300) QUP_REG(0x00000C00)
115#define SPI_IO_M_INPUT_FIFO_SIZE QSD_REG(0x000000C0) QUP_REG(0x00000380)
116#define SPI_IO_M_INPUT_BLOCK_SIZE QSD_REG(0x00000030) QUP_REG(0x00000060)
117#define SPI_IO_M_OUTPUT_FIFO_SIZE QSD_REG(0x0000000C) QUP_REG(0x0000001C)
118#define SPI_IO_M_OUTPUT_BLOCK_SIZE QSD_REG(0x00000003) QUP_REG(0x00000003)
119
120#define INPUT_BLOCK_SZ_SHIFT QSD_REG(4) QUP_REG(5)
121#define INPUT_FIFO_SZ_SHIFT QSD_REG(6) QUP_REG(7)
122#define OUTPUT_BLOCK_SZ_SHIFT QSD_REG(0) QUP_REG(0)
123#define OUTPUT_FIFO_SZ_SHIFT QSD_REG(2) QUP_REG(2)
124#define OUTPUT_MODE_SHIFT QSD_REG(8) QUP_REG(10)
125#define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12)
126
127/* SPI_OPERATIONAL fields */
128#define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800
129#define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400
130#define SPI_OP_INPUT_SERVICE_FLAG 0x00000200
131#define SPI_OP_OUTPUT_SERVICE_FLAG 0x00000100
132#define SPI_OP_INPUT_FIFO_FULL 0x00000080
133#define SPI_OP_OUTPUT_FIFO_FULL 0x00000040
134#define SPI_OP_IP_FIFO_NOT_EMPTY 0x00000020
135#define SPI_OP_OP_FIFO_NOT_EMPTY 0x00000010
136#define SPI_OP_STATE_VALID 0x00000004
137#define SPI_OP_STATE 0x00000003
138
139#define SPI_OP_STATE_CLEAR_BITS 0x2
140enum msm_spi_state {
141 SPI_OP_STATE_RESET = 0x00000000,
142 SPI_OP_STATE_RUN = 0x00000001,
143 SPI_OP_STATE_PAUSE = 0x00000003,
144};
145
146/* SPI_ERROR_FLAGS fields */
147#define SPI_ERR_OUTPUT_OVER_RUN_ERR 0x00000020
148#define SPI_ERR_INPUT_UNDER_RUN_ERR 0x00000010
149#define SPI_ERR_OUTPUT_UNDER_RUN_ERR 0x00000008
150#define SPI_ERR_INPUT_OVER_RUN_ERR 0x00000004
151#define SPI_ERR_CLK_OVER_RUN_ERR 0x00000002
152#define SPI_ERR_CLK_UNDER_RUN_ERR 0x00000001
153
154/* We don't allow transactions larger than 4K-64 or 64K-64 due to
155 mx_input/output_cnt register size */
156#define SPI_MAX_TRANSFERS QSD_REG(0xFC0) QUP_REG(0xFC0)
157#define SPI_MAX_LEN (SPI_MAX_TRANSFERS * dd->bytes_per_word)
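/*
 * Illustrative arithmetic (not in the original source): SPI_MAX_TRANSFERS
 * is 0xFC0 == 4032 words (4K - 64), so with dd->bytes_per_word == 4 a
 * single DM chunk is SPI_MAX_LEN == 4032 * 4 == 16128 bytes, and with
 * 8-bit words it is 4032 bytes.
 */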
158
159#define SPI_NUM_CHIPSELECTS 4
160#define SPI_SUPPORTED_MODES (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
161
162#define SPI_DELAY_THRESHOLD 1
163/* Default timeout is 10 milliseconds */
164#define SPI_DEFAULT_TIMEOUT 10
165/* 250 microseconds */
166#define SPI_TRYLOCK_DELAY 250
167
168/* Data Mover burst size */
169#define DM_BURST_SIZE 16
170/* Data Mover commands should be aligned to 64 bits (8 bytes) */
171#define DM_BYTE_ALIGN 8
172
173static char const * const spi_rsrcs[] = {
174 "spi_clk",
175 "spi_cs",
176 "spi_miso",
177 "spi_mosi"
178};
179
180enum msm_spi_mode {
181 SPI_FIFO_MODE = 0x0, /* 00 */
182 SPI_BLOCK_MODE = 0x1, /* 01 */
183 SPI_DMOV_MODE = 0x2, /* 10 */
184 SPI_MODE_NONE = 0xFF, /* invalid value */
185};
186
187/* Structures for Data Mover */
188struct spi_dmov_cmd {
189 dmov_box box; /* data aligned to max(dm_burst_size, block_size)
190 (<= fifo_size) */
191 dmov_s single_pad; /* data unaligned to max(dm_burst_size, block_size)
192 padded to fit */
193 dma_addr_t cmd_ptr;
194};
195
196MODULE_LICENSE("GPL v2");
197MODULE_VERSION("0.3");
198MODULE_ALIAS("platform:"SPI_DRV_NAME);
199
200static struct pm_qos_request_list qos_req_list;
201
202#ifdef CONFIG_DEBUG_FS
203/* Used to create debugfs entries */
204static const struct {
205 const char *name;
206 mode_t mode;
207 int offset;
208} debugfs_spi_regs[] = {
209 {"config", S_IRUGO | S_IWUSR, SPI_CONFIG},
210 {"io_control", S_IRUGO | S_IWUSR, SPI_IO_CONTROL},
211 {"io_modes", S_IRUGO | S_IWUSR, SPI_IO_MODES},
212 {"sw_reset", S_IWUSR, SPI_SW_RESET},
213 {"time_out", S_IRUGO | S_IWUSR, SPI_TIME_OUT},
214 {"time_out_current", S_IRUGO, SPI_TIME_OUT_CURRENT},
215 {"mx_output_count", S_IRUGO | S_IWUSR, SPI_MX_OUTPUT_COUNT},
216 {"mx_output_cnt_current", S_IRUGO, SPI_MX_OUTPUT_CNT_CURRENT},
217 {"mx_input_count", S_IRUGO | S_IWUSR, SPI_MX_INPUT_COUNT},
218 {"mx_input_cnt_current", S_IRUGO, SPI_MX_INPUT_CNT_CURRENT},
219 {"mx_read_count", S_IRUGO | S_IWUSR, SPI_MX_READ_COUNT},
220 {"mx_read_cnt_current", S_IRUGO, SPI_MX_READ_CNT_CURRENT},
221 {"operational", S_IRUGO | S_IWUSR, SPI_OPERATIONAL},
222 {"error_flags", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS},
223 {"error_flags_en", S_IRUGO | S_IWUSR, SPI_ERROR_FLAGS_EN},
224 {"deassert_wait", S_IRUGO | S_IWUSR, SPI_DEASSERT_WAIT},
225 {"output_debug", S_IRUGO, SPI_OUTPUT_DEBUG},
226 {"input_debug", S_IRUGO, SPI_INPUT_DEBUG},
227 {"test_ctrl", S_IRUGO | S_IWUSR, SPI_TEST_CTRL},
228 {"output_fifo", S_IWUSR, SPI_OUTPUT_FIFO},
229 {"input_fifo" , S_IRUSR, SPI_INPUT_FIFO},
230 {"spi_state", S_IRUGO | S_IWUSR, SPI_STATE},
231#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
232 {"fifo_word_cnt", S_IRUGO, SPI_FIFO_WORD_CNT},
233#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
234 {"qup_config", S_IRUGO | S_IWUSR, QUP_CONFIG},
235 {"qup_error_flags", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS},
236 {"qup_error_flags_en", S_IRUGO | S_IWUSR, QUP_ERROR_FLAGS_EN},
237 {"mx_write_cnt", S_IRUGO | S_IWUSR, QUP_MX_WRITE_COUNT},
238 {"mx_write_cnt_current", S_IRUGO, QUP_MX_WRITE_CNT_CURRENT},
239 {"output_fifo_word_cnt", S_IRUGO, SPI_OUTPUT_FIFO_WORD_CNT},
240 {"input_fifo_word_cnt", S_IRUGO, SPI_INPUT_FIFO_WORD_CNT},
241#endif
242};
243#endif
244
245struct msm_spi {
246 u8 *read_buf;
247 const u8 *write_buf;
248 void __iomem *base;
249 void __iomem *gsbi_base;
250 struct device *dev;
251 spinlock_t queue_lock;
252 struct mutex core_lock;
253 struct list_head queue;
254 struct workqueue_struct *workqueue;
255 struct work_struct work_data;
256 struct spi_message *cur_msg;
257 struct spi_transfer *cur_transfer;
258 struct completion transfer_complete;
259 struct clk *clk;
260 struct clk *pclk;
261 unsigned long mem_phys_addr;
262 size_t mem_size;
263 unsigned long gsbi_mem_phys_addr;
264 size_t gsbi_mem_size;
265 int input_fifo_size;
266 int output_fifo_size;
267 u32 rx_bytes_remaining;
268 u32 tx_bytes_remaining;
269 u32 clock_speed;
270 int irq_in;
271 int read_xfr_cnt;
272 int write_xfr_cnt;
273 int write_len;
274 int read_len;
275#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
276 int irq_out;
277 int irq_err;
278#endif
279 int bytes_per_word;
280 bool suspended;
281 bool transfer_pending;
282 wait_queue_head_t continue_suspend;
283 /* DMA data */
284 enum msm_spi_mode mode;
285 bool use_dma;
286 int tx_dma_chan;
287 int tx_dma_crci;
288 int rx_dma_chan;
289 int rx_dma_crci;
290 /* Data Mover Commands */
291 struct spi_dmov_cmd *tx_dmov_cmd;
292 struct spi_dmov_cmd *rx_dmov_cmd;
293 /* Physical address of the tx dmov box command */
294 dma_addr_t tx_dmov_cmd_dma;
295 dma_addr_t rx_dmov_cmd_dma;
296 struct msm_dmov_cmd tx_hdr;
297 struct msm_dmov_cmd rx_hdr;
298 int input_block_size;
299 int output_block_size;
300 int burst_size;
301 atomic_t rx_irq_called;
302 /* Used to pad messages unaligned to block size */
303 u8 *tx_padding;
304 dma_addr_t tx_padding_dma;
305 u8 *rx_padding;
306 dma_addr_t rx_padding_dma;
307 u32 unaligned_len;
308 /* DMA statistics */
309 int stat_dmov_tx_err;
310 int stat_dmov_rx_err;
311 int stat_rx;
312 int stat_dmov_rx;
313 int stat_tx;
314 int stat_dmov_tx;
315#ifdef CONFIG_DEBUG_FS
316 struct dentry *dent_spi;
317 struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
318#endif
319 struct msm_spi_platform_data *pdata; /* Platform data */
320 /* Remote Spinlock Data */
321 bool use_rlock;
322 remote_mutex_t r_lock;
323 uint32_t pm_lat;
324 /* When set indicates multiple transfers in a single message */
325 bool multi_xfr;
326 bool done;
327 u32 cur_msg_len;
328 /* Used in FIFO mode to keep track of the transfer being processed */
329 struct spi_transfer *cur_tx_transfer;
330 struct spi_transfer *cur_rx_transfer;
331 /* Temporary buffer used for WR-WR or WR-RD transfers */
332 u8 *temp_buf;
333 /* GPIO pin numbers for SPI clk, cs, miso and mosi */
334 int spi_gpios[ARRAY_SIZE(spi_rsrcs)];
335};
336
337/* Forward declaration */
338static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
339static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
340static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
341static inline int msm_spi_set_state(struct msm_spi *dd,
342 enum msm_spi_state state);
343static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
344static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
345
346#if defined(CONFIG_SPI_QSD) || defined(CONFIG_SPI_QSD_MODULE)
347/* Interrupt Handling */
348static inline int msm_spi_get_irq_data(struct msm_spi *dd,
349 struct platform_device *pdev)
350{
351 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
352 dd->irq_out = platform_get_irq_byname(pdev, "spi_irq_out");
353 dd->irq_err = platform_get_irq_byname(pdev, "spi_irq_err");
354 if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
355 return -1;
356 return 0;
357}
358
359static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
360 struct platform_device *pdev)
361{
362 return 0;
363}
364
365static inline int msm_spi_request_gsbi(struct msm_spi *dd) { return 0; }
366static inline void msm_spi_release_gsbi(struct msm_spi *dd) {}
367static inline void msm_spi_init_gsbi(struct msm_spi *dd) {}
368
369static inline void msm_spi_disable_irqs(struct msm_spi *dd)
370{
371 disable_irq(dd->irq_in);
372 disable_irq(dd->irq_out);
373 disable_irq(dd->irq_err);
374}
375
376static inline void msm_spi_enable_irqs(struct msm_spi *dd)
377{
378 enable_irq(dd->irq_in);
379 enable_irq(dd->irq_out);
380 enable_irq(dd->irq_err);
381}
382
383static inline int msm_spi_request_irq(struct msm_spi *dd,
384 const char *name,
385 struct spi_master *master)
386{
387 int rc;
388 rc = request_irq(dd->irq_in, msm_spi_input_irq, IRQF_TRIGGER_RISING,
389 name, dd);
390 if (rc)
391 goto error_irq1;
392 rc = request_irq(dd->irq_out, msm_spi_output_irq, IRQF_TRIGGER_RISING,
393 name, dd);
394 if (rc)
395 goto error_irq2;
396 rc = request_irq(dd->irq_err, msm_spi_error_irq, IRQF_TRIGGER_RISING,
397 name, master);
398 if (rc)
399 goto error_irq3;
400 return 0;
401
402error_irq3:
403 free_irq(dd->irq_out, dd);
404error_irq2:
405 free_irq(dd->irq_in, dd);
406error_irq1:
407 return rc;
408}
409
410static inline void msm_spi_free_irq(struct msm_spi *dd,
411 struct spi_master *master)
412{
413 free_irq(dd->irq_err, master);
414 free_irq(dd->irq_out, dd);
415 free_irq(dd->irq_in, dd);
416}
417
418static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
419static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
420static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
421
422static inline int msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
423static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
424{
425 msm_spi_write_word_to_fifo(dd);
426}
427static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
428
429static inline void msm_spi_complete(struct msm_spi *dd)
430{
431 complete(&dd->transfer_complete);
432}
433
434static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
435{
436 writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
437}
438
439static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
440{
441 writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
442}
443
444#elif defined(CONFIG_SPI_QUP) || defined(CONFIG_SPI_QUP_MODULE)
445
446/* Interrupt Handling */
447/* In QUP the same interrupt line is used for input, output and error */
448static inline int msm_spi_get_irq_data(struct msm_spi *dd,
449 struct platform_device *pdev)
450{
451 dd->irq_in = platform_get_irq_byname(pdev, "spi_irq_in");
452 if (dd->irq_in < 0)
453 return -1;
454 return 0;
455}
456
457static inline int msm_spi_get_gsbi_resource(struct msm_spi *dd,
458 struct platform_device *pdev)
459{
460 struct resource *resource;
461
462 resource = platform_get_resource_byname(pdev,
463 IORESOURCE_MEM, "gsbi_base");
464 if (!resource)
465 return -ENXIO;
466 dd->gsbi_mem_phys_addr = resource->start;
467 dd->gsbi_mem_size = resource_size(resource);
468
469 return 0;
470}
471
472static inline void msm_spi_release_gsbi(struct msm_spi *dd)
473{
474 iounmap(dd->gsbi_base);
475 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
476}
477
478static inline int msm_spi_request_gsbi(struct msm_spi *dd)
479{
480 if (!request_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size,
481 SPI_DRV_NAME)) {
482 return -ENXIO;
483 }
484 dd->gsbi_base = ioremap(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
485 if (!dd->gsbi_base) {
486 release_mem_region(dd->gsbi_mem_phys_addr, dd->gsbi_mem_size);
487 return -ENXIO;
488 }
489 return 0;
490}
491
492static inline void msm_spi_init_gsbi(struct msm_spi *dd)
493{
494 /* Set GSBI to SPI mode, and CRCI_MUX_CTRL to SPI CRCI ports */
495 writel_relaxed(GSBI_SPI_CONFIG, dd->gsbi_base + GSBI_CTRL_REG);
496}
497
498/* Figure out which IRQ occurred and call the relevant functions */
499static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
500{
501 u32 op, ret = IRQ_NONE;
502 struct msm_spi *dd = dev_id;
503
504 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
505 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
506 struct spi_master *master = dev_get_drvdata(dd->dev);
507 ret |= msm_spi_error_irq(irq, master);
508 }
509
510 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
511 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
512 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
513 dd->base + SPI_OPERATIONAL);
514 /*
515 * Ensure service flag was cleared before further
516 * processing of interrupt.
517 */
518 mb();
519 ret |= msm_spi_input_irq(irq, dev_id);
520 }
521
522 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
523 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
524 dd->base + SPI_OPERATIONAL);
525 /*
526 * Ensure service flag was cleared before further
527 * processing of interrupt.
528 */
529 mb();
530 ret |= msm_spi_output_irq(irq, dev_id);
531 }
532
533 if (dd->done) {
534 complete(&dd->transfer_complete);
535 dd->done = 0;
536 }
537 return ret;
538}
539
540static inline int msm_spi_request_irq(struct msm_spi *dd,
541 const char *name,
542 struct spi_master *master)
543{
544 return request_irq(dd->irq_in, msm_spi_qup_irq, IRQF_TRIGGER_HIGH,
545 name, dd);
546}
547
548static inline void msm_spi_free_irq(struct msm_spi *dd,
549 struct spi_master *master)
550{
551 free_irq(dd->irq_in, dd);
552}
553
554static inline void msm_spi_free_output_irq(struct msm_spi *dd) { }
555static inline void msm_spi_free_error_irq(struct msm_spi *dd,
556 struct spi_master *master) { }
557
558static inline void msm_spi_disable_irqs(struct msm_spi *dd)
559{
560 disable_irq(dd->irq_in);
561}
562
563static inline void msm_spi_enable_irqs(struct msm_spi *dd)
564{
565 enable_irq(dd->irq_in);
566}
567
568static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
569{
570 *spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
571}
572
573static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
574{
575 writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
576}
577
578static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n);
579
580/* QUP has no_input, no_output, and N bits at QUP_CONFIG */
581static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
582{
583 u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
584
585 msm_spi_add_configs(dd, &qup_config, bpw-1);
586 writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE,
587 dd->base + QUP_CONFIG);
588}
589
590static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
591{
592 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
593 return -1;
594 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
595 return -1;
596 return 0;
597}
598
599static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
600{
601 if (read_count <= dd->input_fifo_size)
602 msm_spi_write_rmn_to_fifo(dd);
603 else
604 msm_spi_write_word_to_fifo(dd);
605}
606
607static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
608{
609 writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
610}
611
612static inline void msm_spi_complete(struct msm_spi *dd)
613{
614 dd->done = 1;
615}
616
617static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
618{
619 writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
620}
621
622static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
623{
624 writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
625}
626
627#endif
628
629static inline int msm_spi_request_gpios(struct msm_spi *dd)
630{
631 int i;
632 int result = 0;
633
634 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
635 if (dd->spi_gpios[i] >= 0) {
636 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
637 if (result) {
638 pr_err("%s: gpio_request for pin %d failed "
639     "with error %d\n", __func__,
640     dd->spi_gpios[i], result);
641 goto error;
642 }
643 }
644 }
645 return 0;
646
647error:
648 for (; --i >= 0;) {
649 if (dd->spi_gpios[i] >= 0)
650 gpio_free(dd->spi_gpios[i]);
651 }
652 return result;
653}
654
655static inline void msm_spi_free_gpios(struct msm_spi *dd)
656{
657 int i;
658
659 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
660 if (dd->spi_gpios[i] >= 0)
661 gpio_free(dd->spi_gpios[i]);
662 }
663}
664
665static void msm_spi_clock_set(struct msm_spi *dd, int speed)
666{
667 int rc;
668
669 rc = clk_set_rate(dd->clk, speed);
670 if (!rc)
671 dd->clock_speed = speed;
672}
673
674static int msm_spi_calculate_size(int *fifo_size,
675 int *block_size,
676 int block,
677 int mult)
678{
679 int words;
680
681 switch (block) {
682 case 0:
683 words = 1; /* 4 bytes */
684 break;
685 case 1:
686 words = 4; /* 16 bytes */
687 break;
688 case 2:
689 words = 8; /* 32 bytes */
690 break;
691 default:
692 return -1;
693 }
694 switch (mult) {
695 case 0:
696 *fifo_size = words * 2;
697 break;
698 case 1:
699 *fifo_size = words * 4;
700 break;
701 case 2:
702 *fifo_size = words * 8;
703 break;
704 case 3:
705 *fifo_size = words * 16;
706 break;
707 default:
708 return -1;
709 }
710 *block_size = words * sizeof(u32); /* in bytes */
711 return 0;
712}
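/*
 * Worked example (illustrative only): block == 1 decodes to words == 4,
 * i.e. *block_size == 16 bytes, and mult == 2 gives *fifo_size ==
 * 4 * 8 == 32 words; any block/mult encoding outside the cases above
 * makes the function return -1.
 */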
713
714static void get_next_transfer(struct msm_spi *dd)
715{
716 struct spi_transfer *t = dd->cur_transfer;
717
718 if (t->transfer_list.next != &dd->cur_msg->transfers) {
719 dd->cur_transfer = list_entry(t->transfer_list.next,
720 struct spi_transfer,
721 transfer_list);
722 dd->write_buf = dd->cur_transfer->tx_buf;
723 dd->read_buf = dd->cur_transfer->rx_buf;
724 }
725}
726
727static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
728{
729 u32 spi_iom;
730 int block;
731 int mult;
732
733 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
734
735 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
736 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
737 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
738 block, mult)) {
739 goto fifo_size_err;
740 }
741
742 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
743 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
744 if (msm_spi_calculate_size(&dd->output_fifo_size,
745 &dd->output_block_size, block, mult)) {
746 goto fifo_size_err;
747 }
748 /* DM mode is not available for this block size */
749 if (dd->input_block_size == 4 || dd->output_block_size == 4)
750 dd->use_dma = 0;
751
752 /* DM mode is currently unsupported for different block sizes */
753 if (dd->input_block_size != dd->output_block_size)
754 dd->use_dma = 0;
755
756 if (dd->use_dma)
757 dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);
758
759 return;
760
761fifo_size_err:
762 dd->use_dma = 0;
763 printk(KERN_WARNING "%s: invalid FIFO size, SPI_IO_MODES=0x%x\n",
764 __func__, spi_iom);
765 return;
766}
767
768static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
769{
770 u32 data_in;
771 int i;
772 int shift;
773
774 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
775 if (dd->read_buf) {
776 for (i = 0; (i < dd->bytes_per_word) &&
777 dd->rx_bytes_remaining; i++) {
778 /* The data format depends on bytes_per_word:
779 4 bytes: 0x12345678
780 3 bytes: 0x00123456
781 2 bytes: 0x00001234
782 1 byte : 0x00000012
783 */
784 shift = 8 * (dd->bytes_per_word - i - 1);
785 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
786 dd->rx_bytes_remaining--;
787 }
788 } else {
789 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
790 dd->rx_bytes_remaining -= dd->bytes_per_word;
791 else
792 dd->rx_bytes_remaining = 0;
793 }
794 dd->read_xfr_cnt++;
795 if (dd->multi_xfr) {
796 if (!dd->rx_bytes_remaining)
797 dd->read_xfr_cnt = 0;
798 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
799 dd->read_len) {
800 struct spi_transfer *t = dd->cur_rx_transfer;
801 if (t->transfer_list.next != &dd->cur_msg->transfers) {
802 t = list_entry(t->transfer_list.next,
803 struct spi_transfer,
804 transfer_list);
805 dd->read_buf = t->rx_buf;
806 dd->read_len = t->len;
807 dd->read_xfr_cnt = 0;
808 dd->cur_rx_transfer = t;
809 }
810 }
811 }
812}
813
814static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
815{
816 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
817
818 return spi_op & SPI_OP_STATE_VALID;
819}
820
821static inline int msm_spi_wait_valid(struct msm_spi *dd)
822{
823 unsigned long delay = 0;
824 unsigned long timeout = 0;
825
826 if (dd->clock_speed == 0)
827 return -EINVAL;
828 /*
829 * Based on the SPI clock speed, sufficient time
830 * should be given for the SPI state transition
831 * to occur
832 */
833 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
834 /*
835 * For small delay values, the default timeout would
836 * be one jiffy
837 */
838 if (delay < SPI_DELAY_THRESHOLD)
839 delay = SPI_DELAY_THRESHOLD;
840
841 /* Adding one to round off to the nearest jiffy */
842 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
843 while (!msm_spi_is_valid_state(dd)) {
844 if (time_after(jiffies, timeout)) {
845 if (!msm_spi_is_valid_state(dd)) {
846 if (dd->cur_msg)
847 dd->cur_msg->status = -EIO;
848 dev_err(dd->dev, "%s: SPI operational state "
849     "not valid\n", __func__);
850 return -ETIMEDOUT;
851 } else
852 return 0;
853 }
854 /*
855 * For smaller values of delay, context switch time
856 * would negate the usage of usleep
857 */
858 if (delay > 20)
859 usleep(delay);
860 else if (delay)
861 udelay(delay);
862 }
863 return 0;
864}
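/*
 * Illustrative numbers (not part of the original code): at a 1 MHz SPI
 * clock, delay == (10 * USEC_PER_SEC) / 1000000 == 10 us, and the loop
 * polls for up to msecs_to_jiffies(10 * SPI_DEFAULT_TIMEOUT) + 1, i.e.
 * roughly 100 ms, before declaring -ETIMEDOUT.
 */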
865
866static inline int msm_spi_set_state(struct msm_spi *dd,
867 enum msm_spi_state state)
868{
869 enum msm_spi_state cur_state;
870 if (msm_spi_wait_valid(dd))
871 return -1;
872 cur_state = readl_relaxed(dd->base + SPI_STATE);
873 /* Per spec:
874 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
875 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
876 (state == SPI_OP_STATE_RESET)) {
877 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
878 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
879 } else {
880 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
881 dd->base + SPI_STATE);
882 }
883 if (msm_spi_wait_valid(dd))
884 return -1;
885
886 return 0;
887}
888
889static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
890{
891 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
892
893 if (n != (*config & SPI_CFG_N))
894 *config = (*config & ~SPI_CFG_N) | n;
895
896 if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
897 if (dd->read_buf == NULL)
898 *config |= SPI_NO_INPUT;
899 if (dd->write_buf == NULL)
900 *config |= SPI_NO_OUTPUT;
901 }
902}
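/*
 * Example (illustrative): callers pass n == bpw - 1, so 8 bits per word
 * programs SPI_CFG_N with 7 and 32 bits per word with 31. The
 * SPI_NO_INPUT/SPI_NO_OUTPUT bits are only set for DM transfers that have
 * no read or no write buffer, respectively.
 */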
903
904static void msm_spi_set_config(struct msm_spi *dd, int bpw)
905{
906 u32 spi_config;
907
908 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
909
910 if (dd->cur_msg->spi->mode & SPI_CPHA)
911 spi_config &= ~SPI_CFG_INPUT_FIRST;
912 else
913 spi_config |= SPI_CFG_INPUT_FIRST;
914 if (dd->cur_msg->spi->mode & SPI_LOOP)
915 spi_config |= SPI_CFG_LOOPBACK;
916 else
917 spi_config &= ~SPI_CFG_LOOPBACK;
918 msm_spi_add_configs(dd, &spi_config, bpw-1);
919 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
920 msm_spi_set_qup_config(dd, bpw);
921}
922
923static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
924{
925 dmov_box *box;
926 int bytes_to_send, num_rows, bytes_sent;
927 u32 num_transfers;
928
929 atomic_set(&dd->rx_irq_called, 0);
930 if (dd->write_len && !dd->read_len) {
931 /* WR-WR transfer */
932 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
933 dd->write_buf = dd->temp_buf;
934 } else {
935 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
936 /* For WR-RD transfer, bytes_sent can be negative */
937 if (bytes_sent < 0)
938 bytes_sent = 0;
939 }
940
941 /* We'll send in chunks of SPI_MAX_LEN if larger */
942 bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
943 SPI_MAX_LEN : dd->tx_bytes_remaining;
944 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
945 dd->unaligned_len = bytes_to_send % dd->burst_size;
946 num_rows = bytes_to_send / dd->burst_size;
947
948 dd->mode = SPI_DMOV_MODE;
949
950 if (num_rows) {
951 /* src in 16 MSB, dst in 16 LSB */
952 box = &dd->tx_dmov_cmd->box;
953 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
954 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
955 box->num_rows = (num_rows << 16) | num_rows;
956 box->row_offset = (dd->burst_size << 16) | 0;
957
958 box = &dd->rx_dmov_cmd->box;
959 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
960 box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
961 box->num_rows = (num_rows << 16) | num_rows;
962 box->row_offset = (0 << 16) | dd->burst_size;
963
964 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
965 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
966 offsetof(struct spi_dmov_cmd, box));
967 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
968 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
969 offsetof(struct spi_dmov_cmd, box));
970 } else {
971 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
972 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
973 offsetof(struct spi_dmov_cmd, single_pad));
974 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
975 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
976 offsetof(struct spi_dmov_cmd, single_pad));
977 }
978
979 if (!dd->unaligned_len) {
980 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
981 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
982 } else {
983 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
984 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
985 u32 offset = dd->cur_transfer->len - dd->unaligned_len;
986
987 if ((dd->multi_xfr) && (dd->read_len <= 0))
988 offset = dd->cur_msg_len - dd->unaligned_len;
989
990 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
991 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
992
993 memset(dd->tx_padding, 0, dd->burst_size);
994 memset(dd->rx_padding, 0, dd->burst_size);
995 if (dd->write_buf)
996 memcpy(dd->tx_padding, dd->write_buf + offset,
997 dd->unaligned_len);
998
999 tx_cmd->src = dd->tx_padding_dma;
1000 rx_cmd->dst = dd->rx_padding_dma;
1001 tx_cmd->len = rx_cmd->len = dd->burst_size;
1002 }
1003 /* This also takes care of the padding dummy buffer.
1004    Since the count is set to the correct length, the
1005    dummy bytes won't actually be sent. */
1006 if (dd->multi_xfr) {
1007 u32 write_transfers = 0;
1008 u32 read_transfers = 0;
1009
1010 if (dd->write_len > 0) {
1011 write_transfers = DIV_ROUND_UP(dd->write_len,
1012 dd->bytes_per_word);
1013 writel_relaxed(write_transfers,
1014 dd->base + SPI_MX_OUTPUT_COUNT);
1015 }
1016 if (dd->read_len > 0) {
1017 /*
1018 * The read following a write transfer must take
1019 * into account that the bytes pertaining to
1020 * the write transfer need to be discarded
1021 * before the actual read begins.
1022 */
1023 read_transfers = DIV_ROUND_UP(dd->read_len +
1024 dd->write_len,
1025 dd->bytes_per_word);
1026 writel_relaxed(read_transfers,
1027 dd->base + SPI_MX_INPUT_COUNT);
1028 }
1029 } else {
1030 if (dd->write_buf)
1031 writel_relaxed(num_transfers,
1032 dd->base + SPI_MX_OUTPUT_COUNT);
1033 if (dd->read_buf)
1034 writel_relaxed(num_transfers,
1035 dd->base + SPI_MX_INPUT_COUNT);
1036 }
1037}
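/*
 * Illustrative breakdown (not from the original source): with a 16-byte
 * burst_size and 100 bytes to send, num_rows == 6 box rows move 96 bytes
 * and unaligned_len == 4, so the last 4 bytes are bounced through the
 * tx/rx padding buffers via the single_pad command.
 */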
1038
1039static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
1040{
1041 dma_coherent_pre_ops();
1042 if (dd->write_buf)
1043 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
1044 if (dd->read_buf)
1045 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
1046}
1047
1048/* The SPI core can send a maximum of 4K transfers, because there is a HW
1049   problem with infinite mode.
1050   Therefore, we send several chunks of up to SPI_MAX_LEN bytes (depending on how
1051 much is left).
1052 Upon completion we send the next chunk, or complete the transfer if
1053 everything is finished.
1054*/
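/*
 * Example (illustrative only): with 8-bit words SPI_MAX_LEN is 4032 bytes,
 * so a 10000-byte transfer goes out as chunks of 4032 + 4032 + 1936 bytes,
 * with msm_spi_dm_send_next() re-arming the data mover for each follow-up
 * chunk.
 */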
1055static int msm_spi_dm_send_next(struct msm_spi *dd)
1056{
1057 /* By now we should have sent all the bytes in FIFO mode;
1058  * however, to make things right, we'll check anyway.
1059 */
1060 if (dd->mode != SPI_DMOV_MODE)
1061 return 0;
1062
1063 /* We need to send more chunks, if we sent max last time */
1064 if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
1065 dd->tx_bytes_remaining -= SPI_MAX_LEN;
1066 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1067 return 0;
1068 dd->read_len = dd->write_len = 0;
1069 msm_spi_setup_dm_transfer(dd);
1070 msm_spi_enqueue_dm_commands(dd);
1071 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1072 return 0;
1073 return 1;
1074 } else if (dd->read_len && dd->write_len) {
1075 dd->tx_bytes_remaining -= dd->cur_transfer->len;
1076 if (list_is_last(&dd->cur_transfer->transfer_list,
1077 &dd->cur_msg->transfers))
1078 return 0;
1079 get_next_transfer(dd);
1080 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
1081 return 0;
1082 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
1083 dd->read_buf = dd->temp_buf;
1084 dd->read_len = dd->write_len = -1;
1085 msm_spi_setup_dm_transfer(dd);
1086 msm_spi_enqueue_dm_commands(dd);
1087 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1088 return 0;
1089 return 1;
1090 }
1091 return 0;
1092}
1093
1094static inline void msm_spi_ack_transfer(struct msm_spi *dd)
1095{
1096 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
1097 SPI_OP_MAX_OUTPUT_DONE_FLAG,
1098 dd->base + SPI_OPERATIONAL);
1099 /* Ensure done flag was cleared before proceeding further */
1100 mb();
1101}
1102
1103static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
1104{
1105 struct msm_spi *dd = dev_id;
1106
1107 dd->stat_rx++;
1108
1109 if (dd->mode == SPI_MODE_NONE)
1110 return IRQ_HANDLED;
1111
1112 if (dd->mode == SPI_DMOV_MODE) {
1113 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1114 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
1115 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
1116 msm_spi_ack_transfer(dd);
1117 if (dd->unaligned_len == 0) {
1118 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1119 return IRQ_HANDLED;
1120 }
1121 msm_spi_complete(dd);
1122 return IRQ_HANDLED;
1123 }
1124 return IRQ_NONE;
1125 }
1126
1127 if (dd->mode == SPI_FIFO_MODE) {
1128 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
1129 SPI_OP_IP_FIFO_NOT_EMPTY) &&
1130 (dd->rx_bytes_remaining > 0)) {
1131 msm_spi_read_word_from_fifo(dd);
1132 }
1133 if (dd->rx_bytes_remaining == 0)
1134 msm_spi_complete(dd);
1135 }
1136
1137 return IRQ_HANDLED;
1138}
1139
1140static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1141{
1142 u32 word;
1143 u8 byte;
1144 int i;
1145
1146 word = 0;
1147 if (dd->write_buf) {
1148 for (i = 0; (i < dd->bytes_per_word) &&
1149 dd->tx_bytes_remaining; i++) {
1150 dd->tx_bytes_remaining--;
1151 byte = *dd->write_buf++;
1152 word |= (byte << (BITS_PER_BYTE * (3 - i)));
1153 }
1154 } else
1155 if (dd->tx_bytes_remaining > dd->bytes_per_word)
1156 dd->tx_bytes_remaining -= dd->bytes_per_word;
1157 else
1158 dd->tx_bytes_remaining = 0;
1159 dd->write_xfr_cnt++;
1160 if (dd->multi_xfr) {
1161 if (!dd->tx_bytes_remaining)
1162 dd->write_xfr_cnt = 0;
1163 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
1164 dd->write_len) {
1165 struct spi_transfer *t = dd->cur_tx_transfer;
1166 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1167 t = list_entry(t->transfer_list.next,
1168 struct spi_transfer,
1169 transfer_list);
1170 dd->write_buf = t->tx_buf;
1171 dd->write_len = t->len;
1172 dd->write_xfr_cnt = 0;
1173 dd->cur_tx_transfer = t;
1174 }
1175 }
1176 }
1177 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1178}
1179
1180static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1181{
1182 int count = 0;
1183
1184 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
1185 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
1186 SPI_OP_OUTPUT_FIFO_FULL)) {
1187 msm_spi_write_word_to_fifo(dd);
1188 count++;
1189 }
1190}
1191
1192static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1193{
1194 struct msm_spi *dd = dev_id;
1195
1196 dd->stat_tx++;
1197
1198 if (dd->mode == SPI_MODE_NONE)
1199 return IRQ_HANDLED;
1200
1201 if (dd->mode == SPI_DMOV_MODE) {
1202 /* TX_ONLY transaction is handled here
1203 This is the only place we send complete at tx and not rx */
1204 if (dd->read_buf == NULL &&
1205 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1206 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1207 msm_spi_ack_transfer(dd);
1208 msm_spi_complete(dd);
1209 return IRQ_HANDLED;
1210 }
1211 return IRQ_NONE;
1212 }
1213
1214 /* Output FIFO is empty. Transmit any outstanding write data. */
1215 if (dd->mode == SPI_FIFO_MODE)
1216 msm_spi_write_rmn_to_fifo(dd);
1217
1218 return IRQ_HANDLED;
1219}
1220
1221static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1222{
1223 struct spi_master *master = dev_id;
1224 struct msm_spi *dd = spi_master_get_devdata(master);
1225 u32 spi_err;
1226
1227 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1228 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1229 dev_warn(master->dev.parent, "SPI output overrun error\n");
1230 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1231 dev_warn(master->dev.parent, "SPI input underrun error\n");
1232 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1233 dev_warn(master->dev.parent, "SPI output underrun error\n");
1234 msm_spi_get_clk_err(dd, &spi_err);
1235 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1236 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1237 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1238 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1239 msm_spi_clear_error_flags(dd);
1240 msm_spi_ack_clk_err(dd);
1241 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1242 mb();
1243 return IRQ_HANDLED;
1244}
1245
1246static int msm_spi_map_dma_buffers(struct msm_spi *dd)
1247{
1248 struct device *dev;
1249 struct spi_transfer *first_xfr;
1250 struct spi_transfer *nxt_xfr = NULL;
1251 void *tx_buf, *rx_buf;
1252 unsigned tx_len, rx_len;
1253 int ret = -EINVAL;
1254
1255 dev = &dd->cur_msg->spi->dev;
1256 first_xfr = dd->cur_transfer;
1257 tx_buf = (void *)first_xfr->tx_buf;
1258 rx_buf = first_xfr->rx_buf;
1259 tx_len = rx_len = first_xfr->len;
1260
1261 /*
1262 * For WR-WR and WR-RD transfers, we allocate our own temporary
1263 * buffer and copy the data to/from the client buffers.
1264 */
1265 if (dd->multi_xfr) {
1266 dd->temp_buf = kzalloc(dd->cur_msg_len,
1267 GFP_KERNEL | __GFP_DMA);
1268 if (!dd->temp_buf)
1269 return -ENOMEM;
1270 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1271 struct spi_transfer, transfer_list);
1272
1273 if (dd->write_len && !dd->read_len) {
1274 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1275 goto error;
1276
1277 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1278 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1279 nxt_xfr->len);
1280 tx_buf = dd->temp_buf;
1281 tx_len = dd->cur_msg_len;
1282 } else {
1283 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1284 goto error;
1285
1286 rx_buf = dd->temp_buf;
1287 rx_len = dd->cur_msg_len;
1288 }
1289 }
1290 if (tx_buf != NULL) {
1291 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1292 tx_len, DMA_TO_DEVICE);
1293 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1294 dev_err(dev, "dma %cX %d bytes error\n",
1295 'T', tx_len);
1296 ret = -ENOMEM;
1297 goto error;
1298 }
1299 }
1300 if (rx_buf != NULL) {
1301 dma_addr_t dma_handle;
1302 dma_handle = dma_map_single(dev, rx_buf,
1303 rx_len, DMA_FROM_DEVICE);
1304 if (dma_mapping_error(NULL, dma_handle)) {
1305 dev_err(dev, "dma %cX %d bytes error\n",
1306 'R', rx_len);
1307 if (tx_buf != NULL)
1308 dma_unmap_single(NULL, first_xfr->tx_dma,
1309 tx_len, DMA_TO_DEVICE);
1310 ret = -ENOMEM;
1311 goto error;
1312 }
1313 if (dd->multi_xfr)
1314 nxt_xfr->rx_dma = dma_handle;
1315 else
1316 first_xfr->rx_dma = dma_handle;
1317 }
1318 return 0;
1319
1320error:
1321 kfree(dd->temp_buf);
1322 dd->temp_buf = NULL;
1323 return ret;
1324}
1325
1326static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
1327{
1328 struct device *dev;
1329 u32 offset;
1330
1331 dev = &dd->cur_msg->spi->dev;
1332 if (dd->cur_msg->is_dma_mapped)
1333 goto unmap_end;
1334
1335 if (dd->multi_xfr) {
1336 if (dd->write_len && !dd->read_len) {
1337 dma_unmap_single(dev,
1338 dd->cur_transfer->tx_dma,
1339 dd->cur_msg_len,
1340 DMA_TO_DEVICE);
1341 } else {
1342 struct spi_transfer *prev_xfr;
1343 prev_xfr = list_entry(
1344 dd->cur_transfer->transfer_list.prev,
1345 struct spi_transfer,
1346 transfer_list);
1347 if (dd->cur_transfer->rx_buf) {
1348 dma_unmap_single(dev,
1349 dd->cur_transfer->rx_dma,
1350 dd->cur_msg_len,
1351 DMA_FROM_DEVICE);
1352 }
1353 if (prev_xfr->tx_buf) {
1354 dma_unmap_single(dev,
1355 prev_xfr->tx_dma,
1356 prev_xfr->len,
1357 DMA_TO_DEVICE);
1358 }
1359 if (dd->unaligned_len && dd->read_buf) {
1360 offset = dd->cur_msg_len - dd->unaligned_len;
1361 dma_coherent_post_ops();
1362 memcpy(dd->read_buf + offset, dd->rx_padding,
1363 dd->unaligned_len);
1364 memcpy(dd->cur_transfer->rx_buf,
1365 dd->read_buf + prev_xfr->len,
1366 dd->cur_transfer->len);
1367 }
1368 }
1369 kfree(dd->temp_buf);
1370 dd->temp_buf = NULL;
1371 return;
1372 } else {
1373 if (dd->cur_transfer->rx_buf)
1374 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1375 dd->cur_transfer->len,
1376 DMA_FROM_DEVICE);
1377 if (dd->cur_transfer->tx_buf)
1378 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1379 dd->cur_transfer->len,
1380 DMA_TO_DEVICE);
1381 }
1382
1383unmap_end:
1384 /* If we padded the transfer, we copy it from the padding buf */
1385 if (dd->unaligned_len && dd->read_buf) {
1386 offset = dd->cur_transfer->len - dd->unaligned_len;
1387 dma_coherent_post_ops();
1388 memcpy(dd->read_buf + offset, dd->rx_padding,
1389 dd->unaligned_len);
1390 }
1391}
1392
1393/**
1394 * msm_use_dm - decides whether to use data mover for this
1395 * transfer
1396 * @dd: device
1397 * @tr: transfer
1398 *
1399 * Start using DM if:
1400 * 1. Transfer is longer than 3*block size.
1401 * 2. Buffers are aligned to the cache line.
1402 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
1403 */
1404static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
1405 u8 bpw)
1406{
1407 u32 cache_line = dma_get_cache_alignment();
1408
1409 if (!dd->use_dma)
1410 return 0;
1411
1412 if (dd->cur_msg_len < 3*dd->input_block_size)
1413 return 0;
1414
1415 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
1416 return 0;
1417
1418 if (tr->tx_buf) {
1419 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1420 return 0;
1421 }
1422 if (tr->rx_buf) {
1423 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1424 return 0;
1425 }
1426
1427 if (tr->cs_change &&
1428     ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1429 return 0;
1430 return 1;
1431}
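/*
 * Illustrative decision (assuming a 16-byte block size): a cache-line
 * aligned 256-byte transfer clears the 3 * block_size threshold and may
 * use the data mover, while a 24-byte transfer always stays in FIFO mode.
 */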
1432
1433static void msm_spi_process_transfer(struct msm_spi *dd)
1434{
1435 u8 bpw;
1436 u32 spi_ioc;
1437 u32 spi_iom;
1438 u32 spi_ioc_orig;
1439 u32 max_speed;
1440 u32 chip_select;
1441 u32 read_count;
1442 u32 timeout;
1443 u32 int_loopback = 0;
1444
1445 dd->tx_bytes_remaining = dd->cur_msg_len;
1446 dd->rx_bytes_remaining = dd->cur_msg_len;
1447 dd->read_buf = dd->cur_transfer->rx_buf;
1448 dd->write_buf = dd->cur_transfer->tx_buf;
1449 init_completion(&dd->transfer_complete);
1450 if (dd->cur_transfer->bits_per_word)
1451 bpw = dd->cur_transfer->bits_per_word;
1452 else
1453 if (dd->cur_msg->spi->bits_per_word)
1454 bpw = dd->cur_msg->spi->bits_per_word;
1455 else
1456 bpw = 8;
1457 dd->bytes_per_word = (bpw + 7) / 8;
1458
1459 if (dd->cur_transfer->speed_hz)
1460 max_speed = dd->cur_transfer->speed_hz;
1461 else
1462 max_speed = dd->cur_msg->spi->max_speed_hz;
1463 if (!dd->clock_speed || max_speed != dd->clock_speed)
1464 msm_spi_clock_set(dd, max_speed);
1465
1466 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1467 if (dd->cur_msg->spi->mode & SPI_LOOP)
1468 int_loopback = 1;
1469 if (int_loopback && dd->multi_xfr &&
1470 (read_count > dd->input_fifo_size)) {
1471 if (dd->read_len && dd->write_len)
1472 printk(KERN_WARNING
1473     "%s: Internal loopback does not support > FIFO size "
1474     "for write-then-read transactions\n",
1475 __func__);
1476 else if (dd->write_len && !dd->read_len)
1477 printk(KERN_WARNING
1478     "%s: Internal loopback does not support > FIFO size "
1479     "for write-then-write transactions\n",
1480 __func__);
1481 return;
1482 }
1483 if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
1484 dd->mode = SPI_FIFO_MODE;
1485 if (dd->multi_xfr) {
1486 dd->read_len = dd->cur_transfer->len;
1487 dd->write_len = dd->cur_transfer->len;
1488 }
1489 /* read_count cannot exceed fifo_size, and only one READ COUNT
1490 interrupt is generated per transaction, so for transactions
1491 larger than fifo size READ COUNT must be disabled.
1492 For those transactions we usually move to Data Mover mode.
1493 */
1494 if (read_count <= dd->input_fifo_size) {
1495 writel_relaxed(read_count,
1496 dd->base + SPI_MX_READ_COUNT);
1497 msm_spi_set_write_count(dd, read_count);
1498 } else {
1499 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
1500 msm_spi_set_write_count(dd, 0);
1501 }
1502 } else {
1503 dd->mode = SPI_DMOV_MODE;
1504 if (dd->write_len && dd->read_len) {
1505 dd->tx_bytes_remaining = dd->write_len;
1506 dd->rx_bytes_remaining = dd->read_len;
1507 }
1508 }
1509
1510 /* Write mode - fifo or data mover*/
1511 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1512 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1513 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1514 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1515 /* Turn on packing for data mover */
1516 if (dd->mode == SPI_DMOV_MODE)
1517 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1518 else
1519 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1520 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1521
1522 msm_spi_set_config(dd, bpw);
1523
1524 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1525 spi_ioc_orig = spi_ioc;
1526 if (dd->cur_msg->spi->mode & SPI_CPOL)
1527 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1528 else
1529 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1530 chip_select = dd->cur_msg->spi->chip_select << 2;
1531 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1532 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1533 if (!dd->cur_transfer->cs_change)
1534 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1535 if (spi_ioc != spi_ioc_orig)
1536 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1537
1538 if (dd->mode == SPI_DMOV_MODE) {
1539 msm_spi_setup_dm_transfer(dd);
1540 msm_spi_enqueue_dm_commands(dd);
1541 }
1542 /* The output fifo interrupt handler will handle all writes after
1543 the first. Restricting this to one write avoids contention
1544 issues and race conditions between this thread and the int handler
1545 */
1546 else if (dd->mode == SPI_FIFO_MODE) {
1547 if (msm_spi_prepare_for_write(dd))
1548 goto transfer_end;
1549 msm_spi_start_write(dd, read_count);
1550 }
1551
1552 /* Only enter the RUN state after the first word is written into
1553 the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1554 might fire before the first word is written resulting in a
1555 possible race condition.
1556 */
1557 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1558 goto transfer_end;
1559
1560 timeout = 100 * msecs_to_jiffies(
1561 DIV_ROUND_UP(dd->cur_msg_len * 8,
1562 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
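	/*
	 * Illustrative timeout math (not in the original): a 100-byte
	 * message at max_speed == 1 MHz gives DIV_ROUND_UP(800, 1000) == 1,
	 * so timeout == 100 * msecs_to_jiffies(1), i.e. roughly 100x the
	 * ~1 ms ideal transfer time.
	 */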
1563
1564 /* Assume success, this might change later upon transaction result */
1565 dd->cur_msg->status = 0;
1566 do {
1567 if (!wait_for_completion_timeout(&dd->transfer_complete,
1568 timeout)) {
1569 dev_err(dd->dev, "%s: SPI transaction "
1570 "timeout\n", __func__);
1571 dd->cur_msg->status = -EIO;
1572 if (dd->mode == SPI_DMOV_MODE) {
1573 msm_dmov_flush(dd->tx_dma_chan);
1574 msm_dmov_flush(dd->rx_dma_chan);
1575 }
1576 break;
1577 }
1578 } while (msm_spi_dm_send_next(dd));
1579
1580transfer_end:
1581 if (dd->mode == SPI_DMOV_MODE)
1582 msm_spi_unmap_dma_buffers(dd);
1583 dd->mode = SPI_MODE_NONE;
1584
1585 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1586 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1587 dd->base + SPI_IO_CONTROL);
1588}
1589
1590static void get_transfer_length(struct msm_spi *dd)
1591{
1592 struct spi_transfer *tr;
1593 int num_xfrs = 0;
1594 int readlen = 0;
1595 int writelen = 0;
1596
1597 dd->cur_msg_len = 0;
1598 dd->multi_xfr = 0;
1599 dd->read_len = dd->write_len = 0;
1600
1601 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1602 if (tr->tx_buf)
1603 writelen += tr->len;
1604 if (tr->rx_buf)
1605 readlen += tr->len;
1606 dd->cur_msg_len += tr->len;
1607 num_xfrs++;
1608 }
1609
1610 if (num_xfrs == 2) {
1611 struct spi_transfer *first_xfr = dd->cur_transfer;
1612
1613 dd->multi_xfr = 1;
1614 tr = list_entry(first_xfr->transfer_list.next,
1615 struct spi_transfer,
1616 transfer_list);
1617 /*
1618 * We update dd->read_len and dd->write_len only
1619 * for WR-WR and WR-RD transfers.
1620 */
1621 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1622 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1623 ((!tr->tx_buf) && (tr->rx_buf))) {
1624 dd->read_len = readlen;
1625 dd->write_len = writelen;
1626 }
1627 }
1628 } else if (num_xfrs > 1)
1629 dd->multi_xfr = 1;
1630}
1631
1632static inline int combine_transfers(struct msm_spi *dd)
1633{
1634 struct spi_transfer *t = dd->cur_transfer;
1635 struct spi_transfer *nxt;
1636 int xfrs_grped = 1;
1637
1638 dd->cur_msg_len = dd->cur_transfer->len;
1639 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1640 nxt = list_entry(t->transfer_list.next,
1641 struct spi_transfer,
1642 transfer_list);
1643 if (t->cs_change != nxt->cs_change)
1644 return xfrs_grped;
1645 dd->cur_msg_len += nxt->len;
1646 xfrs_grped++;
1647 t = nxt;
1648 }
1649 return xfrs_grped;
1650}
1651
1652static void msm_spi_process_message(struct msm_spi *dd)
1653{
1654 int xfrs_grped = 0;
1655 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
1656
1657 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1658 struct spi_transfer,
1659 transfer_list);
1660 get_transfer_length(dd);
1661 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1662 /* Handling of multi-transfers. FIFO mode is used by default */
1663 list_for_each_entry(dd->cur_transfer,
1664 &dd->cur_msg->transfers,
1665 transfer_list) {
1666 if (!dd->cur_transfer->len)
1667 return;
1668 if (xfrs_grped) {
1669 xfrs_grped--;
1670 continue;
1671 } else {
1672 dd->read_len = dd->write_len = 0;
1673 xfrs_grped = combine_transfers(dd);
1674 }
1675 dd->cur_tx_transfer = dd->cur_transfer;
1676 dd->cur_rx_transfer = dd->cur_transfer;
1677 msm_spi_process_transfer(dd);
1678 xfrs_grped--;
1679 }
1680 } else {
1681 /* Handling of a single transfer or WR-WR or WR-RD transfers */
1682 if ((!dd->cur_msg->is_dma_mapped) &&
1683 (msm_use_dm(dd, dd->cur_transfer,
1684 dd->cur_transfer->bits_per_word))) {
1685 /* Mapping of DMA buffers */
1686 int ret = msm_spi_map_dma_buffers(dd);
1687 if (ret < 0) {
1688 dd->cur_msg->status = ret;
1689 return;
1690 }
1691 }
1692 dd->cur_tx_transfer = dd->cur_rx_transfer = dd->cur_transfer;
1693 msm_spi_process_transfer(dd);
1694 }
1695}
1696
1697/* workqueue - pull messages from queue & process */
1698static void msm_spi_workq(struct work_struct *work)
1699{
1700 struct msm_spi *dd =
1701 container_of(work, struct msm_spi, work_data);
1702 unsigned long flags;
1703 u32 status_error = 0;
1704
1705 mutex_lock(&dd->core_lock);
1706
1707 /* Don't allow power collapse until we release mutex */
1708 if (pm_qos_request_active(&qos_req_list))
1709 pm_qos_update_request(&qos_req_list,
1710 dd->pm_lat);
1711 if (dd->use_rlock)
1712 remote_mutex_lock(&dd->r_lock);
1713
1714 clk_enable(dd->clk);
1715 clk_enable(dd->pclk);
1716 msm_spi_enable_irqs(dd);
1717
1718 if (!msm_spi_is_valid_state(dd)) {
1719 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1720 __func__);
1721 status_error = 1;
1722 }
1723
1724 spin_lock_irqsave(&dd->queue_lock, flags);
1725 while (!list_empty(&dd->queue)) {
1726 dd->cur_msg = list_entry(dd->queue.next,
1727 struct spi_message, queue);
1728 list_del_init(&dd->cur_msg->queue);
1729 spin_unlock_irqrestore(&dd->queue_lock, flags);
1730 if (status_error)
1731 dd->cur_msg->status = -EIO;
1732 else
1733 msm_spi_process_message(dd);
1734 if (dd->cur_msg->complete)
1735 dd->cur_msg->complete(dd->cur_msg->context);
1736 spin_lock_irqsave(&dd->queue_lock, flags);
1737 }
1738 dd->transfer_pending = 0;
1739 spin_unlock_irqrestore(&dd->queue_lock, flags);
1740
1741 msm_spi_disable_irqs(dd);
1742 clk_disable(dd->clk);
1743 clk_disable(dd->pclk);
1744
1745 if (dd->use_rlock)
1746 remote_mutex_unlock(&dd->r_lock);
1747
1748 if (pm_qos_request_active(&qos_req_list))
1749 pm_qos_update_request(&qos_req_list,
1750 PM_QOS_DEFAULT_VALUE);
1751
1752 mutex_unlock(&dd->core_lock);
1753 /* If needed, this can be done after the current message is complete,
1754 and work can be continued upon resume. No motivation for now. */
1755 if (dd->suspended)
1756 wake_up_interruptible(&dd->continue_suspend);
1757}
1758
1759static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1760{
1761 struct msm_spi *dd;
1762 unsigned long flags;
1763 struct spi_transfer *tr;
1764
1765 dd = spi_master_get_devdata(spi->master);
1766 if (dd->suspended)
1767 return -EBUSY;
1768
1769 if (list_empty(&msg->transfers) || !msg->complete)
1770 return -EINVAL;
1771
1772 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1773 /* Check message parameters */
1774 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1775 (tr->bits_per_word &&
1776 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1777 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1778 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1779     "tx=%p, rx=%p\n",
1780 tr->speed_hz, tr->bits_per_word,
1781 tr->tx_buf, tr->rx_buf);
1782 return -EINVAL;
1783 }
1784 }
1785
1786 spin_lock_irqsave(&dd->queue_lock, flags);
1787 if (dd->suspended) {
1788 spin_unlock_irqrestore(&dd->queue_lock, flags);
1789 return -EBUSY;
1790 }
1791 dd->transfer_pending = 1;
1792 list_add_tail(&msg->queue, &dd->queue);
1793 spin_unlock_irqrestore(&dd->queue_lock, flags);
1794 queue_work(dd->workqueue, &dd->work_data);
1795 return 0;
1796}
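/*
 * Usage sketch (illustrative, standard SPI core API, not part of this
 * file): a protocol driver reaches this ->transfer hook through the SPI
 * core, for example:
 *
 *	struct spi_transfer t = { .tx_buf = cmd, .len = sizeof(cmd) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);	(or spi_async() for non-blocking use)
 *
 * spi_sync()/spi_async() eventually call msm_spi_transfer(), which queues
 * the message and kicks the driver workqueue.
 */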
1797
1798static int msm_spi_setup(struct spi_device *spi)
1799{
1800 struct msm_spi *dd;
1801 int rc = 0;
1802 u32 spi_ioc;
1803 u32 spi_config;
1804 u32 mask;
1805
1806 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1807 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1808 __func__, spi->bits_per_word);
1809 rc = -EINVAL;
1810 }
1811 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1812 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1813 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1814 rc = -EINVAL;
1815 }
1816
1817 if (rc)
1818 goto err_setup_exit;
1819
1820 dd = spi_master_get_devdata(spi->master);
1821
1822 mutex_lock(&dd->core_lock);
1823 if (dd->suspended) {
1824 mutex_unlock(&dd->core_lock);
1825 return -EBUSY;
1826 }
1827
1828 if (dd->use_rlock)
1829 remote_mutex_lock(&dd->r_lock);
1830
1831 clk_enable(dd->clk);
1832 clk_enable(dd->pclk);
1833
1834 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1835 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1836 if (spi->mode & SPI_CS_HIGH)
1837 spi_ioc |= mask;
1838 else
1839 spi_ioc &= ~mask;
1840 if (spi->mode & SPI_CPOL)
1841 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1842 else
1843 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1844
1845 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1846
1847 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1848 if (spi->mode & SPI_LOOP)
1849 spi_config |= SPI_CFG_LOOPBACK;
1850 else
1851 spi_config &= ~SPI_CFG_LOOPBACK;
1852 if (spi->mode & SPI_CPHA)
1853 spi_config &= ~SPI_CFG_INPUT_FIRST;
1854 else
1855 spi_config |= SPI_CFG_INPUT_FIRST;
1856 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1857
1858 /* Ensure previous write completed before disabling the clocks */
1859 mb();
1860 clk_disable(dd->clk);
1861 clk_disable(dd->pclk);
1862
1863 if (dd->use_rlock)
1864 remote_mutex_unlock(&dd->r_lock);
1865 mutex_unlock(&dd->core_lock);
1866
1867err_setup_exit:
1868 return rc;
1869}
1870
1871#ifdef CONFIG_DEBUG_FS
1872static int debugfs_iomem_x32_set(void *data, u64 val)
1873{
1874 writel_relaxed(val, data);
1875 /* Ensure the previous write completed. */
1876 mb();
1877 return 0;
1878}
1879
1880static int debugfs_iomem_x32_get(void *data, u64 *val)
1881{
1882 *val = readl_relaxed(data);
1883 /* Ensure the previous read completed. */
1884 mb();
1885 return 0;
1886}
1887
1888DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1889 debugfs_iomem_x32_set, "0x%08llx\n");
1890
1891static void spi_debugfs_init(struct msm_spi *dd)
1892{
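	/*
	 * Expose every register listed in debugfs_spi_regs as a read/write
	 * hex file under a per-device debugfs directory.
	 */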
1893 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1894 if (dd->dent_spi) {
1895 int i;
1896 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1897 dd->debugfs_spi_regs[i] =
1898 debugfs_create_file(
1899 debugfs_spi_regs[i].name,
1900 debugfs_spi_regs[i].mode,
1901 dd->dent_spi,
1902 dd->base + debugfs_spi_regs[i].offset,
1903 &fops_iomem_x32);
1904 }
1905 }
1906}
1907
1908static void spi_debugfs_exit(struct msm_spi *dd)
1909{
1910 if (dd->dent_spi) {
1911 int i;
1912 debugfs_remove_recursive(dd->dent_spi);
1913 dd->dent_spi = NULL;
1914 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1915 dd->debugfs_spi_regs[i] = NULL;
1916 }
1917}
1918#else
1919static void spi_debugfs_init(struct msm_spi *dd) {}
1920static void spi_debugfs_exit(struct msm_spi *dd) {}
1921#endif
1922
1923/* ===Device attributes begin=== */
1924static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1925 char *buf)
1926{
1927 struct spi_master *master = dev_get_drvdata(dev);
1928 struct msm_spi *dd = spi_master_get_devdata(master);
1929
1930 return snprintf(buf, PAGE_SIZE,
1931 "Device %s\n"
1932 "rx fifo_size = %d spi words\n"
1933 "tx fifo_size = %d spi words\n"
1934 "use_dma ? %s\n"
1935 "rx block size = %d bytes\n"
1936 "tx block size = %d bytes\n"
1937 "burst size = %d bytes\n"
1938 "DMA configuration:\n"
1939			"tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
1940 "--statistics--\n"
1941 "Rx isrs = %d\n"
1942 "Tx isrs = %d\n"
1943 "DMA error = %d\n"
1944 "--debug--\n"
1945			"N/A yet\n",
1946 dev_name(dev),
1947 dd->input_fifo_size,
1948 dd->output_fifo_size,
1949 dd->use_dma ? "yes" : "no",
1950 dd->input_block_size,
1951 dd->output_block_size,
1952 dd->burst_size,
1953 dd->tx_dma_chan,
1954 dd->rx_dma_chan,
1955 dd->tx_dma_crci,
1956 dd->rx_dma_crci,
1957 dd->stat_rx + dd->stat_dmov_rx,
1958 dd->stat_tx + dd->stat_dmov_tx,
1959 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1960 );
1961}
1962
1963/* Reset statistics on write */
1964static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1965 const char *buf, size_t count)
1966{
1967	struct msm_spi *dd = spi_master_get_devdata(dev_get_drvdata(dev));
1968 dd->stat_rx = 0;
1969 dd->stat_tx = 0;
1970 dd->stat_dmov_rx = 0;
1971 dd->stat_dmov_tx = 0;
1972 dd->stat_dmov_rx_err = 0;
1973 dd->stat_dmov_tx_err = 0;
1974 return count;
1975}
1976
1977static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1978
1979static struct attribute *dev_attrs[] = {
1980 &dev_attr_stats.attr,
1981 NULL,
1982};
1983
1984static struct attribute_group dev_attr_grp = {
1985 .attrs = dev_attrs,
1986};
1987/* ===Device attributes end=== */
1988
1989/**
1990 * spi_dmov_tx_complete_func - DataMover tx completion callback
1991 *
1992 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1993 * spinlock @msm_dmov_lock held.
1994 */
1995static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1996 unsigned int result,
1997 struct msm_dmov_errdata *err)
1998{
1999 struct msm_spi *dd;
2000
2001 if (!(result & DMOV_RSLT_VALID)) {
2002		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
2003 return;
2004 }
2005 /* restore original context */
2006 dd = container_of(cmd, struct msm_spi, tx_hdr);
2007 if (result & DMOV_RSLT_DONE)
2008 dd->stat_dmov_tx++;
2009 else {
2010 /* Error or flush */
2011 if (result & DMOV_RSLT_ERROR) {
2012 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2013 dd->stat_dmov_tx_err++;
2014 }
2015 if (result & DMOV_RSLT_FLUSH) {
2016 /*
2017			 * Flushing normally happens during driver removal,
2018			 * while we are waiting for outstanding DMA commands
2019			 * to be flushed.
2020 */
2021 dev_info(dd->dev,
2022 "DMA channel flushed (0x%08x)\n", result);
2023 }
2024 if (err)
2025 dev_err(dd->dev,
2026 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2027 err->flush[0], err->flush[1], err->flush[2],
2028 err->flush[3], err->flush[4], err->flush[5]);
2029 dd->cur_msg->status = -EIO;
2030 complete(&dd->transfer_complete);
2031 }
2032}
2033
2034/**
2035 * spi_dmov_rx_complete_func - DataMover rx completion callback
2036 *
2037 * Executed in IRQ context (Data Mover's IRQ)
2038 * DataMover's spinlock @msm_dmov_lock held.
2039 */
2040static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2041 unsigned int result,
2042 struct msm_dmov_errdata *err)
2043{
2044 struct msm_spi *dd;
2045
2046 if (!(result & DMOV_RSLT_VALID)) {
2047		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
2048 result, cmd);
2049 return;
2050 }
2051 /* restore original context */
2052 dd = container_of(cmd, struct msm_spi, rx_hdr);
2053 if (result & DMOV_RSLT_DONE) {
2054 dd->stat_dmov_rx++;
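		/*
		 * Only the second of the two expected rx notifications
		 * signals completion; the first caller to increment
		 * rx_irq_called simply returns.
		 */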
2055 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2056 return;
2057 complete(&dd->transfer_complete);
2058 } else {
2059		/* Error or flush */
2060 if (result & DMOV_RSLT_ERROR) {
2061 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2062 dd->stat_dmov_rx_err++;
2063 }
2064 if (result & DMOV_RSLT_FLUSH) {
2065 dev_info(dd->dev,
2066 "DMA channel flushed(0x%08x)\n", result);
2067 }
2068 if (err)
2069 dev_err(dd->dev,
2070 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2071 err->flush[0], err->flush[1], err->flush[2],
2072 err->flush[3], err->flush[4], err->flush[5]);
2073 dd->cur_msg->status = -EIO;
2074 complete(&dd->transfer_complete);
2075 }
2076}
2077
2078static inline u32 get_chunk_size(struct msm_spi *dd)
2079{
2080 u32 cache_line = dma_get_cache_alignment();
2081
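	/*
	 * A single coherent chunk holds the tx and rx command structures
	 * plus the tx and rx padding buffers; msm_spi_init_dma() carves
	 * the individual pieces out of this allocation.
	 */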
2082 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
2083 roundup(dd->burst_size, cache_line))*2;
2084}
2085
2086static void msm_spi_teardown_dma(struct msm_spi *dd)
2087{
2088 int limit = 0;
2089
2090 if (!dd->use_dma)
2091 return;
2092
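	/* Keep flushing (up to ~500 ms) until any in-flight DMOV transfer drains */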
2093 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
2094 msm_dmov_flush(dd->tx_dma_chan);
2095 msm_dmov_flush(dd->rx_dma_chan);
2096 msleep(10);
2097 }
2098
2099 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
2100 dd->tx_dmov_cmd_dma);
2101 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2102 dd->tx_padding = dd->rx_padding = NULL;
2103}
2104
2105static __init int msm_spi_init_dma(struct msm_spi *dd)
2106{
2107 dmov_box *box;
2108 u32 cache_line = dma_get_cache_alignment();
2109
2110	/* Allocate everything as one chunk, since the total is smaller than a page */
2111
2112	/* We pass a NULL device: a real device would need coherent_dma_mask set
2113	 * in its definition; we are okay with using the system pool */
2114 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
2115 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
2116 if (dd->tx_dmov_cmd == NULL)
2117 return -ENOMEM;
2118
2119	/* DMA addresses should be 64-bit aligned */
2120 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2121 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2122 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2123 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2124
2125 /* Buffers should be aligned to cache line */
2126 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2127 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2128 sizeof(struct spi_dmov_cmd), cache_line);
2129 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
2130 cache_line);
2131 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
2132 cache_line);
2133
2134 /* Setup DM commands */
2135 box = &(dd->rx_dmov_cmd->box);
2136 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2137 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2138 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2139 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2140 offsetof(struct spi_dmov_cmd, cmd_ptr));
2141 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
2142
2143 box = &(dd->tx_dmov_cmd->box);
2144 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2145 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2146 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2147 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2148 offsetof(struct spi_dmov_cmd, cmd_ptr));
2149 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
2150
2151 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2152 CMD_DST_CRCI(dd->tx_dma_crci);
2153 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2154 SPI_OUTPUT_FIFO;
2155 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2156 CMD_SRC_CRCI(dd->rx_dma_crci);
2157 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2158 SPI_INPUT_FIFO;
2159
2160 /* Clear remaining activities on channel */
2161 msm_dmov_flush(dd->tx_dma_chan);
2162 msm_dmov_flush(dd->rx_dma_chan);
2163
2164 return 0;
2165}
2166
2167static int __init msm_spi_probe(struct platform_device *pdev)
2168{
2169 struct spi_master *master;
2170 struct msm_spi *dd;
2171 struct resource *resource;
2172 int rc = -ENXIO;
2173 int locked = 0;
2174 int i = 0;
2175 int clk_enabled = 0;
2176 int pclk_enabled = 0;
2177 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2178
2179 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2180 if (!master) {
2181 rc = -ENOMEM;
2182 dev_err(&pdev->dev, "master allocation failed\n");
2183 goto err_probe_exit;
2184 }
2185
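	/* Describe the controller's capabilities and hooks to the SPI core */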
2186 master->bus_num = pdev->id;
2187 master->mode_bits = SPI_SUPPORTED_MODES;
2188 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2189 master->setup = msm_spi_setup;
2190 master->transfer = msm_spi_transfer;
2191 platform_set_drvdata(pdev, master);
2192 dd = spi_master_get_devdata(master);
2193
2194 dd->pdata = pdata;
2195 rc = msm_spi_get_irq_data(dd, pdev);
2196 if (rc)
2197 goto err_probe_res;
2198 resource = platform_get_resource_byname(pdev,
2199 IORESOURCE_MEM, "spi_base");
2200 if (!resource) {
2201 rc = -ENXIO;
2202 goto err_probe_res;
2203 }
2204 dd->mem_phys_addr = resource->start;
2205 dd->mem_size = resource_size(resource);
2206
2207 rc = msm_spi_get_gsbi_resource(dd, pdev);
2208 if (rc)
2209 goto err_probe_res2;
2210
2211 if (pdata) {
2212 if (pdata->dma_config) {
2213 rc = pdata->dma_config();
2214 if (rc) {
2215 dev_warn(&pdev->dev,
2216 "%s: DM mode not supported\n",
2217 __func__);
2218 dd->use_dma = 0;
2219 goto skip_dma_resources;
2220 }
2221 }
2222 resource = platform_get_resource_byname(pdev,
2223 IORESOURCE_DMA,
2224 "spidm_channels");
2225 if (resource) {
2226 dd->rx_dma_chan = resource->start;
2227 dd->tx_dma_chan = resource->end;
2228
2229 resource = platform_get_resource_byname(pdev,
2230 IORESOURCE_DMA,
2231 "spidm_crci");
2232 if (!resource) {
2233 rc = -ENXIO;
2234 goto err_probe_res;
2235 }
2236 dd->rx_dma_crci = resource->start;
2237 dd->tx_dma_crci = resource->end;
2238 dd->use_dma = 1;
2239 master->dma_alignment = dma_get_cache_alignment();
2240 }
2241
2242skip_dma_resources:
2243 if (pdata->gpio_config) {
2244 rc = pdata->gpio_config();
2245 if (rc) {
2246 dev_err(&pdev->dev,
2247 "%s: error configuring GPIOs\n",
2248 __func__);
2249 goto err_probe_gpio;
2250 }
2251 }
2252 }
2253
2254 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2255 resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
2256 spi_rsrcs[i]);
2257 dd->spi_gpios[i] = resource ? resource->start : -1;
2258 }
2259
2260 rc = msm_spi_request_gpios(dd);
2261 if (rc)
2262 goto err_probe_gpio;
2263 spin_lock_init(&dd->queue_lock);
2264 mutex_init(&dd->core_lock);
2265 INIT_LIST_HEAD(&dd->queue);
2266 INIT_WORK(&dd->work_data, msm_spi_workq);
2267 init_waitqueue_head(&dd->continue_suspend);
2268 dd->workqueue = create_singlethread_workqueue(
2269 dev_name(master->dev.parent));
2270	if (!dd->workqueue) {
		rc = -ENOMEM;
2271		goto err_probe_workq;
	}
2272
2273 if (!request_mem_region(dd->mem_phys_addr, dd->mem_size,
2274 SPI_DRV_NAME)) {
2275 rc = -ENXIO;
2276 goto err_probe_reqmem;
2277 }
2278
2279 dd->base = ioremap(dd->mem_phys_addr, dd->mem_size);
2280	if (!dd->base) {
		rc = -ENOMEM;
2281		goto err_probe_ioremap;
	}
2282 rc = msm_spi_request_gsbi(dd);
2283 if (rc)
2284 goto err_probe_ioremap2;
2285 if (pdata && pdata->rsl_id) {
2286 struct remote_mutex_id rmid;
2287 rmid.r_spinlock_id = pdata->rsl_id;
2288 rmid.delay_us = SPI_TRYLOCK_DELAY;
2289
2290 rc = remote_mutex_init(&dd->r_lock, &rmid);
2291 if (rc) {
2292			dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2293				"(%s), (rc=%d)\n", __func__,
2294				rmid.r_spinlock_id, rc);
2295 goto err_probe_rlock_init;
2296 }
2297 dd->use_rlock = 1;
2298 dd->pm_lat = pdata->pm_lat;
2299 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
2300 PM_QOS_DEFAULT_VALUE);
2301 }
2302 mutex_lock(&dd->core_lock);
2303 if (dd->use_rlock)
2304 remote_mutex_lock(&dd->r_lock);
2305 locked = 1;
2306
2307 dd->dev = &pdev->dev;
2308	dd->clk = clk_get(&pdev->dev, "core_clk");
2309	if (IS_ERR(dd->clk)) {
2310		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
2311		rc = PTR_ERR(dd->clk);
2312 goto err_probe_clk_get;
2313 }
2314
2315	dd->pclk = clk_get(&pdev->dev, "iface_clk");
2316	if (IS_ERR(dd->pclk)) {
2317		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
2318		rc = PTR_ERR(dd->pclk);
2319 goto err_probe_pclk_get;
2320 }
2321
2322 if (pdata && pdata->max_clock_speed)
2323 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2324
2325 rc = clk_enable(dd->clk);
2326 if (rc) {
2327		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
2328			__func__);
2329 goto err_probe_clk_enable;
2330 }
2331 clk_enabled = 1;
2332
2333 rc = clk_enable(dd->pclk);
2334 if (rc) {
2335		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
2336			__func__);
2337 goto err_probe_pclk_enable;
2338 }
2339 pclk_enabled = 1;
2340 msm_spi_init_gsbi(dd);
2341 msm_spi_calculate_fifo_size(dd);
2342 if (dd->use_dma) {
2343 rc = msm_spi_init_dma(dd);
2344 if (rc)
2345 goto err_probe_dma;
2346 }
2347
2348 /* Initialize registers */
2349 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
2350 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2351
2352 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
2353 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
2354 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
2355 /*
2356 * The SPI core generates a bogus input overrun error on some targets,
2357 * when a transition from run to reset state occurs and if the FIFO has
2358 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2359 * bit.
2360 */
2361 msm_spi_enable_error_flags(dd);
2362
2363 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2364 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2365 if (rc)
2366 goto err_probe_state;
2367
2368 clk_disable(dd->clk);
2369 clk_disable(dd->pclk);
2370 clk_enabled = 0;
2371 pclk_enabled = 0;
2372
2373 dd->suspended = 0;
2374 dd->transfer_pending = 0;
2375 dd->multi_xfr = 0;
2376 dd->mode = SPI_MODE_NONE;
2377
2378 rc = msm_spi_request_irq(dd, pdev->name, master);
2379 if (rc)
2380 goto err_probe_irq;
2381
2382 msm_spi_disable_irqs(dd);
2383 if (dd->use_rlock)
2384 remote_mutex_unlock(&dd->r_lock);
2385
2386 mutex_unlock(&dd->core_lock);
2387 locked = 0;
2388
2389 rc = spi_register_master(master);
2390 if (rc)
2391 goto err_probe_reg_master;
2392
2393 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2394 if (rc) {
2395		dev_err(&pdev->dev, "failed to create device attributes: %d\n", rc);
2396 goto err_attrs;
2397 }
2398
2399 spi_debugfs_init(dd);
2400
2401 return 0;
2402
2403err_attrs:
2404err_probe_reg_master:
2405 msm_spi_free_irq(dd, master);
2406err_probe_irq:
2407err_probe_state:
2408 msm_spi_teardown_dma(dd);
2409err_probe_dma:
2410 if (pclk_enabled)
2411 clk_disable(dd->pclk);
2412err_probe_pclk_enable:
2413 if (clk_enabled)
2414 clk_disable(dd->clk);
2415err_probe_clk_enable:
2416 clk_put(dd->pclk);
2417err_probe_pclk_get:
2418 clk_put(dd->clk);
2419err_probe_clk_get:
2420 if (locked) {
2421 if (dd->use_rlock)
2422 remote_mutex_unlock(&dd->r_lock);
2423 mutex_unlock(&dd->core_lock);
2424 }
2425err_probe_rlock_init:
2426 msm_spi_release_gsbi(dd);
2427err_probe_ioremap2:
2428 iounmap(dd->base);
2429err_probe_ioremap:
2430 release_mem_region(dd->mem_phys_addr, dd->mem_size);
2431err_probe_reqmem:
2432 destroy_workqueue(dd->workqueue);
2433err_probe_workq:
2434 msm_spi_free_gpios(dd);
2435err_probe_gpio:
2436 if (pdata && pdata->gpio_release)
2437 pdata->gpio_release();
2438err_probe_res2:
2439err_probe_res:
2440 spi_master_put(master);
2441err_probe_exit:
2442 return rc;
2443}
2444
2445#ifdef CONFIG_PM
2446static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2447{
2448 struct spi_master *master = platform_get_drvdata(pdev);
2449 struct msm_spi *dd;
2450 unsigned long flags;
2451
2452 if (!master)
2453 goto suspend_exit;
2454 dd = spi_master_get_devdata(master);
2455 if (!dd)
2456 goto suspend_exit;
2457
2458 /* Make sure nothing is added to the queue while we're suspending */
2459 spin_lock_irqsave(&dd->queue_lock, flags);
2460 dd->suspended = 1;
2461 spin_unlock_irqrestore(&dd->queue_lock, flags);
2462
2463	/* Wait for pending transactions to complete, or for a signal */
2464 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2465 msm_spi_free_gpios(dd);
2466
2467suspend_exit:
2468 return 0;
2469}
2470
2471static int msm_spi_resume(struct platform_device *pdev)
2472{
2473 struct spi_master *master = platform_get_drvdata(pdev);
2474 struct msm_spi *dd;
2475
2476 if (!master)
2477 goto resume_exit;
2478 dd = spi_master_get_devdata(master);
2479 if (!dd)
2480 goto resume_exit;
2481
2482 BUG_ON(msm_spi_request_gpios(dd) != 0);
2483 dd->suspended = 0;
2484resume_exit:
2485 return 0;
2486}
2487#else
2488#define msm_spi_suspend NULL
2489#define msm_spi_resume NULL
2490#endif /* CONFIG_PM */
2491
2492static int __devexit msm_spi_remove(struct platform_device *pdev)
2493{
2494 struct spi_master *master = platform_get_drvdata(pdev);
2495 struct msm_spi *dd = spi_master_get_devdata(master);
2496 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2497
2498	if (pm_qos_request_active(&qos_req_list))
		pm_qos_remove_request(&qos_req_list);
2499 spi_debugfs_exit(dd);
2500 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2501
2502 msm_spi_free_irq(dd, master);
2503 msm_spi_teardown_dma(dd);
2504
2505 if (pdata && pdata->gpio_release)
2506 pdata->gpio_release();
2507
2508 msm_spi_free_gpios(dd);
2509 iounmap(dd->base);
2510 release_mem_region(dd->mem_phys_addr, dd->mem_size);
2511 msm_spi_release_gsbi(dd);
2512 clk_put(dd->clk);
2513 clk_put(dd->pclk);
2514 destroy_workqueue(dd->workqueue);
2515	platform_set_drvdata(pdev, NULL);
2516 spi_unregister_master(master);
2517 spi_master_put(master);
2518
2519 return 0;
2520}
2521
2522static struct platform_driver msm_spi_driver = {
2523 .driver = {
2524 .name = SPI_DRV_NAME,
2525 .owner = THIS_MODULE,
2526 },
2527 .suspend = msm_spi_suspend,
2528 .resume = msm_spi_resume,
2529	.remove		= __devexit_p(msm_spi_remove),
2530};
2531
2532static int __init msm_spi_init(void)
2533{
2534 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2535}
2536module_init(msm_spi_init);
2537
2538static void __exit msm_spi_exit(void)
2539{
2540 platform_driver_unregister(&msm_spi_driver);
2541}
2542module_exit(msm_spi_exit);