/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos_params.h>
#include <linux/of.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

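/*
 * Decode the block-size and FIFO-depth fields read from SPI_IO_MODES into
 * word/byte counts. For example, block=1 and mult=2 describe a 4-word
 * (16-byte) block and a FIFO that is 32 words deep.
 */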
static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

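/*
 * Read the input/output block and FIFO sizes advertised in SPI_IO_MODES and
 * cache them in the driver data. DM (data mover) mode is disabled whenever
 * the reported geometry cannot be supported.
 */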
static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	/* DM mode is currently unsupported for different block sizes */
	if (dd->input_block_size != dd->output_block_size)
		dd->use_dma = 0;

	if (dd->use_dma)
		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

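/*
 * Drain one word from the input FIFO and scatter its bytes (MSB first) into
 * the current read buffer, advancing to the next transfer of a multi-transfer
 * message once the current read length has been consumed.
 */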
static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Add one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

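/*
 * Request a new QUP operational state and wait until the hardware reports
 * its state machine as valid again. Returns -EIO if the transition does not
 * complete.
 */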
static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

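/*
 * Program the data mover box commands (and, for unaligned tails, the single
 * "pad" commands) for the current chunk, and set the MX input/output counts
 * so that padding bytes at the end of a burst are not actually transmitted.
 */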
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger */
	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
			SPI_MAX_LEN : dd->tx_bytes_remaining;
	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
			       dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buffer.
	   Since this is set to the correct length, the
	   dummy bytes won't actually be sent. */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core can send a maximum of 4K per transfer because there is a HW
   problem with infinite mode.
   Therefore, we send several chunks of 3K or less (depending on how
   much is left).
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* We need to send more chunks, if we sent max last time */
	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
		dd->tx_bytes_remaining -= SPI_MAX_LEN;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant handlers */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

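/*
 * Pack up to bytes_per_word bytes from the write buffer into one FIFO word
 * (MSB first) and push it to the output FIFO, tracking transfer boundaries
 * for multi-transfer messages.
 */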
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		 SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

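/*
 * DMA-map the client buffers for the current transfer. For WR-WR and WR-RD
 * message shapes a bounce buffer (dd->temp_buf) is allocated so the data
 * mover sees one contiguous region.
 */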
static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 * transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers are aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, conditions (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	if (tr->cs_change &&
	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

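/*
 * Execute the current transfer: pick FIFO or data mover mode, program the
 * core accordingly, kick off the transfer and wait (with a timeout scaled to
 * the message length and clock speed) for completion.
 */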
static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
	    (read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s: Internal loopback does not support > fifo size "
			"for write-then-write transactions\n",
			__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
				"timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan);
				msm_dmov_flush(dd->rx_dma_chan);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

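/*
 * Walk the message and record its total length plus the per-direction
 * read/write lengths used to recognize WR-WR and WR-RD message shapes.
 */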
static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

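/*
 * Group consecutive transfers that share the same cs_change setting so they
 * can be processed as a single chunk; returns the number of transfers
 * grouped and leaves the combined length in dd->cur_msg_len.
 */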
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

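/*
 * Process one queued spi_message: claim the chip-select GPIO if needed, then
 * either walk the transfer list directly (newer QUP versions) or group
 * transfers and hand them to msm_spi_process_transfer().
 */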
static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			u32 spi_ioc;
			u32 spi_ioc_orig;
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						 struct spi_transfer,
						 transfer_list);

				spi_ioc = readl_relaxed(dd->base +
							SPI_IO_CONTROL);
				spi_ioc_orig = spi_ioc;
				if (t->cs_change == nxt->cs_change)
					spi_ioc |= SPI_IO_C_FORCE_CS;
				else
					spi_ioc &= ~SPI_IO_C_FORCE_CS;

				if (spi_ioc != spi_ioc_orig) {
					writel_relaxed(spi_ioc,
						dd->base + SPI_IO_CONTROL);
				}
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_enable(dd->clk);
	clk_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable(dd->clk);
	clk_disable(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

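/*
 * Queue a message for the workqueue to process. A client would normally
 * reach this through the generic SPI API; a minimal illustrative sequence,
 * assuming an already-bound spi_device, looks like:
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = sizeof(buf) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_sync(spi, &m);
 *
 * spi_sync() eventually invokes master->transfer, i.e. this function.
 */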
static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
				"tx=%p, rx=%p\n",
				tr->speed_hz, tr->bits_per_word,
				tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_enable(dd->clk);
	clk_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable(dd->clk);
	clk_disable(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);

err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
				debugfs_create_file(
					debugfs_spi_regs[i].name,
					debugfs_spi_regs[i].mode,
					dd->dent_spi,
					dd->base + debugfs_spi_regs[i].offset,
					&fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci=%d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, tx_hdr);
	if (result & DMOV_RSLT_DONE)
		dd->stat_dmov_tx++;
	else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
			dd->stat_dmov_tx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * removing, when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(dd->dev,
				 "DMA channel flushed (0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
		       result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, rx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_rx++;
		if (atomic_inc_return(&dd->rx_irq_called) == 1)
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
			dd->stat_dmov_rx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			dev_info(dd->dev,
				 "DMA channel flushed(0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

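/*
 * Size of the single coherent allocation that holds both DMOV command
 * structures and both padding buffers (see msm_spi_init_dma() below).
 */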
static inline u32 get_chunk_size(struct msm_spi *dd)
{
	u32 cache_line = dma_get_cache_alignment();

	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
		roundup(dd->burst_size, cache_line))*2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
	int limit = 0;

	if (!dd->use_dma)
		return;

	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
		msm_dmov_flush(dd->tx_dma_chan);
		msm_dmov_flush(dd->rx_dma_chan);
		msleep(10);
	}

	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
			  dd->tx_dmov_cmd_dma);
	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
	dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
	dmov_box *box;
	u32 cache_line = dma_get_cache_alignment();

	/* Allocate all as one chunk, since all is smaller than page size */

	/* We pass a NULL device, since using the real device would require a
	   coherent_dma_mask in the device definition; we're okay with using
	   the system pool */
	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
	if (dd->tx_dmov_cmd == NULL)
		return -ENOMEM;

	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
			     sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan);
	msm_dmov_flush(dd->rx_dma_chan);

	return 0;
}

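/*
 * Build platform data from the device tree. Only spi-max-frequency is read
 * here; a minimal (hypothetical) node would look like:
 *
 *	spi@f9923000 {
 *		compatible = "qcom,spi-qup-v2";
 *		spi-max-frequency = <25000000>;
 *	};
 */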
struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_spi_platform_data *pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("Unable to allocate platform data\n");
		return NULL;
	}

	of_property_read_u32(node, "spi-max-frequency",
			     &pdata->max_clock_speed);

	return pdata;
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001758static int __init msm_spi_probe(struct platform_device *pdev)
1759{
1760 struct spi_master *master;
1761 struct msm_spi *dd;
1762 struct resource *resource;
1763 int rc = -ENXIO;
1764 int locked = 0;
1765 int i = 0;
1766 int clk_enabled = 0;
1767 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001768 struct msm_spi_platform_data *pdata;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769
1770 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1771 if (!master) {
1772 rc = -ENOMEM;
1773 dev_err(&pdev->dev, "master allocation failed\n");
1774 goto err_probe_exit;
1775 }
1776
1777 master->bus_num = pdev->id;
1778 master->mode_bits = SPI_SUPPORTED_MODES;
1779 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1780 master->setup = msm_spi_setup;
1781 master->transfer = msm_spi_transfer;
1782 platform_set_drvdata(pdev, master);
1783 dd = spi_master_get_devdata(master);
1784
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001785 if (pdev->dev.of_node) {
1786 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1787 master->dev.of_node = pdev->dev.of_node;
1788 pdata = msm_spi_dt_to_pdata(pdev);
1789 if (!pdata) {
1790 rc = -ENOMEM;
1791 goto err_probe_exit;
1792 }
1793 } else {
1794 pdata = pdev->dev.platform_data;
1795 dd->qup_ver = SPI_QUP_VERSION_NONE;
1796 }
1797
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001798 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001799 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800 if (!resource) {
1801 rc = -ENXIO;
1802 goto err_probe_res;
1803 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001804
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001805 dd->mem_phys_addr = resource->start;
1806 dd->mem_size = resource_size(resource);
1807
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808 if (pdata) {
1809 if (pdata->dma_config) {
1810 rc = pdata->dma_config();
1811 if (rc) {
1812 dev_warn(&pdev->dev,
1813 "%s: DM mode not supported\n",
1814 __func__);
1815 dd->use_dma = 0;
1816 goto skip_dma_resources;
1817 }
1818 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001819 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001820 if (resource) {
1821 dd->rx_dma_chan = resource->start;
1822 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001823 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1824 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001825 if (!resource) {
1826 rc = -ENXIO;
1827 goto err_probe_res;
1828 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001829
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 dd->rx_dma_crci = resource->start;
1831 dd->tx_dma_crci = resource->end;
1832 dd->use_dma = 1;
1833 master->dma_alignment = dma_get_cache_alignment();
1834 }
1835
1836skip_dma_resources:
1837 if (pdata->gpio_config) {
1838 rc = pdata->gpio_config();
1839 if (rc) {
1840 dev_err(&pdev->dev,
1841 "%s: error configuring GPIOs\n",
1842 __func__);
1843 goto err_probe_gpio;
1844 }
1845 }
1846 }
1847
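	/*
	 * SPI signal and chip-select GPIOs are handed in as IORESOURCE_IO
	 * entries; -1 marks a line the board does not route.
	 */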
1848 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001849 resource = platform_get_resource(pdev, IORESOURCE_IO, i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001850 dd->spi_gpios[i] = resource ? resource->start : -1;
1851 }
1852
Harini Jayaramane4c06192011-09-28 16:26:39 -06001853 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001854 resource = platform_get_resource(pdev, IORESOURCE_IO,
1855 i + ARRAY_SIZE(spi_rsrcs));
Harini Jayaramane4c06192011-09-28 16:26:39 -06001856 dd->cs_gpios[i].gpio_num = resource ? resource->start : -1;
1857 dd->cs_gpios[i].valid = 0;
1858 }
1859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001860 rc = msm_spi_request_gpios(dd);
1861 if (rc)
1862 goto err_probe_gpio;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001864 spin_lock_init(&dd->queue_lock);
1865 mutex_init(&dd->core_lock);
1866 INIT_LIST_HEAD(&dd->queue);
1867 INIT_WORK(&dd->work_data, msm_spi_workq);
1868 init_waitqueue_head(&dd->continue_suspend);
1869 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001870 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001871	if (!dd->workqueue) {
1872		rc = -ENOMEM;
		goto err_probe_workq;
	}
1873
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001874 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1875 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001876 rc = -ENXIO;
1877 goto err_probe_reqmem;
1878 }
1879
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001880 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1881 if (!dd->base) {
1882 rc = -ENOMEM;
1883 goto err_probe_reqmem;
1884 }
1885
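	/*
	 * If the core is shared with a remote processor, arbitrate access
	 * through a remote-spinlock-backed mutex and register a PM QoS
	 * request so the transfer path can bound CPU/DMA latency.
	 */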
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001886 if (pdata && pdata->rsl_id) {
1887 struct remote_mutex_id rmid;
1888 rmid.r_spinlock_id = pdata->rsl_id;
1889 rmid.delay_us = SPI_TRYLOCK_DELAY;
1890
1891 rc = remote_mutex_init(&dd->r_lock, &rmid);
1892 if (rc) {
1893 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
1894 "(%s), (rc=%d)\n", rmid.r_spinlock_id,
1895 __func__, rc);
1896 goto err_probe_rlock_init;
1897 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001898
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001899 dd->use_rlock = 1;
1900 dd->pm_lat = pdata->pm_lat;
1901 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
1902 PM_QOS_DEFAULT_VALUE);
1903 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001904
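	/*
	 * Hold the local core_lock (and the remote mutex when present)
	 * while clocks are enabled and the hardware is initialised, so
	 * no other user can touch the core mid-setup.
	 */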
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001905 mutex_lock(&dd->core_lock);
1906 if (dd->use_rlock)
1907 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001908
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001909 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001910 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07001911 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001912 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001913 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001914 rc = PTR_ERR(dd->clk);
1915 goto err_probe_clk_get;
1916 }
1917
Matt Wagantallac294852011-08-17 15:44:58 -07001918 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001919 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001920 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001921 rc = PTR_ERR(dd->pclk);
1922 goto err_probe_pclk_get;
1923 }
1924
1925 if (pdata && pdata->max_clock_speed)
1926 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
1927
1928 rc = clk_enable(dd->clk);
1929 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001930 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001931 __func__);
1932 goto err_probe_clk_enable;
1933 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001935 clk_enabled = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001936 rc = clk_enable(dd->pclk);
1937 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001938 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001939 __func__);
1940 goto err_probe_pclk_enable;
1941 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001942
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001943 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001944 rc = msm_spi_configure_gsbi(dd, pdev);
1945 if (rc)
1946 goto err_probe_gsbi;
1947
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 msm_spi_calculate_fifo_size(dd);
1949 if (dd->use_dma) {
1950 rc = msm_spi_init_dma(dd);
1951 if (rc)
1952 goto err_probe_dma;
1953 }
1954
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001955 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956	/*
1957	 * On some targets the SPI core generates a bogus input-overrun error
1958	 * when it transitions from the run to the reset state while the FIFO
1959	 * holds an odd number of entries.  Hence we disable the
1960	 * INPUT_OVER_RUN_ERR_EN bit.
1961	 */
1962 msm_spi_enable_error_flags(dd);
1963
1964 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
1965 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1966 if (rc)
1967 goto err_probe_state;
1968
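	/* Initialisation is complete; gate the clocks until they are needed. */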
1969 clk_disable(dd->clk);
1970 clk_disable(dd->pclk);
1971 clk_enabled = 0;
1972 pclk_enabled = 0;
1973
1974 dd->suspended = 0;
1975 dd->transfer_pending = 0;
1976 dd->multi_xfr = 0;
1977 dd->mode = SPI_MODE_NONE;
1978
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001979 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001980 if (rc)
1981 goto err_probe_irq;
1982
1983 msm_spi_disable_irqs(dd);
1984 if (dd->use_rlock)
1985 remote_mutex_unlock(&dd->r_lock);
1986
1987 mutex_unlock(&dd->core_lock);
1988 locked = 0;
1989
1990 rc = spi_register_master(master);
1991 if (rc)
1992 goto err_probe_reg_master;
1993
1994 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
1995 if (rc) {
1996		dev_err(&pdev->dev, "failed to create device attributes: %d\n", rc);
1997 goto err_attrs;
1998 }
1999
2000 spi_debugfs_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002001 return 0;
2002
2003err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002004 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002005err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002006err_probe_irq:
2007err_probe_state:
2008 msm_spi_teardown_dma(dd);
2009err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002010err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002011 if (pclk_enabled)
2012 clk_disable(dd->pclk);
2013err_probe_pclk_enable:
2014 if (clk_enabled)
2015 clk_disable(dd->clk);
2016err_probe_clk_enable:
2017 clk_put(dd->pclk);
2018err_probe_pclk_get:
2019 clk_put(dd->clk);
2020err_probe_clk_get:
2021 if (locked) {
2022 if (dd->use_rlock)
2023 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002024
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002025 mutex_unlock(&dd->core_lock);
2026 }
2027err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002028err_probe_reqmem:
2029 destroy_workqueue(dd->workqueue);
2030err_probe_workq:
2031 msm_spi_free_gpios(dd);
2032err_probe_gpio:
2033 if (pdata && pdata->gpio_release)
2034 pdata->gpio_release();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035err_probe_res:
2036 spi_master_put(master);
2037err_probe_exit:
2038 return rc;
2039}
2040
2041#ifdef CONFIG_PM
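/*
 * Legacy platform-driver power management: block new transfers, wait
 * for any in-flight transfer to finish, and release the bus GPIOs on
 * suspend; reclaim them on resume.
 */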
2042static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2043{
2044 struct spi_master *master = platform_get_drvdata(pdev);
2045 struct msm_spi *dd;
2046 unsigned long flags;
2047
2048 if (!master)
2049 goto suspend_exit;
2050 dd = spi_master_get_devdata(master);
2051 if (!dd)
2052 goto suspend_exit;
2053
2054 /* Make sure nothing is added to the queue while we're suspending */
2055 spin_lock_irqsave(&dd->queue_lock, flags);
2056 dd->suspended = 1;
2057 spin_unlock_irqrestore(&dd->queue_lock, flags);
2058
2059 /* Wait for transactions to end, or time out */
2060 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2061 msm_spi_free_gpios(dd);
2062
2063suspend_exit:
2064 return 0;
2065}
2066
2067static int msm_spi_resume(struct platform_device *pdev)
2068{
2069 struct spi_master *master = platform_get_drvdata(pdev);
2070 struct msm_spi *dd;
2071
2072 if (!master)
2073 goto resume_exit;
2074 dd = spi_master_get_devdata(master);
2075 if (!dd)
2076 goto resume_exit;
2077
2078 BUG_ON(msm_spi_request_gpios(dd) != 0);
2079 dd->suspended = 0;
2080resume_exit:
2081 return 0;
2082}
2083#else
2084#define msm_spi_suspend NULL
2085#define msm_spi_resume NULL
2086#endif /* CONFIG_PM */
2087
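/*
 * Unbind the controller: drop the QoS request, debugfs and sysfs
 * entries, tear down DMA and GPIOs, release the clocks and workqueue,
 * and unregister the spi_master.
 */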
2088static int __devexit msm_spi_remove(struct platform_device *pdev)
2089{
2090 struct spi_master *master = platform_get_drvdata(pdev);
2091 struct msm_spi *dd = spi_master_get_devdata(master);
2092 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2093
2094 pm_qos_remove_request(&qos_req_list);
2095 spi_debugfs_exit(dd);
2096 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2097
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002098 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002099 if (pdata && pdata->gpio_release)
2100 pdata->gpio_release();
2101
2102 msm_spi_free_gpios(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002103 clk_put(dd->clk);
2104 clk_put(dd->pclk);
2105 destroy_workqueue(dd->workqueue);
2106	platform_set_drvdata(pdev, NULL);
2107 spi_unregister_master(master);
2108 spi_master_put(master);
2109
2110 return 0;
2111}
2112
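/* Device-tree match table for QUP v2 based controllers. */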
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002113static const struct of_device_id msm_spi_dt_match[] = {
2114 {
2115 .compatible = "qcom,spi-qup-v2",
2116 },
2117 {}
2118};
2119
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002120static struct platform_driver msm_spi_driver = {
2121 .driver = {
2122 .name = SPI_DRV_NAME,
2123 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002124 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002125 },
2126 .suspend = msm_spi_suspend,
2127 .resume = msm_spi_resume,
2128	.remove		= __devexit_p(msm_spi_remove),
2129};
2130
2131static int __init msm_spi_init(void)
2132{
2133 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2134}
2135module_init(msm_spi_init);
2136
2137static void __exit msm_spi_exit(void)
2138{
2139 platform_driver_unregister(&msm_spi_driver);
2140}
2141module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002142
2143MODULE_LICENSE("GPL v2");
2144MODULE_VERSION("0.4");
2145MODULE_ALIAS("platform:"SPI_DRV_NAME);