/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
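
/*
 * Illustrative only (not part of the original driver): clients reach this
 * controller through the generic SPI core, which ends up calling
 * msm_spi_transfer() below. A minimal sketch, assuming the caller already
 * holds a valid struct spi_device *spi:
 *
 *	u8 tx[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	u8 rx[4];
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len    = sizeof(tx),
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 *
 * spi_sync() blocks until msg.complete fires, i.e. until msm_spi_workq()
 * has finished processing the queued message.
 */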
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <mach/msm_spi.h>
#include <mach/sps.h>
#include <mach/dma.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

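/*
 * Claim the SPI GPIOs listed in spi_rsrcs; if any request fails, the pins
 * already claimed are released again before the error is returned.
 */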
static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

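/*
 * Decode the block-size and FIFO-size fields read from SPI_IO_MODES:
 * 'block' selects the block size in 32-bit words (1/4/8) and 'mult'
 * scales it (x2/x4/x8/x16) to give the FIFO size in words. *block_size
 * is returned in bytes. Reserved encodings return -EINVAL.
 */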
static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

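/*
 * Read SPI_IO_MODES once at init time and derive the input/output FIFO and
 * block sizes. On QUP v1 (SPI_QUP_VERSION_NONE) the result also decides
 * whether Data-Mover mode stays enabled and which burst size it uses;
 * invalid encodings disable DMA altogether.
 */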
static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		/* DM mode is not available for this block size */
		if (dd->input_block_size == 4 || dd->output_block_size == 4)
			dd->use_dma = 0;

		/* DM mode is currently unsupported for different block sizes */
		if (dd->input_block_size != dd->output_block_size)
			dd->use_dma = 0;

		if (dd->use_dma)
			dd->burst_size = max(dd->input_block_size,
						DM_BURST_SIZE);
	}

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
				dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

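/*
 * Poll SPI_STATE until the VALID bit is set. The polling interval is derived
 * from the current clock speed (bounded below by SPI_DELAY_THRESHOLD) and
 * the whole wait is bounded by a timeout of delay * SPI_DEFAULT_TIMEOUT
 * milliseconds.
 */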
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
					enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void
msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
			|| (dd->mode == SPI_BAM_MODE)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
	if (mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;

	if (mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;

	return spi_config;
}

/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
					spi_config, dd->cur_msg->spi->mode);

	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
		/* flags removed from SPI_CONFIG in QUP version-2 */
		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
	else if (dd->mode == SPI_BAM_MODE)
		spi_config |= SPI_CFG_INPUT_FIRST;

	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
 * FIFO-mode. set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words The number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
	/*
	 * n_words cannot exceed fifo_size, and only one READ COUNT
	 * interrupt is generated per transaction, so for transactions
	 * larger than fifo size READ COUNT must be disabled.
	 * For those transactions we usually move to Data Mover mode.
	 */
	if (dd->mode == SPI_FIFO_MODE) {
		if (n_words <= dd->input_fifo_size) {
			writel_relaxed(n_words,
					dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, n_words);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			/* must be zero for FIFO */
			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	} else {
		/* must be zero for BAM and DMOV */
		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
		msm_spi_set_write_count(dd, 0);

		/*
		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
		 * That case is a non-balanced transfer when there is
		 * only a read_buf.
		 */
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			if (dd->write_buf)
				writel_relaxed(0,
						dd->base + SPI_MX_INPUT_COUNT);
			else
				writel_relaxed(n_words,
						dd->base + SPI_MX_INPUT_COUNT);

			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	}
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer. Between transfers the QUP must move to the reset state. A loop
 * issues one BAM transfer at a time. If another transfer is required, it
 * waits for the current transfer to finish, moves to the reset state, and
 * then back to the run state to issue the next transfer.
 * The function does not wait for the last transfer to end, nor, when only
 * a single transfer is required, does it wait for that transfer to end.
 * @timeout max time in jiffies to wait for a transfer to finish.
 * @return zero on success
 */
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
{
	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
	int ret;
	/*
	 * QUP must move to reset mode every 64K-1 bytes of transfer
	 * (counter is 16 bit)
	 */
	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
		/* assert chip select unconditionally */
		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
				dd->base + SPI_IO_CONTROL);
	}

	/* Following flags are required since we are waiting on all transfers */
	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
	/*
	 * on a balanced transaction, BAM will set the flags on the producer
	 * pipe based on the flags set on the consumer pipe
	 */
	prod_flags = (dd->write_buf) ? 0 : cons_flags;

	while (dd->tx_bytes_remaining > 0) {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
					SPI_MAX_TRFR_BTWN_RESETS);
		n_words_xfr = DIV_ROUND_UP(bytes_to_send,
					dd->bytes_per_word);

		msm_spi_set_mx_counts(dd, n_words_xfr);

		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
		if (ret < 0) {
			dev_err(dd->dev,
				"%s: Failed to set QUP state to run",
				__func__);
			goto xfr_err;
		}

		/* enqueue read buffer in BAM */
		if (dd->read_buf) {
			ret = sps_transfer_one(dd->bam.prod.handle,
				dd->cur_transfer->rx_dma + bytes_sent,
				bytes_to_send, dd, prod_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue producer BAM transfer",
				__func__);
				goto xfr_err;
			}
		}

		/* enqueue write buffer in BAM */
		if (dd->write_buf) {
			ret = sps_transfer_one(dd->bam.cons.handle,
				dd->cur_transfer->tx_dma + bytes_sent,
				bytes_to_send, dd, cons_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue consumer BAM transfer",
				__func__);
				goto xfr_err;
			}
		}

		dd->tx_bytes_remaining -= bytes_to_send;

		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
		if (dd->tx_bytes_remaining > 0) {
			if (!wait_for_completion_timeout(
				&dd->transfer_complete, timeout)) {
				dev_err(dd->dev,
					"%s: SPI transaction timeout",
					__func__);
				dd->cur_msg->status = -EIO;
				ret = -EIO;
				goto xfr_err;
			}
			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
			if (ret < 0) {
				dev_err(dd->dev,
					"%s: Failed to set QUP state to reset",
					__func__);
				goto xfr_err;
			}
			init_completion(&dd->transfer_complete);
		}
	}
	return 0;

xfr_err:
	return ret;
}

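/*
 * Build the Data-Mover command lists for the current chunk: box commands
 * move whole bursts, while any tail shorter than burst_size is bounced
 * through the pre-allocated tx/rx padding buffers. The function also
 * programs SPI_MX_INPUT_COUNT/SPI_MX_OUTPUT_COUNT for the chunk.
 */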
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}
	/* We'll send in chunks of SPI_MAX_LEN if larger than
	 * 4K bytes for targets that have only 12 bits in the
	 * QUP_MAX_OUTPUT_CNT register. If the target supports
	 * more than 12 bits then we send the data in chunks of
	 * the infinite_mode value that is defined in the
	 * corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
				dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buf.
	   Since this is set to the correct length, the
	   dummy bytes won't actually be sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
							dd->bytes_per_word);
			writel_relaxed(write_transfers,
					dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
							dd->write_len,
							dd->bytes_per_word);
			writel_relaxed(read_transfers,
					dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
					dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
					dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* An SPI core on targets that do not support infinite mode can send a
   maximum of 4K or 64K transfers at a time, depending on the size of the
   MAX_OUTPUT_COUNT register; therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets that do not support infinite mode,
	   we need to send more chunks if we sent the maximum last time */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
			SPI_OP_MAX_OUTPUT_DONE_FLAG,
			dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which irq occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
				dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
				dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

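/*
 * Input-service interrupt. In DMOV mode it only acknowledges the
 * MAX_INPUT/MAX_OUTPUT done flags and completes the transfer once both
 * directions are done; in FIFO mode it drains the input FIFO word by word
 * and completes when no RX bytes remain.
 */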
static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

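/*
 * Pack up to bytes_per_word bytes from write_buf into a single 32-bit word,
 * most-significant byte first (mirroring the layout documented in
 * msm_spi_read_word_from_fifo), and push it into the output FIFO.
 */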
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
				dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		 SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transactions are handled here.
		   This is the only place we signal completion on TX
		   rather than RX */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

/**
 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
 * @return zero on success or negative error code
 *
 * calls dma_map_single() on the read/write buffers, effectively invalidating
 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 * buffer and copies the data to/from the client buffers
 */
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
					GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
					struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
				nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
						rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

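/*
 * Undo msm_spi_dma_map_buffers() for DMOV transfers and copy back any tail
 * bytes that were received through the rx padding buffer (and, for WR-RD
 * messages, the data staged in the temporary buffer).
 */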
static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					dd->cur_transfer->tx_dma,
					dd->cur_msg_len,
					DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
					dd->cur_transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						dd->cur_transfer->rx_dma,
						dd->cur_msg_len,
						DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						prev_xfr->tx_dma,
						prev_xfr->len,
						DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
					dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
					dd->read_buf + prev_xfr->len,
					dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					dd->cur_transfer->len,
					DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					dd->cur_transfer->len,
					DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
			dd->unaligned_len);
	}
}

static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;

	/* mapped by client */
	if (dd->cur_msg->is_dma_mapped)
		return;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_transfer->rx_buf)
		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
				dd->cur_transfer->len,
				DMA_FROM_DEVICE);

	if (dd->cur_transfer->tx_buf)
		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
				dd->cur_transfer->len,
				DMA_TO_DEVICE);
}

static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_dmov_unmap_buffers(dd);
	else if (dd->mode == SPI_BAM_MODE)
		msm_spi_bam_unmap_buffers(dd);
}

/**
 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
 * the given transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DMA if:
 * 1. It is supported by the HW
 * 2. It is not disabled by platform data
 * 3. Transfer size is greater than 3*block size.
 * 4. Buffers are aligned to cache line.
 * 5. Bytes-per-word is 8, 16 or 32.
 */
static inline bool
msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
	if (!dd->use_dma)
		return false;

	/* check constraints from platform data */
	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
		return false;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return false;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return false;

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		u32 cache_line = dma_get_cache_alignment();

		if (tr->tx_buf) {
			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return false;
		}
		if (tr->rx_buf) {
			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
				return false;
		}

		if (tr->cs_change &&
		    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
			return false;
	}

	return true;
}

/**
 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
 * prepares to process a transfer.
 */
static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
		if (dd->qup_ver) {
			dd->mode = SPI_BAM_MODE;
		} else {
			dd->mode = SPI_DMOV_MODE;
			if (dd->write_len && dd->read_len) {
				dd->tx_bytes_remaining = dd->write_len;
				dd->rx_bytes_remaining = dd->read_len;
			}
		}
	} else {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
	}
}

/**
 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
 * transfer
 */
static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
{
	u32 spi_iom;
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);

	/*if (dd->mode == SPI_BAM_MODE) {
		spi_iom |= SPI_IO_C_NO_TRI_STATE;
		spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
	}*/
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
}

static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
{
	if (mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	return spi_ioc;
}

/**
 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
 * next transfer
 * @return the new set value of SPI_IO_CONTROL
 */
static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
{
	u32 spi_ioc, spi_ioc_orig, chip_select;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc,
						dd->cur_msg->spi->mode);
	/* Set chip-select */
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	return spi_ioc;
}

/**
 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
 * the next transfer
 */
static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
	/* mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
	 * status change in BAM mode */
	u32 mask = (dd->mode == SPI_BAM_MODE) ?
		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
		: 0;
	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}

static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 max_speed;
	u32 read_count;
	u32 timeout;
	u32 spi_ioc;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s: Internal Loopback does not support > fifo size "
			"for write-then-read transactions\n",
			__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s: Internal Loopback does not support > fifo size "
			"for write-then-write transactions\n",
			__func__);
		return;
	}

	if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
		dev_err(dd->dev,
			"%s: Error setting QUP to reset-state",
			__func__);

	msm_spi_set_transfer_mode(dd, bpw, read_count);
	msm_spi_set_mx_counts(dd, read_count);
	if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
		if (msm_spi_dma_map_buffers(dd) < 0) {
			pr_err("Error mapping DMA buffers\n");
			return;
		}
	msm_spi_set_qup_io_modes(dd);
	msm_spi_set_spi_config(dd, bpw);
	msm_spi_set_qup_config(dd, bpw);
	spi_ioc = msm_spi_set_spi_io_control(dd);
	msm_spi_set_qup_op_mask(dd);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	} else if (dd->mode == SPI_BAM_MODE) {
		if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
				__func__);
	}

	/*
	 * On BAM mode, current state here is run.
	 * Only enter the RUN state after the first word is written into
	 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	 * might fire before the first word is written resulting in a
	 * possible race condition.
	 */
	if (dd->mode != SPI_BAM_MODE)
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
			dev_warn(dd->dev,
				"%s: Failed to set QUP to run-state. Mode:%d",
				__func__, dd->mode);
			goto transfer_end;
		}

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev,
				"%s: SPI transaction timeout\n",
				__func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	msm_spi_dma_unmap_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
			dd->base + SPI_IO_CONTROL);
}

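/*
 * Walk the message and compute cur_msg_len plus the per-direction byte
 * counts. read_len/write_len are only filled in for two-transfer messages
 * where the first transfer is write-only (WR-WR or WR-RD); any message with
 * more than one transfer is marked multi_xfr.
 */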
static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

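/*
 * Count how many consecutive transfers, starting at cur_transfer, share the
 * same cs_change setting; they are processed as one group and cur_msg_len
 * is grown to cover them.
 */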
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

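/*
 * Process one queued spi_message. A chip-select GPIO is claimed on demand
 * (unless loopback is in use). When dd->qup_ver is set, each transfer is
 * processed individually with FORCE_CS managed across cs_change boundaries;
 * on older cores transfers are grouped, or staged through the temporary DMA
 * buffer for WR-WR/WR-RD messages, before being processed.
 */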
static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
	    (!(dd->cs_gpios[cs_num].valid)) &&
	    (dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
					&dd->cur_msg->transfers,
					transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);

				if (t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
							struct spi_transfer,
							transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
						&dd->cur_msg->transfers,
						transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
				(msm_spi_use_dma(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_dma_map_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

1604/* workqueue - pull messages from queue & process */
1605static void msm_spi_workq(struct work_struct *work)
1606{
1607 struct msm_spi *dd =
1608 container_of(work, struct msm_spi, work_data);
1609 unsigned long flags;
1610 u32 status_error = 0;
Alok Chauhan66554a12012-08-22 19:54:45 +05301611 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001612
1613 mutex_lock(&dd->core_lock);
1614
1615 /* Don't allow power collapse until we release mutex */
1616 if (pm_qos_request_active(&qos_req_list))
1617 pm_qos_update_request(&qos_req_list,
1618 dd->pm_lat);
1619 if (dd->use_rlock)
1620 remote_mutex_lock(&dd->r_lock);
1621
Alok Chauhan66554a12012-08-22 19:54:45 +05301622 /* Configure the spi clk, miso, mosi and cs gpio */
1623 if (dd->pdata->gpio_config) {
1624 rc = dd->pdata->gpio_config();
1625 if (rc) {
1626 dev_err(dd->dev,
1627 "%s: error configuring GPIOs\n",
1628 __func__);
1629 status_error = 1;
1630 }
1631 }
1632
1633 rc = msm_spi_request_gpios(dd);
1634 if (rc)
1635 status_error = 1;
1636
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001637 clk_prepare_enable(dd->clk);
1638 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639 msm_spi_enable_irqs(dd);
1640
1641 if (!msm_spi_is_valid_state(dd)) {
1642 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1643 __func__);
1644 status_error = 1;
1645 }
1646
1647 spin_lock_irqsave(&dd->queue_lock, flags);
1648 while (!list_empty(&dd->queue)) {
1649 dd->cur_msg = list_entry(dd->queue.next,
1650 struct spi_message, queue);
1651 list_del_init(&dd->cur_msg->queue);
1652 spin_unlock_irqrestore(&dd->queue_lock, flags);
1653 if (status_error)
1654 dd->cur_msg->status = -EIO;
1655 else
1656 msm_spi_process_message(dd);
1657 if (dd->cur_msg->complete)
1658 dd->cur_msg->complete(dd->cur_msg->context);
1659 spin_lock_irqsave(&dd->queue_lock, flags);
1660 }
1661 dd->transfer_pending = 0;
1662 spin_unlock_irqrestore(&dd->queue_lock, flags);
1663
1664 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001665 clk_disable_unprepare(dd->clk);
1666 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001667
Alok Chauhan66554a12012-08-22 19:54:45 +05301668 /* Free the spi clk, miso, mosi, cs gpio */
1669 if (!rc && dd->pdata && dd->pdata->gpio_release)
1670 dd->pdata->gpio_release();
1671 if (!rc)
1672 msm_spi_free_gpios(dd);
1673
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001674 if (dd->use_rlock)
1675 remote_mutex_unlock(&dd->r_lock);
1676
1677 if (pm_qos_request_active(&qos_req_list))
1678 pm_qos_update_request(&qos_req_list,
1679 PM_QOS_DEFAULT_VALUE);
1680
1681 mutex_unlock(&dd->core_lock);
 1682	/* If needed, this could be done after the current message completes,
 1683	   and work continued upon resume; there is no need for that yet. */
1684 if (dd->suspended)
1685 wake_up_interruptible(&dd->continue_suspend);
1686}
1687
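/*
 * spi_master transfer() hook: validates each spi_transfer (clock rate,
 * bits-per-word, buffers), then queues the message and kicks the workqueue.
 * Returns -EBUSY while the controller is suspended.
 */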
1688static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1689{
1690 struct msm_spi *dd;
1691 unsigned long flags;
1692 struct spi_transfer *tr;
1693
1694 dd = spi_master_get_devdata(spi->master);
1695 if (dd->suspended)
1696 return -EBUSY;
1697
1698 if (list_empty(&msg->transfers) || !msg->complete)
1699 return -EINVAL;
1700
1701 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1702 /* Check message parameters */
1703 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1704 (tr->bits_per_word &&
1705 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1706 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
 1707			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
 1708				"tx=%p, rx=%p\n",
1709 tr->speed_hz, tr->bits_per_word,
1710 tr->tx_buf, tr->rx_buf);
1711 return -EINVAL;
1712 }
1713 }
1714
1715 spin_lock_irqsave(&dd->queue_lock, flags);
1716 if (dd->suspended) {
1717 spin_unlock_irqrestore(&dd->queue_lock, flags);
1718 return -EBUSY;
1719 }
1720 dd->transfer_pending = 1;
1721 list_add_tail(&msg->queue, &dd->queue);
1722 spin_unlock_irqrestore(&dd->queue_lock, flags);
1723 queue_work(dd->workqueue, &dd->work_data);
1724 return 0;
1725}
1726
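/*
 * spi_master setup() hook: validates bits_per_word and chip select, then
 * briefly enables the clocks (and GPIOs) to program the chip-select polarity,
 * clock polarity and loopback/input-first bits for this device.
 */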
1727static int msm_spi_setup(struct spi_device *spi)
1728{
1729 struct msm_spi *dd;
1730 int rc = 0;
1731 u32 spi_ioc;
1732 u32 spi_config;
1733 u32 mask;
1734
1735 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1736 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1737 __func__, spi->bits_per_word);
1738 rc = -EINVAL;
1739 }
1740 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1741 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1742 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1743 rc = -EINVAL;
1744 }
1745
1746 if (rc)
1747 goto err_setup_exit;
1748
1749 dd = spi_master_get_devdata(spi->master);
1750
1751 mutex_lock(&dd->core_lock);
1752 if (dd->suspended) {
1753 mutex_unlock(&dd->core_lock);
1754 return -EBUSY;
1755 }
1756
1757 if (dd->use_rlock)
1758 remote_mutex_lock(&dd->r_lock);
1759
Alok Chauhan66554a12012-08-22 19:54:45 +05301760 /* Configure the spi clk, miso, mosi, cs gpio */
1761 if (dd->pdata->gpio_config) {
1762 rc = dd->pdata->gpio_config();
1763 if (rc) {
1764 dev_err(&spi->dev,
1765 "%s: error configuring GPIOs\n",
1766 __func__);
1767 rc = -ENXIO;
1768 goto err_setup_gpio;
1769 }
1770 }
1771
1772 rc = msm_spi_request_gpios(dd);
1773 if (rc) {
1774 rc = -ENXIO;
1775 goto err_setup_gpio;
1776 }
1777
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001778 clk_prepare_enable(dd->clk);
1779 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001780
1781 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1782 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1783 if (spi->mode & SPI_CS_HIGH)
1784 spi_ioc |= mask;
1785 else
1786 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06001787 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001788
1789 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1790
1791 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06001792 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
1793 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1795
1796 /* Ensure previous write completed before disabling the clocks */
1797 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001798 clk_disable_unprepare(dd->clk);
1799 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001800
Alok Chauhan66554a12012-08-22 19:54:45 +05301801 /* Free the spi clk, miso, mosi, cs gpio */
1802 if (dd->pdata && dd->pdata->gpio_release)
1803 dd->pdata->gpio_release();
1804 msm_spi_free_gpios(dd);
1805
1806err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001807 if (dd->use_rlock)
1808 remote_mutex_unlock(&dd->r_lock);
1809 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001810err_setup_exit:
1811 return rc;
1812}
1813
1814#ifdef CONFIG_DEBUG_FS
1815static int debugfs_iomem_x32_set(void *data, u64 val)
1816{
1817 writel_relaxed(val, data);
1818 /* Ensure the previous write completed. */
1819 mb();
1820 return 0;
1821}
1822
1823static int debugfs_iomem_x32_get(void *data, u64 *val)
1824{
1825 *val = readl_relaxed(data);
1826 /* Ensure the previous read completed. */
1827 mb();
1828 return 0;
1829}
1830
1831DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1832 debugfs_iomem_x32_set, "0x%08llx\n");
1833
1834static void spi_debugfs_init(struct msm_spi *dd)
1835{
1836 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1837 if (dd->dent_spi) {
1838 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1841 dd->debugfs_spi_regs[i] =
1842 debugfs_create_file(
1843 debugfs_spi_regs[i].name,
1844 debugfs_spi_regs[i].mode,
1845 dd->dent_spi,
1846 dd->base + debugfs_spi_regs[i].offset,
1847 &fops_iomem_x32);
1848 }
1849 }
1850}
1851
1852static void spi_debugfs_exit(struct msm_spi *dd)
1853{
1854 if (dd->dent_spi) {
1855 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001857 debugfs_remove_recursive(dd->dent_spi);
1858 dd->dent_spi = NULL;
1859 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1860 dd->debugfs_spi_regs[i] = NULL;
1861 }
1862}
1863#else
1864static void spi_debugfs_init(struct msm_spi *dd) {}
1865static void spi_debugfs_exit(struct msm_spi *dd) {}
1866#endif
1867
1868/* ===Device attributes begin=== */
1869static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1870 char *buf)
1871{
1872 struct spi_master *master = dev_get_drvdata(dev);
1873 struct msm_spi *dd = spi_master_get_devdata(master);
1874
1875 return snprintf(buf, PAGE_SIZE,
1876 "Device %s\n"
1877 "rx fifo_size = %d spi words\n"
1878 "tx fifo_size = %d spi words\n"
1879 "use_dma ? %s\n"
1880 "rx block size = %d bytes\n"
1881 "tx block size = %d bytes\n"
1882 "burst size = %d bytes\n"
1883 "DMA configuration:\n"
1884 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1885 "--statistics--\n"
1886 "Rx isrs = %d\n"
1887 "Tx isrs = %d\n"
1888 "DMA error = %d\n"
1889 "--debug--\n"
1890 "NA yet\n",
1891 dev_name(dev),
1892 dd->input_fifo_size,
1893 dd->output_fifo_size,
1894 dd->use_dma ? "yes" : "no",
1895 dd->input_block_size,
1896 dd->output_block_size,
1897 dd->burst_size,
1898 dd->tx_dma_chan,
1899 dd->rx_dma_chan,
1900 dd->tx_dma_crci,
1901 dd->rx_dma_crci,
1902 dd->stat_rx + dd->stat_dmov_rx,
1903 dd->stat_tx + dd->stat_dmov_tx,
1904 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1905 );
1906}
1907
1908/* Reset statistics on write */
1909static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1910 const char *buf, size_t count)
1911{
1912 struct msm_spi *dd = dev_get_drvdata(dev);
1913 dd->stat_rx = 0;
1914 dd->stat_tx = 0;
1915 dd->stat_dmov_rx = 0;
1916 dd->stat_dmov_tx = 0;
1917 dd->stat_dmov_rx_err = 0;
1918 dd->stat_dmov_tx_err = 0;
1919 return count;
1920}
1921
1922static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
1923
1924static struct attribute *dev_attrs[] = {
1925 &dev_attr_stats.attr,
1926 NULL,
1927};
1928
1929static struct attribute_group dev_attr_grp = {
1930 .attrs = dev_attrs,
1931};
1932/* ===Device attributes end=== */
1933
1934/**
1935 * spi_dmov_tx_complete_func - DataMover tx completion callback
1936 *
1937 * Executed in IRQ context (Data Mover's IRQ) DataMover's
1938 * spinlock @msm_dmov_lock held.
1939 */
1940static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
1941 unsigned int result,
1942 struct msm_dmov_errdata *err)
1943{
1944 struct msm_spi *dd;
1945
1946 if (!(result & DMOV_RSLT_VALID)) {
1947 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
1948 return;
1949 }
1950 /* restore original context */
1951 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301952 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 dd->stat_dmov_tx++;
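		/*
		 * tx_irq_called coordinates two expected TX-completion events
		 * (this DMOV callback is presumably paired with the QUP output
		 * interrupt elsewhere in the driver): the first event to arrive
		 * only records itself, the later one calls complete().
		 */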
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301954		if (atomic_inc_return(&dd->tx_irq_called) == 1)
1955 return;
1956 complete(&dd->transfer_complete);
1957 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 /* Error or flush */
1959 if (result & DMOV_RSLT_ERROR) {
1960 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
1961 dd->stat_dmov_tx_err++;
1962 }
1963 if (result & DMOV_RSLT_FLUSH) {
1964 /*
1965 * Flushing normally happens in process of
1966 * removing, when we are waiting for outstanding
1967 * DMA commands to be flushed.
1968 */
1969 dev_info(dd->dev,
1970 "DMA channel flushed (0x%08x)\n", result);
1971 }
1972 if (err)
1973 dev_err(dd->dev,
1974 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
1975 err->flush[0], err->flush[1], err->flush[2],
1976 err->flush[3], err->flush[4], err->flush[5]);
1977 dd->cur_msg->status = -EIO;
1978 complete(&dd->transfer_complete);
1979 }
1980}
1981
1982/**
1983 * spi_dmov_rx_complete_func - DataMover rx completion callback
1984 *
1985 * Executed in IRQ context (Data Mover's IRQ)
1986 * DataMover's spinlock @msm_dmov_lock held.
1987 */
1988static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
1989 unsigned int result,
1990 struct msm_dmov_errdata *err)
1991{
1992 struct msm_spi *dd;
1993
1994 if (!(result & DMOV_RSLT_VALID)) {
1995 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
1996 result, cmd);
1997 return;
1998 }
1999 /* restore original context */
2000 dd = container_of(cmd, struct msm_spi, rx_hdr);
2001 if (result & DMOV_RSLT_DONE) {
2002 dd->stat_dmov_rx++;
2003 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2004 return;
2005 complete(&dd->transfer_complete);
2006 } else {
 2007		/* Error or flush */
2008 if (result & DMOV_RSLT_ERROR) {
2009 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2010 dd->stat_dmov_rx_err++;
2011 }
2012 if (result & DMOV_RSLT_FLUSH) {
2013 dev_info(dd->dev,
2014 "DMA channel flushed(0x%08x)\n", result);
2015 }
2016 if (err)
2017 dev_err(dd->dev,
2018 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2019 err->flush[0], err->flush[1], err->flush[2],
2020 err->flush[3], err->flush[4], err->flush[5]);
2021 dd->cur_msg->status = -EIO;
2022 complete(&dd->transfer_complete);
2023 }
2024}
2025
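/*
 * Size of the single coherent allocation used for DataMover transfers: two
 * spi_dmov_cmd structures (TX and RX, DM_BYTE_ALIGN aligned) plus two
 * burst-sized padding buffers (cache-line aligned); msm_spi_dmov_init()
 * carves the chunk up accordingly.
 */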
2026static inline u32 get_chunk_size(struct msm_spi *dd)
2027{
2028 u32 cache_line = dma_get_cache_alignment();
2029
2030 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
2031 roundup(dd->burst_size, cache_line))*2;
2032}
2033
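/*
 * Flush any outstanding DataMover commands (polling for up to ~500 ms) and
 * release the coherent command/padding chunk allocated by msm_spi_dmov_init().
 */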
Gilad Avidovd0262342012-10-24 16:52:30 -06002034static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035{
2036 int limit = 0;
2037
2038 if (!dd->use_dma)
2039 return;
2040
2041 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002042 msm_dmov_flush(dd->tx_dma_chan, 1);
2043 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044 msleep(10);
2045 }
2046
2047 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
2048 dd->tx_dmov_cmd_dma);
2049 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2050 dd->tx_padding = dd->rx_padding = NULL;
2051}
2052
Gilad Avidovd0262342012-10-24 16:52:30 -06002053static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2054 enum msm_spi_pipe_direction pipe_dir)
2055{
2056 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2057 (&dd->bam.prod) : (&dd->bam.cons);
2058 if (!pipe->teardown_required)
2059 return;
2060
2061 sps_disconnect(pipe->handle);
2062 dma_free_coherent(dd->dev, pipe->config.desc.size,
2063 pipe->config.desc.base, pipe->config.desc.phys_base);
2064 sps_free_endpoint(pipe->handle);
2065 pipe->handle = 0;
2066 pipe->teardown_required = false;
2067}
2068
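/*
 * Set up one of the two BAM pipes between system memory and the QUP: allocate
 * an SPS endpoint, point it at the pipe index from platform data, allocate a
 * coherent descriptor FIFO (SPI_BAM_MAX_DESC_NUM entries), connect it, and
 * register an EOT event that signals dd->transfer_complete.
 */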
2069static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2070 enum msm_spi_pipe_direction pipe_dir)
2071{
2072 int rc = 0;
2073 struct sps_pipe *pipe_handle;
2074 struct sps_register_event event = {0};
2075 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2076 (&dd->bam.prod) : (&dd->bam.cons);
2077 struct sps_connect *pipe_conf = &pipe->config;
2078
2079 pipe->handle = 0;
2080 pipe_handle = sps_alloc_endpoint();
2081 if (!pipe_handle) {
2082 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2083 , __func__);
2084 return -ENOMEM;
2085 }
2086
2087 memset(pipe_conf, 0, sizeof(*pipe_conf));
2088 rc = sps_get_config(pipe_handle, pipe_conf);
2089 if (rc) {
2090 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2091 , __func__);
2092 goto config_err;
2093 }
2094
2095 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2096 pipe_conf->source = dd->bam.handle;
2097 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2098 pipe_conf->mode = SPS_MODE_SRC;
2099 pipe_conf->src_pipe_index =
2100 dd->pdata->bam_producer_pipe_index;
2101 pipe_conf->dest_pipe_index = 0;
2102 } else {
2103 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2104 pipe_conf->destination = dd->bam.handle;
2105 pipe_conf->mode = SPS_MODE_DEST;
2106 pipe_conf->src_pipe_index = 0;
2107 pipe_conf->dest_pipe_index =
2108 dd->pdata->bam_consumer_pipe_index;
2109 }
2110 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2111 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2112 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2113 pipe_conf->desc.size,
2114 &pipe_conf->desc.phys_base,
2115 GFP_KERNEL);
2116 if (!pipe_conf->desc.base) {
 2117		dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
2118 , __func__);
2119 rc = -ENOMEM;
2120 goto config_err;
2121 }
2122
2123 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2124
2125 rc = sps_connect(pipe_handle, pipe_conf);
2126 if (rc) {
2127 dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
2128 goto connect_err;
2129 }
2130
2131 event.mode = SPS_TRIGGER_WAIT;
2132 event.options = SPS_O_EOT;
2133 event.xfer_done = &dd->transfer_complete;
2134 event.user = (void *)dd;
2135 rc = sps_register_event(pipe_handle, &event);
2136 if (rc) {
2137 dev_err(dd->dev, "%s: Failed to register BAM EOT event",
2138 __func__);
2139 goto register_err;
2140 }
2141
2142 pipe->handle = pipe_handle;
2143 pipe->teardown_required = true;
2144 return 0;
2145
2146register_err:
2147 sps_disconnect(pipe_handle);
2148connect_err:
2149 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2150 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2151config_err:
2152 sps_free_endpoint(pipe_handle);
2153
2154 return rc;
2155}
2156
2157static void msm_spi_bam_teardown(struct msm_spi *dd)
2158{
2159 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2160 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2161
2162 if (dd->bam.deregister_required) {
2163 sps_deregister_bam_device(dd->bam.handle);
2164 dd->bam.deregister_required = false;
2165 }
2166}
2167
2168static int msm_spi_bam_init(struct msm_spi *dd)
2169{
2170 struct sps_bam_props bam_props = {0};
2171 u32 bam_handle;
2172 int rc = 0;
2173
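	/*
	 * Reuse the BAM if it is already registered with the SPS driver;
	 * otherwise register it here and remember to deregister it on teardown.
	 */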
2174 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2175 if (rc || !bam_handle) {
2176 bam_props.phys_addr = dd->bam.phys_addr;
2177 bam_props.virt_addr = dd->bam.base;
2178 bam_props.irq = dd->bam.irq;
2179 bam_props.manage = SPS_BAM_MGR_LOCAL;
2180 bam_props.summing_threshold = 0x10;
2181
2182 rc = sps_register_bam_device(&bam_props, &bam_handle);
2183 if (rc) {
2184 dev_err(dd->dev,
2185 "%s: Failed to register BAM device",
2186 __func__);
2187 return rc;
2188 }
2189 dd->bam.deregister_required = true;
2190 }
2191
2192 dd->bam.handle = bam_handle;
2193
2194 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2195 if (rc) {
2196 dev_err(dd->dev,
2197 "%s: Failed to init producer BAM-pipe",
2198 __func__);
2199 goto bam_init_error;
2200 }
2201
2202 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2203 if (rc) {
2204 dev_err(dd->dev,
2205 "%s: Failed to init consumer BAM-pipe",
2206 __func__);
2207 goto bam_init_error;
2208 }
2209
2210 return 0;
2211
2212bam_init_error:
2213 msm_spi_bam_teardown(dd);
2214 return rc;
2215}
2216
2217static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218{
2219 dmov_box *box;
2220 u32 cache_line = dma_get_cache_alignment();
2221
 2222	/* Allocate everything as one chunk, since the total is smaller than a page */
2223
 2224	/* We pass a NULL device, since using the real device would require a
 2225	   coherent_dma_mask in its definition; we're okay with the system pool */
2226 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
2227 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
2228 if (dd->tx_dmov_cmd == NULL)
2229 return -ENOMEM;
2230
 2231	/* DMA addresses should be 64 bit aligned */
2232 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2233 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2234 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2235 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2236
2237 /* Buffers should be aligned to cache line */
2238 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2239 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2240 sizeof(struct spi_dmov_cmd), cache_line);
2241 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
2242 cache_line);
2243 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
2244 cache_line);
2245
2246 /* Setup DM commands */
2247 box = &(dd->rx_dmov_cmd->box);
2248 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2249 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2250 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2251 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2252 offsetof(struct spi_dmov_cmd, cmd_ptr));
2253 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002254
2255 box = &(dd->tx_dmov_cmd->box);
2256 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2257 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2258 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2259 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2260 offsetof(struct spi_dmov_cmd, cmd_ptr));
2261 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002262
2263 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2264 CMD_DST_CRCI(dd->tx_dma_crci);
2265 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2266 SPI_OUTPUT_FIFO;
2267 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2268 CMD_SRC_CRCI(dd->rx_dma_crci);
2269 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2270 SPI_INPUT_FIFO;
2271
2272 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002273 msm_dmov_flush(dd->tx_dma_chan, 1);
2274 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275
2276 return 0;
2277}
2278
Gilad Avidovd0262342012-10-24 16:52:30 -06002279/**
 2280 * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
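 *
 * Illustrative device-tree snippet (hypothetical node name and values, limited
 * to a subset of the properties parsed below; reg, interrupt and GPIO
 * properties are omitted):
 *
 *	spi@f9924000 {
 *		compatible = "qcom,spi-qup-v2";
 *		cell-index = <0>;
 *		spi-max-frequency = <25000000>;
 *		qcom,ver-reg-exists;
 *		qcom,use-bam;
 *		qcom,bam-consumer-pipe-index = <12>;
 *		qcom,bam-producer-pipe-index = <13>;
 *	};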
2281 */
2282struct msm_spi_platform_data *
2283__init msm_spi_dt_to_pdata(struct platform_device *pdev)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002284{
2285 struct device_node *node = pdev->dev.of_node;
2286 struct msm_spi_platform_data *pdata;
Gilad Avidovd0262342012-10-24 16:52:30 -06002287 int rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002288
2289 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2290 if (!pdata) {
2291 pr_err("Unable to allocate platform data\n");
2292 return NULL;
2293 }
2294
2295 of_property_read_u32(node, "spi-max-frequency",
2296 &pdata->max_clock_speed);
Kiran Gundae8f16742012-06-27 10:06:32 +05302297 of_property_read_u32(node, "infinite_mode",
2298 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002299
Gilad Avidovd0262342012-10-24 16:52:30 -06002300 pdata->ver_reg_exists = of_property_read_bool(node
2301 , "qcom,ver-reg-exists");
2302
2303 pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
2304
2305 if (pdata->use_bam) {
2306 rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
2307 &pdata->bam_consumer_pipe_index);
2308 if (rc) {
2309 dev_warn(&pdev->dev,
2310 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2311 pdata->use_bam = false;
2312 }
2313
2314 rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
2315 &pdata->bam_producer_pipe_index);
2316 if (rc) {
2317 dev_warn(&pdev->dev,
2318 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2319 pdata->use_bam = false;
2320 }
2321 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002322 return pdata;
2323}
2324
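/*
 * Read the QUP_HARDWARE_VER register to distinguish BAM-capable (B-family,
 * version 2.1.1 or later) controllers from earlier QUP revisions.
 */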
Gilad Avidovd0262342012-10-24 16:52:30 -06002325static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2326{
2327 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2328 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2329 : SPI_QUP_VERSION_NONE;
2330}
2331
2332static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2333 struct platform_device *pdev, struct spi_master *master)
2334{
2335 struct resource *resource;
2336 size_t bam_mem_size;
2337
2338 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2339 "spi_bam_physical");
2340 if (!resource) {
2341 dev_warn(&pdev->dev,
2342 "%s: Missing spi_bam_physical entry in DT",
2343 __func__);
2344 return -ENXIO;
2345 }
2346
2347 dd->bam.phys_addr = resource->start;
2348 bam_mem_size = resource_size(resource);
2349 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2350 bam_mem_size);
2351 if (!dd->bam.base) {
2352 dev_warn(&pdev->dev,
2353 "%s: Failed to ioremap(spi_bam_physical)",
2354 __func__);
2355 return -ENXIO;
2356 }
2357
2358 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2359 if (dd->bam.irq < 0) {
2360 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2361 __func__);
2362 return -EINVAL;
2363 }
2364
2365 dd->dma_init = msm_spi_bam_init;
2366 dd->dma_teardown = msm_spi_bam_teardown;
2367 return 0;
2368}
2369
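/*
 * Probe: allocate the spi_master, pull configuration from device-tree or
 * platform data, map the QUP registers, set up DMA (DataMover or BAM) when
 * available, configure clocks and the GSBI, reset the QUP, and finally
 * register the master along with its sysfs stats and debugfs entries.
 */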
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002370static int __init msm_spi_probe(struct platform_device *pdev)
2371{
2372 struct spi_master *master;
2373 struct msm_spi *dd;
2374 struct resource *resource;
2375 int rc = -ENXIO;
2376 int locked = 0;
2377 int i = 0;
2378 int clk_enabled = 0;
2379 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002380 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002381 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002382
2383 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2384 if (!master) {
2385 rc = -ENOMEM;
2386 dev_err(&pdev->dev, "master allocation failed\n");
2387 goto err_probe_exit;
2388 }
2389
2390 master->bus_num = pdev->id;
2391 master->mode_bits = SPI_SUPPORTED_MODES;
2392 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2393 master->setup = msm_spi_setup;
2394 master->transfer = msm_spi_transfer;
2395 platform_set_drvdata(pdev, master);
2396 dd = spi_master_get_devdata(master);
2397
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002398 if (pdev->dev.of_node) {
2399 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2400 master->dev.of_node = pdev->dev.of_node;
2401 pdata = msm_spi_dt_to_pdata(pdev);
2402 if (!pdata) {
2403 rc = -ENOMEM;
2404 goto err_probe_exit;
2405 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002406
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002407 rc = of_property_read_u32(pdev->dev.of_node,
2408 "cell-index", &pdev->id);
2409 if (rc)
2410 dev_warn(&pdev->dev,
2411 "using default bus_num %d\n", pdev->id);
2412 else
2413 master->bus_num = pdev->id;
2414
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002415 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2416 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
2417 i, &flags);
2418 }
2419
2420 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2421 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
2422 pdev->dev.of_node, "cs-gpios",
2423 i, &flags);
2424 dd->cs_gpios[i].valid = 0;
2425 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002426 } else {
2427 pdata = pdev->dev.platform_data;
2428 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002429
2430 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2431 resource = platform_get_resource(pdev, IORESOURCE_IO,
2432 i);
2433 dd->spi_gpios[i] = resource ? resource->start : -1;
2434 }
2435
2436 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2437 resource = platform_get_resource(pdev, IORESOURCE_IO,
2438 i + ARRAY_SIZE(spi_rsrcs));
2439 dd->cs_gpios[i].gpio_num = resource ?
2440 resource->start : -1;
2441 dd->cs_gpios[i].valid = 0;
2442 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002443 }
2444
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002445 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002446 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002447 if (!resource) {
2448 rc = -ENXIO;
2449 goto err_probe_res;
2450 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002452 dd->mem_phys_addr = resource->start;
2453 dd->mem_size = resource_size(resource);
2454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002455 if (pdata) {
2456 if (pdata->dma_config) {
2457 rc = pdata->dma_config();
2458 if (rc) {
2459 dev_warn(&pdev->dev,
2460 "%s: DM mode not supported\n",
2461 __func__);
2462 dd->use_dma = 0;
2463 goto skip_dma_resources;
2464 }
2465 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002466 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2467 resource = platform_get_resource(pdev,
2468 IORESOURCE_DMA, 0);
2469 if (resource) {
2470 dd->rx_dma_chan = resource->start;
2471 dd->tx_dma_chan = resource->end;
2472 resource = platform_get_resource(pdev,
2473 IORESOURCE_DMA, 1);
2474 if (!resource) {
2475 rc = -ENXIO;
2476 goto err_probe_res;
2477 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002478
Gilad Avidovd0262342012-10-24 16:52:30 -06002479 dd->rx_dma_crci = resource->start;
2480 dd->tx_dma_crci = resource->end;
2481 dd->use_dma = 1;
2482 master->dma_alignment =
2483 dma_get_cache_alignment();
 2484				dd->dma_init = msm_spi_dmov_init;
2485 dd->dma_teardown = msm_spi_dmov_teardown;
2486 }
2487 } else {
2488 if (!dd->pdata->use_bam)
2489 goto skip_dma_resources;
2490
2491 rc = msm_spi_bam_get_resources(dd, pdev, master);
2492 if (rc) {
2493 dev_warn(dd->dev,
 2494					"%s: Failed to get BAM resources",
2495 __func__);
2496 goto skip_dma_resources;
2497 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002498 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002499 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002500 }
2501
Alok Chauhan66554a12012-08-22 19:54:45 +05302502skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002503
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002504 spin_lock_init(&dd->queue_lock);
2505 mutex_init(&dd->core_lock);
2506 INIT_LIST_HEAD(&dd->queue);
2507 INIT_WORK(&dd->work_data, msm_spi_workq);
2508 init_waitqueue_head(&dd->continue_suspend);
2509 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002510 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002511 if (!dd->workqueue)
2512 goto err_probe_workq;
2513
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002514 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2515 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002516 rc = -ENXIO;
2517 goto err_probe_reqmem;
2518 }
2519
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002520 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2521 if (!dd->base) {
2522 rc = -ENOMEM;
2523 goto err_probe_reqmem;
2524 }
2525
Gilad Avidovd0262342012-10-24 16:52:30 -06002526 if (pdata && pdata->ver_reg_exists) {
2527 enum msm_spi_qup_version ver =
2528 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2529 if (dd->qup_ver != ver)
2530 dev_warn(&pdev->dev,
 2531				"%s: HW version different than initially assumed by probe",
2532 __func__);
2533 }
2534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002535 if (pdata && pdata->rsl_id) {
2536 struct remote_mutex_id rmid;
2537 rmid.r_spinlock_id = pdata->rsl_id;
2538 rmid.delay_us = SPI_TRYLOCK_DELAY;
2539
2540 rc = remote_mutex_init(&dd->r_lock, &rmid);
2541 if (rc) {
2542 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
 2543			"(%s), (rc=%d)\n", __func__,
 2544			rmid.r_spinlock_id, rc);
2545 goto err_probe_rlock_init;
2546 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002548 dd->use_rlock = 1;
2549 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302550 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002551 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002552 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002553
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002554 mutex_lock(&dd->core_lock);
2555 if (dd->use_rlock)
2556 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002557
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002558 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002559 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002560 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002561 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002562 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002563 rc = PTR_ERR(dd->clk);
2564 goto err_probe_clk_get;
2565 }
2566
Matt Wagantallac294852011-08-17 15:44:58 -07002567 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002568 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002569 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002570 rc = PTR_ERR(dd->pclk);
2571 goto err_probe_pclk_get;
2572 }
2573
2574 if (pdata && pdata->max_clock_speed)
2575 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2576
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002577 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002578 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002579 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002580 __func__);
2581 goto err_probe_clk_enable;
2582 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002583
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002584 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002585 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002586 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002587 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002588 __func__);
2589 goto err_probe_pclk_enable;
2590 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002591
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002592 pclk_enabled = 1;
Gilad Avidovd0262342012-10-24 16:52:30 -06002593	/* GSBI does not exist on B-family MSM chips */
2594 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2595 rc = msm_spi_configure_gsbi(dd, pdev);
2596 if (rc)
2597 goto err_probe_gsbi;
2598 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002599
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002600 msm_spi_calculate_fifo_size(dd);
2601 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002602 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002603 if (rc)
2604 goto err_probe_dma;
2605 }
2606
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002607 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002608 /*
2609 * The SPI core generates a bogus input overrun error on some targets,
2610 * when a transition from run to reset state occurs and if the FIFO has
2611 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2612 * bit.
2613 */
2614 msm_spi_enable_error_flags(dd);
2615
2616 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2617 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2618 if (rc)
2619 goto err_probe_state;
2620
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002621 clk_disable_unprepare(dd->clk);
2622 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002623 clk_enabled = 0;
2624 pclk_enabled = 0;
2625
2626 dd->suspended = 0;
2627 dd->transfer_pending = 0;
2628 dd->multi_xfr = 0;
2629 dd->mode = SPI_MODE_NONE;
2630
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002631 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002632 if (rc)
2633 goto err_probe_irq;
2634
2635 msm_spi_disable_irqs(dd);
2636 if (dd->use_rlock)
2637 remote_mutex_unlock(&dd->r_lock);
2638
2639 mutex_unlock(&dd->core_lock);
2640 locked = 0;
2641
2642 rc = spi_register_master(master);
2643 if (rc)
2644 goto err_probe_reg_master;
2645
2646 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2647 if (rc) {
2648 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2649 goto err_attrs;
2650 }
2651
2652 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05302653
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654 return 0;
2655
2656err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002657 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002658err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002659err_probe_irq:
2660err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002661 if (dd->dma_teardown)
2662 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002663err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002664err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002665 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002666 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002667err_probe_pclk_enable:
2668 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002669 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002670err_probe_clk_enable:
2671 clk_put(dd->pclk);
2672err_probe_pclk_get:
2673 clk_put(dd->clk);
2674err_probe_clk_get:
2675 if (locked) {
2676 if (dd->use_rlock)
2677 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002678
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002679 mutex_unlock(&dd->core_lock);
2680 }
2681err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002682err_probe_reqmem:
2683 destroy_workqueue(dd->workqueue);
2684err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002685err_probe_res:
2686 spi_master_put(master);
2687err_probe_exit:
2688 return rc;
2689}
2690
2691#ifdef CONFIG_PM
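/*
 * Platform-bus PM hooks: suspend marks the controller suspended (so
 * msm_spi_transfer() rejects new messages) and waits for queued transfers to
 * drain; resume simply clears the flag.
 */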
2692static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2693{
2694 struct spi_master *master = platform_get_drvdata(pdev);
2695 struct msm_spi *dd;
2696 unsigned long flags;
2697
2698 if (!master)
2699 goto suspend_exit;
2700 dd = spi_master_get_devdata(master);
2701 if (!dd)
2702 goto suspend_exit;
2703
2704 /* Make sure nothing is added to the queue while we're suspending */
2705 spin_lock_irqsave(&dd->queue_lock, flags);
2706 dd->suspended = 1;
2707 spin_unlock_irqrestore(&dd->queue_lock, flags);
2708
2709 /* Wait for transactions to end, or time out */
2710 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002711
2712suspend_exit:
2713 return 0;
2714}
2715
2716static int msm_spi_resume(struct platform_device *pdev)
2717{
2718 struct spi_master *master = platform_get_drvdata(pdev);
2719 struct msm_spi *dd;
2720
2721 if (!master)
2722 goto resume_exit;
2723 dd = spi_master_get_devdata(master);
2724 if (!dd)
2725 goto resume_exit;
2726
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002727 dd->suspended = 0;
2728resume_exit:
2729 return 0;
2730}
2731#else
2732#define msm_spi_suspend NULL
2733#define msm_spi_resume NULL
2734#endif /* CONFIG_PM */
2735
2736static int __devexit msm_spi_remove(struct platform_device *pdev)
2737{
2738 struct spi_master *master = platform_get_drvdata(pdev);
2739 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002740
2741 pm_qos_remove_request(&qos_req_list);
2742 spi_debugfs_exit(dd);
2743 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2744
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002745 if (dd->dma_teardown)
2746 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002747 clk_put(dd->clk);
2748 clk_put(dd->pclk);
2749 destroy_workqueue(dd->workqueue);
 2750	platform_set_drvdata(pdev, NULL);
2751 spi_unregister_master(master);
2752 spi_master_put(master);
2753
2754 return 0;
2755}
2756
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002757static struct of_device_id msm_spi_dt_match[] = {
2758 {
2759 .compatible = "qcom,spi-qup-v2",
2760 },
2761 {}
2762};
2763
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002764static struct platform_driver msm_spi_driver = {
2765 .driver = {
2766 .name = SPI_DRV_NAME,
2767 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002768 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002769 },
2770 .suspend = msm_spi_suspend,
2771 .resume = msm_spi_resume,
 2772	.remove		= __devexit_p(msm_spi_remove),
2773};
2774
2775static int __init msm_spi_init(void)
2776{
2777 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2778}
2779module_init(msm_spi_init);
2780
2781static void __exit msm_spi_exit(void)
2782{
2783 platform_driver_unregister(&msm_spi_driver);
2784}
2785module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002786
2787MODULE_LICENSE("GPL v2");
2788MODULE_VERSION("0.4");
2789MODULE_ALIAS("platform:"SPI_DRV_NAME);