/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <mach/msm_spi.h>
#include <mach/sps.h>
#include <mach/dma.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
                                        struct platform_device *pdev)
{
        struct resource *resource;
        unsigned long gsbi_mem_phys_addr;
        size_t gsbi_mem_size;
        void __iomem *gsbi_base;

        resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!resource)
                return 0;

        gsbi_mem_phys_addr = resource->start;
        gsbi_mem_size = resource_size(resource);
        if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
                                        gsbi_mem_size, SPI_DRV_NAME))
                return -ENXIO;

        gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
                                        gsbi_mem_size);
        if (!gsbi_base)
                return -ENXIO;

        /* Set GSBI to SPI mode */
        writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

        return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
        writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
        msm_spi_set_state(dd, SPI_OP_STATE_RESET);
        writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
        writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
        writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
        if (dd->qup_ver)
                writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

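/* Request the SPI core GPIOs described by spi_rsrcs; on failure, free any already requested */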
static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
        int i;
        int result = 0;

        for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
                if (dd->spi_gpios[i] >= 0) {
                        result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
                        if (result) {
                                dev_err(dd->dev, "%s: gpio_request for pin %d "
                                        "failed with error %d\n", __func__,
                                        dd->spi_gpios[i], result);
                                goto error;
                        }
                }
        }
        return 0;

error:
        for (; --i >= 0;) {
                if (dd->spi_gpios[i] >= 0)
                        gpio_free(dd->spi_gpios[i]);
        }
        return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
                if (dd->spi_gpios[i] >= 0)
                        gpio_free(dd->spi_gpios[i]);
        }

        for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
                if (dd->cs_gpios[i].valid) {
                        gpio_free(dd->cs_gpios[i].gpio_num);
                        dd->cs_gpios[i].valid = 0;
                }
        }
}

/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk the clock for which to find nearest lower rate
 * @rate clock frequency in Hz
 * @return nearest lower rate or negative error value
 *
 * The public clock API exposes clk_round_rate(), which is a ceiling function.
 * This function is the corresponding floor function, implemented as a binary
 * search on top of the ceiling function.
 */
static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
{
        long lowest_available, nearest_low, step_size, cur;
        long step_direction = -1;
        long guess = rate;
        int max_steps = 10;

        cur = clk_round_rate(clk, rate);
        if (cur == rate)
                return rate;

        /* if we got here then: cur > rate */
        lowest_available = clk_round_rate(clk, 0);
        if (lowest_available > rate)
                return -EINVAL;

        step_size = (rate - lowest_available) >> 1;
        nearest_low = lowest_available;

        while (max_steps-- && step_size) {
                guess += step_size * step_direction;

                cur = clk_round_rate(clk, guess);

                if ((cur < rate) && (cur > nearest_low))
                        nearest_low = cur;

                /*
                 * if we stepped too far, then start stepping in the other
                 * direction with half the step size
                 */
                if (((cur > rate) && (step_direction > 0))
                    || ((cur < rate) && (step_direction < 0))) {
                        step_direction = -step_direction;
                        step_size >>= 1;
                }
        }
        return nearest_low;
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
        long rate;
        int rc;

        rate = msm_spi_clk_max_rate(dd->clk, speed);
        if (rate < 0) {
                dev_err(dd->dev,
                        "%s: no match found for requested clock frequency:%d",
                        __func__, speed);
                return;
        }

        rc = clk_set_rate(dd->clk, rate);
        if (!rc)
                dd->clock_speed = rate;
}

static int msm_spi_calculate_size(int *fifo_size,
                                int *block_size,
                                int block,
                                int mult)
{
        int words;

        switch (block) {
        case 0:
                words = 1; /* 4 bytes */
                break;
        case 1:
                words = 4; /* 16 bytes */
                break;
        case 2:
                words = 8; /* 32 bytes */
                break;
        default:
                return -EINVAL;
        }

        switch (mult) {
        case 0:
                *fifo_size = words * 2;
                break;
        case 1:
                *fifo_size = words * 4;
                break;
        case 2:
                *fifo_size = words * 8;
                break;
        case 3:
                *fifo_size = words * 16;
                break;
        default:
                return -EINVAL;
        }

        *block_size = words * sizeof(u32); /* in bytes */
        return 0;
}

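/* Advance cur_transfer to the next transfer in the current message (if any) and refresh the read/write buffer pointers */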
static void get_next_transfer(struct msm_spi *dd)
{
        struct spi_transfer *t = dd->cur_transfer;

        if (t->transfer_list.next != &dd->cur_msg->transfers) {
                dd->cur_transfer = list_entry(t->transfer_list.next,
                                                struct spi_transfer,
                                                transfer_list);
                dd->write_buf = dd->cur_transfer->tx_buf;
                dd->read_buf = dd->cur_transfer->rx_buf;
        }
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
        u32 spi_iom;
        int block;
        int mult;

        spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

        block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
        mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
        if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
                                        block, mult)) {
                goto fifo_size_err;
        }

        block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
        mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
        if (msm_spi_calculate_size(&dd->output_fifo_size,
                                        &dd->output_block_size, block, mult)) {
                goto fifo_size_err;
        }
        if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
                /* DM mode is not available for this block size */
                if (dd->input_block_size == 4 || dd->output_block_size == 4)
                        dd->use_dma = 0;

                /* DM mode is currently unsupported for different block sizes */
                if (dd->input_block_size != dd->output_block_size)
                        dd->use_dma = 0;

                if (dd->use_dma)
                        dd->burst_size = max(dd->input_block_size,
                                                DM_BURST_SIZE);
        }

        return;

fifo_size_err:
        dd->use_dma = 0;
        pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
        return;
}

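/* Pop one word from the input FIFO and unpack up to bytes_per_word bytes (MSB first) into read_buf */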
static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
        u32 data_in;
        int i;
        int shift;

        data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
        if (dd->read_buf) {
                for (i = 0; (i < dd->bytes_per_word) &&
                             dd->rx_bytes_remaining; i++) {
                        /* The data format depends on bytes_per_word:
                           4 bytes: 0x12345678
                           3 bytes: 0x00123456
                           2 bytes: 0x00001234
                           1 byte : 0x00000012
                        */
                        shift = 8 * (dd->bytes_per_word - i - 1);
                        *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
                        dd->rx_bytes_remaining--;
                }
        } else {
                if (dd->rx_bytes_remaining >= dd->bytes_per_word)
                        dd->rx_bytes_remaining -= dd->bytes_per_word;
                else
                        dd->rx_bytes_remaining = 0;
        }

        dd->read_xfr_cnt++;
        if (dd->multi_xfr) {
                if (!dd->rx_bytes_remaining)
                        dd->read_xfr_cnt = 0;
                else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
                                                dd->read_len) {
                        struct spi_transfer *t = dd->cur_rx_transfer;
                        if (t->transfer_list.next != &dd->cur_msg->transfers) {
                                t = list_entry(t->transfer_list.next,
                                                struct spi_transfer,
                                                transfer_list);
                                dd->read_buf = t->rx_buf;
                                dd->read_len = t->len;
                                dd->read_xfr_cnt = 0;
                                dd->cur_rx_transfer = t;
                        }
                }
        }
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
        u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

        return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned long delay_usecs)
{
        /*
         * For smaller delays, the context-switch overhead of usleep
         * would outweigh its benefit, so busy-wait instead
         */
        if (delay_usecs > 20)
                usleep_range(delay_usecs, delay_usecs);
        else if (delay_usecs)
                udelay(delay_usecs);
}

static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
        unsigned long delay = 0;
        unsigned long timeout = 0;

        if (dd->clock_speed == 0)
                return -EINVAL;
        /*
         * Based on the SPI clock speed, sufficient time
         * should be given for the SPI state transition
         * to occur
         */
        delay = (10 * USEC_PER_SEC) / dd->clock_speed;
        /*
         * For small delay values, the default timeout would
         * be one jiffy
         */
        if (delay < SPI_DELAY_THRESHOLD)
                delay = SPI_DELAY_THRESHOLD;

        /* Adding one to round off to the nearest jiffy */
        timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
        while (!msm_spi_is_valid_state(dd)) {
                if (time_after(jiffies, timeout)) {
                        if (!msm_spi_is_valid_state(dd)) {
                                if (dd->cur_msg)
                                        dd->cur_msg->status = -EIO;
                                dev_err(dd->dev, "%s: SPI operational state "
                                        "not valid\n", __func__);
                                return -ETIMEDOUT;
                        } else
                                return 0;
                }
                msm_spi_udelay(delay);
        }
        return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
                                        enum msm_spi_state state)
{
        enum msm_spi_state cur_state;
        if (msm_spi_wait_valid(dd))
                return -EIO;
        cur_state = readl_relaxed(dd->base + SPI_STATE);
        /* Per spec:
           For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
        if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
                        (state == SPI_OP_STATE_RESET)) {
                writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
                writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
        } else {
                writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
                                        dd->base + SPI_STATE);
        }
        if (msm_spi_wait_valid(dd))
                return -EIO;

        return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void
msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
        *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

        if (n != (*config & SPI_CFG_N))
                *config = (*config & ~SPI_CFG_N) | n;

        if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
                        || (dd->mode == SPI_BAM_MODE)) {
                if (dd->read_buf == NULL)
                        *config |= SPI_NO_INPUT;
                if (dd->write_buf == NULL)
                        *config |= SPI_NO_OUTPUT;
        }
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
        if (mode & SPI_LOOP)
                spi_config |= SPI_CFG_LOOPBACK;
        else
                spi_config &= ~SPI_CFG_LOOPBACK;

        if (mode & SPI_CPHA)
                spi_config &= ~SPI_CFG_INPUT_FIRST;
        else
                spi_config |= SPI_CFG_INPUT_FIRST;

        return spi_config;
}

/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
        u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
        spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
                                        spi_config, dd->cur_msg->spi->mode);

        if (dd->qup_ver == SPI_QUP_VERSION_NONE)
                /* flags removed from SPI_CONFIG in QUP version-2 */
                msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
        else if (dd->mode == SPI_BAM_MODE)
                spi_config |= SPI_CFG_INPUT_FIRST;

        writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
 * FIFO mode; set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words The number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
        /*
         * n_words cannot exceed fifo_size, and only one READ COUNT
         * interrupt is generated per transaction, so for transactions
         * larger than fifo size READ COUNT must be disabled.
         * For those transactions we usually move to Data Mover mode.
         */
        if (dd->mode == SPI_FIFO_MODE) {
                if (n_words <= dd->input_fifo_size) {
                        writel_relaxed(n_words,
                                        dd->base + SPI_MX_READ_COUNT);
                        msm_spi_set_write_count(dd, n_words);
                } else {
                        writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
                        msm_spi_set_write_count(dd, 0);
                }
                if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
                        /* must be zero for FIFO */
                        writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
                        writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
                }
        } else {
                /* must be zero for BAM and DMOV */
                writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
                msm_spi_set_write_count(dd, 0);

                /*
                 * for DMA transfers, both QUP_MX_INPUT_COUNT and
                 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
                 * That case is a non-balanced transfer when there is
                 * only a read_buf.
                 */
                if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
                        if (dd->write_buf)
                                writel_relaxed(0,
                                                dd->base + SPI_MX_INPUT_COUNT);
                        else
                                writel_relaxed(n_words,
                                                dd->base + SPI_MX_INPUT_COUNT);

                        writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
                }
        }
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer. Between transfers the QUP must go through the reset state. A
 * loop issues one BAM transfer at a time. If another transfer is
 * required, it waits for the previous transfer to finish, moves the QUP
 * to the reset state, then back to the run state to issue the next transfer.
 * The function does not wait for the last transfer to end, and if only
 * a single transfer is required, it does not wait for that one either.
 * @timeout max time in jiffies to wait for a transfer to finish.
 * @return zero on success
 */
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
{
        u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
        int ret;
        /*
         * QUP must move to reset mode every 64K-1 bytes of transfer
         * (counter is 16 bit)
         */
        if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
                /* assert chip select unconditionally */
                u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
                if (!(spi_ioc & SPI_IO_C_FORCE_CS))
                        writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
                                dd->base + SPI_IO_CONTROL);
        }

        /* Following flags are required since we are waiting on all transfers */
        cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
        /*
         * on a balanced transaction, BAM will set the flags on the producer
         * pipe based on the flags set on the consumer pipe
         */
        prod_flags = (dd->write_buf) ? 0 : cons_flags;

        while (dd->tx_bytes_remaining > 0) {
                bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
                bytes_to_send = min_t(u32, dd->tx_bytes_remaining
                                                , SPI_MAX_TRFR_BTWN_RESETS);
                n_words_xfr = DIV_ROUND_UP(bytes_to_send
                                                , dd->bytes_per_word);

                msm_spi_set_mx_counts(dd, n_words_xfr);

                ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
                if (ret < 0) {
                        dev_err(dd->dev,
                                "%s: Failed to set QUP state to run",
                                __func__);
                        goto xfr_err;
                }

                /* enqueue read buffer in BAM */
                if (dd->read_buf) {
                        ret = sps_transfer_one(dd->bam.prod.handle,
                                dd->cur_transfer->rx_dma + bytes_sent,
                                bytes_to_send, dd, prod_flags);
                        if (ret < 0) {
                                dev_err(dd->dev,
                                "%s: Failed to queue producer BAM transfer",
                                __func__);
                                goto xfr_err;
                        }
                }

                /* enqueue write buffer in BAM */
                if (dd->write_buf) {
                        ret = sps_transfer_one(dd->bam.cons.handle,
                                dd->cur_transfer->tx_dma + bytes_sent,
                                bytes_to_send, dd, cons_flags);
                        if (ret < 0) {
                                dev_err(dd->dev,
                                "%s: Failed to queue consumer BAM transfer",
                                __func__);
                                goto xfr_err;
                        }
                }

                dd->tx_bytes_remaining -= bytes_to_send;

                /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
                if (dd->tx_bytes_remaining > 0) {
                        if (!wait_for_completion_timeout(
                                &dd->transfer_complete, timeout)) {
                                dev_err(dd->dev,
                                        "%s: SPI transaction timeout",
                                        __func__);
                                dd->cur_msg->status = -EIO;
                                ret = -EIO;
                                goto xfr_err;
                        }
                        ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
                        if (ret < 0) {
                                dev_err(dd->dev,
                                        "%s: Failed to set QUP state to reset",
                                        __func__);
                                goto xfr_err;
                        }
                        init_completion(&dd->transfer_complete);
                }
        }
        return 0;

xfr_err:
        return ret;
}

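/* Program the data-mover box/single commands and MX counts for the next DMOV chunk of the current transfer */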
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
        dmov_box *box;
        int bytes_to_send, num_rows, bytes_sent;
        u32 num_transfers;

        atomic_set(&dd->rx_irq_called, 0);
        atomic_set(&dd->tx_irq_called, 0);
        if (dd->write_len && !dd->read_len) {
                /* WR-WR transfer */
                bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
                dd->write_buf = dd->temp_buf;
        } else {
                bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
                /* For WR-RD transfer, bytes_sent can be negative */
                if (bytes_sent < 0)
                        bytes_sent = 0;
        }
        /* We'll send in chunks of SPI_MAX_LEN if larger than
         * 4K bytes for targets that have only 12 bits in
         * QUP_MAX_OUTPUT_CNT register. If the target supports
         * more than 12 bits, we send the data in chunks of
         * the infinite_mode value that is defined in the
         * corresponding board file.
         */
        if (!dd->pdata->infinite_mode)
                dd->max_trfr_len = SPI_MAX_LEN;
        else
                dd->max_trfr_len = (dd->pdata->infinite_mode) *
                                        (dd->bytes_per_word);

        bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
                                dd->max_trfr_len);

        num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
        dd->unaligned_len = bytes_to_send % dd->burst_size;
        num_rows = bytes_to_send / dd->burst_size;

        dd->mode = SPI_DMOV_MODE;

        if (num_rows) {
                /* src in 16 MSB, dst in 16 LSB */
                box = &dd->tx_dmov_cmd->box;
                box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
                box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
                box->num_rows = (num_rows << 16) | num_rows;
                box->row_offset = (dd->burst_size << 16) | 0;

                box = &dd->rx_dmov_cmd->box;
                box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
                box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
                box->num_rows = (num_rows << 16) | num_rows;
                box->row_offset = (0 << 16) | dd->burst_size;

                dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
                                DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
                                offsetof(struct spi_dmov_cmd, box));
                dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
                                DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
                                offsetof(struct spi_dmov_cmd, box));
        } else {
                dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
                                DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
                                offsetof(struct spi_dmov_cmd, single_pad));
                dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
                                DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
                                offsetof(struct spi_dmov_cmd, single_pad));
        }

        if (!dd->unaligned_len) {
                dd->tx_dmov_cmd->box.cmd |= CMD_LC;
                dd->rx_dmov_cmd->box.cmd |= CMD_LC;
        } else {
                dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
                dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
                u32 offset = dd->cur_transfer->len - dd->unaligned_len;

                if ((dd->multi_xfr) && (dd->read_len <= 0))
                        offset = dd->cur_msg_len - dd->unaligned_len;

                dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
                dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

                memset(dd->tx_padding, 0, dd->burst_size);
                memset(dd->rx_padding, 0, dd->burst_size);
                if (dd->write_buf)
                        memcpy(dd->tx_padding, dd->write_buf + offset,
                                dd->unaligned_len);

                tx_cmd->src = dd->tx_padding_dma;
                rx_cmd->dst = dd->rx_padding_dma;
                tx_cmd->len = rx_cmd->len = dd->burst_size;
        }
        /* This also takes care of the padding dummy buf.
           Since this is set to the correct length, the
           dummy bytes won't actually be sent. */
        if (dd->multi_xfr) {
                u32 write_transfers = 0;
                u32 read_transfers = 0;

                if (dd->write_len > 0) {
                        write_transfers = DIV_ROUND_UP(dd->write_len,
                                                        dd->bytes_per_word);
                        writel_relaxed(write_transfers,
                                        dd->base + SPI_MX_OUTPUT_COUNT);
                }
                if (dd->read_len > 0) {
                        /*
                         * The read following a write transfer must take
                         * into account that the bytes pertaining to
                         * the write transfer need to be discarded
                         * before the actual read begins.
                         */
                        read_transfers = DIV_ROUND_UP(dd->read_len +
                                                        dd->write_len,
                                                        dd->bytes_per_word);
                        writel_relaxed(read_transfers,
                                        dd->base + SPI_MX_INPUT_COUNT);
                }
        } else {
                if (dd->write_buf)
                        writel_relaxed(num_transfers,
                                        dd->base + SPI_MX_OUTPUT_COUNT);
                if (dd->read_buf)
                        writel_relaxed(num_transfers,
                                        dd->base + SPI_MX_INPUT_COUNT);
        }
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
        dma_coherent_pre_ops();
        if (dd->write_buf)
                msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
        if (dd->read_buf)
                msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send a
   maximum of 4K or 64K bytes per transfer, depending on the size of the
   MAX_OUTPUT_COUNT register; therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
        /* By now we should have sent all the bytes in FIFO mode,
         * However to make things right, we'll check anyway.
         */
        if (dd->mode != SPI_DMOV_MODE)
                return 0;

        /* On targets which do not support infinite mode,
           we need to send more chunks if we sent the maximum last time */
        if (dd->tx_bytes_remaining > dd->max_trfr_len) {
                dd->tx_bytes_remaining -= dd->max_trfr_len;
                if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
                        return 0;
                dd->read_len = dd->write_len = 0;
                msm_spi_setup_dm_transfer(dd);
                msm_spi_enqueue_dm_commands(dd);
                if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
                        return 0;
                return 1;
        } else if (dd->read_len && dd->write_len) {
                dd->tx_bytes_remaining -= dd->cur_transfer->len;
                if (list_is_last(&dd->cur_transfer->transfer_list,
                                        &dd->cur_msg->transfers))
                        return 0;
                get_next_transfer(dd);
                if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
                        return 0;
                dd->tx_bytes_remaining = dd->read_len + dd->write_len;
                dd->read_buf = dd->temp_buf;
                dd->read_len = dd->write_len = -1;
                msm_spi_setup_dm_transfer(dd);
                msm_spi_enqueue_dm_commands(dd);
                if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
                        return 0;
                return 1;
        }
        return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
        writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
                        SPI_OP_MAX_OUTPUT_DONE_FLAG,
                        dd->base + SPI_OPERATIONAL);
        /* Ensure done flag was cleared before proceeding further */
        mb();
}

/* Figure out which IRQ occurred and call the relevant handlers */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
        u32 op, ret = IRQ_NONE;
        struct msm_spi *dd = dev_id;

        if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
            readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
                struct spi_master *master = dev_get_drvdata(dd->dev);
                ret |= msm_spi_error_irq(irq, master);
        }

        op = readl_relaxed(dd->base + SPI_OPERATIONAL);
        if (op & SPI_OP_INPUT_SERVICE_FLAG) {
                writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
                                dd->base + SPI_OPERATIONAL);
                /*
                 * Ensure service flag was cleared before further
                 * processing of interrupt.
                 */
                mb();
                ret |= msm_spi_input_irq(irq, dev_id);
        }

        if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
                writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
                                dd->base + SPI_OPERATIONAL);
                /*
                 * Ensure service flag was cleared before further
                 * processing of interrupt.
                 */
                mb();
                ret |= msm_spi_output_irq(irq, dev_id);
        }

        if (dd->done) {
                complete(&dd->transfer_complete);
                dd->done = 0;
        }
        return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
        struct msm_spi *dd = dev_id;

        dd->stat_rx++;

        if (dd->mode == SPI_MODE_NONE)
                return IRQ_HANDLED;

        if (dd->mode == SPI_DMOV_MODE) {
                u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
                if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
                    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
                        msm_spi_ack_transfer(dd);
                        if (dd->unaligned_len == 0) {
                                if (atomic_inc_return(&dd->rx_irq_called) == 1)
                                        return IRQ_HANDLED;
                        }
                        msm_spi_complete(dd);
                        return IRQ_HANDLED;
                }
                return IRQ_NONE;
        }

        if (dd->mode == SPI_FIFO_MODE) {
                while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
                        SPI_OP_IP_FIFO_NOT_EMPTY) &&
                        (dd->rx_bytes_remaining > 0)) {
                        msm_spi_read_word_from_fifo(dd);
                }
                if (dd->rx_bytes_remaining == 0)
                        msm_spi_complete(dd);
        }

        return IRQ_HANDLED;
}

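/* Pack up to bytes_per_word bytes from write_buf (MSB first) into one word and push it to the output FIFO */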
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
        u32 word;
        u8 byte;
        int i;

        word = 0;
        if (dd->write_buf) {
                for (i = 0; (i < dd->bytes_per_word) &&
                             dd->tx_bytes_remaining; i++) {
                        dd->tx_bytes_remaining--;
                        byte = *dd->write_buf++;
                        word |= (byte << (BITS_PER_BYTE * (3 - i)));
                }
        } else
                if (dd->tx_bytes_remaining > dd->bytes_per_word)
                        dd->tx_bytes_remaining -= dd->bytes_per_word;
                else
                        dd->tx_bytes_remaining = 0;
        dd->write_xfr_cnt++;
        if (dd->multi_xfr) {
                if (!dd->tx_bytes_remaining)
                        dd->write_xfr_cnt = 0;
                else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
                                                dd->write_len) {
                        struct spi_transfer *t = dd->cur_tx_transfer;
                        if (t->transfer_list.next != &dd->cur_msg->transfers) {
                                t = list_entry(t->transfer_list.next,
                                                struct spi_transfer,
                                                transfer_list);
                                dd->write_buf = t->tx_buf;
                                dd->write_len = t->len;
                                dd->write_xfr_cnt = 0;
                                dd->cur_tx_transfer = t;
                        }
                }
        }
        writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
        int count = 0;

        while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
               !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
               SPI_OP_OUTPUT_FIFO_FULL)) {
                msm_spi_write_word_to_fifo(dd);
                count++;
        }
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
        struct msm_spi *dd = dev_id;

        dd->stat_tx++;

        if (dd->mode == SPI_MODE_NONE)
                return IRQ_HANDLED;

        if (dd->mode == SPI_DMOV_MODE) {
                /* A TX-only transaction is handled here.
                   This is the only place we signal completion on TX rather
                   than RX */
                if (dd->read_buf == NULL &&
                    readl_relaxed(dd->base + SPI_OPERATIONAL) &
                    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
                        msm_spi_ack_transfer(dd);
                        if (atomic_inc_return(&dd->tx_irq_called) == 1)
                                return IRQ_HANDLED;
                        msm_spi_complete(dd);
                        return IRQ_HANDLED;
                }
                return IRQ_NONE;
        }

        /* Output FIFO is empty. Transmit any outstanding write data. */
        if (dd->mode == SPI_FIFO_MODE)
                msm_spi_write_rmn_to_fifo(dd);

        return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
        struct spi_master *master = dev_id;
        struct msm_spi *dd = spi_master_get_devdata(master);
        u32 spi_err;

        spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
        if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
                dev_warn(master->dev.parent, "SPI output overrun error\n");
        if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
                dev_warn(master->dev.parent, "SPI input underrun error\n");
        if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
                dev_warn(master->dev.parent, "SPI output underrun error\n");
        msm_spi_get_clk_err(dd, &spi_err);
        if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
                dev_warn(master->dev.parent, "SPI clock overrun error\n");
        if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
                dev_warn(master->dev.parent, "SPI clock underrun error\n");
        msm_spi_clear_error_flags(dd);
        msm_spi_ack_clk_err(dd);
        /* Ensure clearing of QUP_ERROR_FLAGS was completed */
        mb();
        return IRQ_HANDLED;
}

/**
 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
 * @return zero on success or negative error code
 *
 * calls dma_map_single() on the read/write buffers, effectively invalidating
 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 * buffer and copies the data to/from the client buffers
 */
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
        struct device *dev;
        struct spi_transfer *first_xfr;
        struct spi_transfer *nxt_xfr = NULL;
        void *tx_buf, *rx_buf;
        unsigned tx_len, rx_len;
        int ret = -EINVAL;

        dev = &dd->cur_msg->spi->dev;
        first_xfr = dd->cur_transfer;
        tx_buf = (void *)first_xfr->tx_buf;
        rx_buf = first_xfr->rx_buf;
        tx_len = rx_len = first_xfr->len;

        /*
         * For WR-WR and WR-RD transfers, we allocate our own temporary
         * buffer and copy the data to/from the client buffers.
         */
        if (dd->multi_xfr) {
                dd->temp_buf = kzalloc(dd->cur_msg_len,
                                        GFP_KERNEL | __GFP_DMA);
                if (!dd->temp_buf)
                        return -ENOMEM;
                nxt_xfr = list_entry(first_xfr->transfer_list.next,
                                        struct spi_transfer, transfer_list);

                if (dd->write_len && !dd->read_len) {
                        if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
                                goto error;

                        memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
                        memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
                                nxt_xfr->len);
                        tx_buf = dd->temp_buf;
                        tx_len = dd->cur_msg_len;
                } else {
                        if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
                                goto error;

                        rx_buf = dd->temp_buf;
                        rx_len = dd->cur_msg_len;
                }
        }
        if (tx_buf != NULL) {
                first_xfr->tx_dma = dma_map_single(dev, tx_buf,
                                                tx_len, DMA_TO_DEVICE);
                if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
                        dev_err(dev, "dma %cX %d bytes error\n",
                                'T', tx_len);
                        ret = -ENOMEM;
                        goto error;
                }
        }
        if (rx_buf != NULL) {
                dma_addr_t dma_handle;
                dma_handle = dma_map_single(dev, rx_buf,
                                                rx_len, DMA_FROM_DEVICE);
                if (dma_mapping_error(NULL, dma_handle)) {
                        dev_err(dev, "dma %cX %d bytes error\n",
                                'R', rx_len);
                        if (tx_buf != NULL)
                                dma_unmap_single(NULL, first_xfr->tx_dma,
                                                tx_len, DMA_TO_DEVICE);
                        ret = -ENOMEM;
                        goto error;
                }
                if (dd->multi_xfr)
                        nxt_xfr->rx_dma = dma_handle;
                else
                        first_xfr->rx_dma = dma_handle;
        }
        return 0;

error:
        kfree(dd->temp_buf);
        dd->temp_buf = NULL;
        return ret;
}

static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
        struct device *dev;
        u32 offset;

        dev = &dd->cur_msg->spi->dev;
        if (dd->cur_msg->is_dma_mapped)
                goto unmap_end;

        if (dd->multi_xfr) {
                if (dd->write_len && !dd->read_len) {
                        dma_unmap_single(dev,
                                        dd->cur_transfer->tx_dma,
                                        dd->cur_msg_len,
                                        DMA_TO_DEVICE);
                } else {
                        struct spi_transfer *prev_xfr;
                        prev_xfr = list_entry(
                                        dd->cur_transfer->transfer_list.prev,
                                        struct spi_transfer,
                                        transfer_list);
                        if (dd->cur_transfer->rx_buf) {
                                dma_unmap_single(dev,
                                                dd->cur_transfer->rx_dma,
                                                dd->cur_msg_len,
                                                DMA_FROM_DEVICE);
                        }
                        if (prev_xfr->tx_buf) {
                                dma_unmap_single(dev,
                                                prev_xfr->tx_dma,
                                                prev_xfr->len,
                                                DMA_TO_DEVICE);
                        }
                        if (dd->unaligned_len && dd->read_buf) {
                                offset = dd->cur_msg_len - dd->unaligned_len;
                                dma_coherent_post_ops();
                                memcpy(dd->read_buf + offset, dd->rx_padding,
                                        dd->unaligned_len);
                                memcpy(dd->cur_transfer->rx_buf,
                                        dd->read_buf + prev_xfr->len,
                                        dd->cur_transfer->len);
                        }
                }
                kfree(dd->temp_buf);
                dd->temp_buf = NULL;
                return;
        } else {
                if (dd->cur_transfer->rx_buf)
                        dma_unmap_single(dev, dd->cur_transfer->rx_dma,
                                        dd->cur_transfer->len,
                                        DMA_FROM_DEVICE);
                if (dd->cur_transfer->tx_buf)
                        dma_unmap_single(dev, dd->cur_transfer->tx_dma,
                                        dd->cur_transfer->len,
                                        DMA_TO_DEVICE);
        }

unmap_end:
        /* If we padded the transfer, we copy it from the padding buf */
        if (dd->unaligned_len && dd->read_buf) {
                offset = dd->cur_transfer->len - dd->unaligned_len;
                dma_coherent_post_ops();
                memcpy(dd->read_buf + offset, dd->rx_padding,
                        dd->unaligned_len);
        }
}

static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
{
        struct device *dev;

        /* mapped by client */
        if (dd->cur_msg->is_dma_mapped)
                return;

        dev = &dd->cur_msg->spi->dev;
        if (dd->cur_transfer->rx_buf)
                dma_unmap_single(dev, dd->cur_transfer->rx_dma,
                                dd->cur_transfer->len,
                                DMA_FROM_DEVICE);

        if (dd->cur_transfer->tx_buf)
                dma_unmap_single(dev, dd->cur_transfer->tx_dma,
                                dd->cur_transfer->len,
                                DMA_TO_DEVICE);
}

static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
        if (dd->mode == SPI_DMOV_MODE)
                msm_spi_dmov_unmap_buffers(dd);
        else if (dd->mode == SPI_BAM_MODE)
                msm_spi_bam_unmap_buffers(dd);
}

/**
 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
 * the given transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DMA if:
 * 1. It is supported by the HW
 * 2. It is not disabled by platform data
 * 3. Transfer size is greater than 3*block size.
 * 4. Buffers are aligned to cache line.
 * 5. Bits-per-word is 8, 16 or 32.
 */
static inline bool
msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
        if (!dd->use_dma)
                return false;

        /* check constraints from platform data */
        if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
                return false;

        if (dd->cur_msg_len < 3*dd->input_block_size)
                return false;

        if (dd->multi_xfr && !dd->read_len && !dd->write_len)
                return false;

        if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
                u32 cache_line = dma_get_cache_alignment();

                if (tr->tx_buf) {
                        if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
                                return false;
                }
                if (tr->rx_buf) {
                        if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
                                return false;
                }

                if (tr->cs_change &&
                    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
                        return false;
        }

        return true;
}

/**
 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
 * prepares to process a transfer.
 */
static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
        if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
                if (dd->qup_ver) {
                        dd->mode = SPI_BAM_MODE;
                } else {
                        dd->mode = SPI_DMOV_MODE;
                        if (dd->write_len && dd->read_len) {
                                dd->tx_bytes_remaining = dd->write_len;
                                dd->rx_bytes_remaining = dd->read_len;
                        }
                }
        } else {
                dd->mode = SPI_FIFO_MODE;
                if (dd->multi_xfr) {
                        dd->read_len = dd->cur_transfer->len;
                        dd->write_len = dd->cur_transfer->len;
                }
        }
}

/**
 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
 * transfer
 */
static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
{
        u32 spi_iom;
        spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
        /* Set input and output transfer mode: FIFO, DMOV, or BAM */
        spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
        spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
        spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
        /* Turn on packing for data mover */
        if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
                spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
        else
                spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);

        /*if (dd->mode == SPI_BAM_MODE) {
                spi_iom |= SPI_IO_C_NO_TRI_STATE;
                spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
        }*/
        writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
}

static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
{
        if (mode & SPI_CPOL)
                spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
        else
                spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
        return spi_ioc;
}

/**
 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
 * next transfer
 * @return the new set value of SPI_IO_CONTROL
 */
static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
{
        u32 spi_ioc, spi_ioc_orig, chip_select;

        spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
        spi_ioc_orig = spi_ioc;
        spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
                                        , dd->cur_msg->spi->mode);
        /* Set chip-select */
        chip_select = dd->cur_msg->spi->chip_select << 2;
        if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
                spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
        if (!dd->cur_transfer->cs_change)
                spi_ioc |= SPI_IO_C_MX_CS_MODE;

        if (spi_ioc != spi_ioc_orig)
                writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

        return spi_ioc;
}

/**
 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
 * the next transfer
 */
static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
        /* mask INPUT and OUTPUT service flags in order to prevent IRQs on
         * FIFO status change in BAM mode */
        u32 mask = (dd->mode == SPI_BAM_MODE) ?
                QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
                : 0;
        writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}

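/* Configure the QUP/SPI core for the current transfer, start it in the selected mode and wait for completion */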
static void msm_spi_process_transfer(struct msm_spi *dd)
{
        u8 bpw;
        u32 max_speed;
        u32 read_count;
        u32 timeout;
        u32 spi_ioc;
        u32 int_loopback = 0;

        dd->tx_bytes_remaining = dd->cur_msg_len;
        dd->rx_bytes_remaining = dd->cur_msg_len;
        dd->read_buf = dd->cur_transfer->rx_buf;
        dd->write_buf = dd->cur_transfer->tx_buf;
        init_completion(&dd->transfer_complete);
        if (dd->cur_transfer->bits_per_word)
                bpw = dd->cur_transfer->bits_per_word;
        else
                if (dd->cur_msg->spi->bits_per_word)
                        bpw = dd->cur_msg->spi->bits_per_word;
                else
                        bpw = 8;
        dd->bytes_per_word = (bpw + 7) / 8;

        if (dd->cur_transfer->speed_hz)
                max_speed = dd->cur_transfer->speed_hz;
        else
                max_speed = dd->cur_msg->spi->max_speed_hz;
        if (!dd->clock_speed || max_speed != dd->clock_speed)
                msm_spi_clock_set(dd, max_speed);

        timeout = 100 * msecs_to_jiffies(
                        DIV_ROUND_UP(dd->cur_msg_len * 8,
                        DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

        read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
        if (dd->cur_msg->spi->mode & SPI_LOOP)
                int_loopback = 1;
        if (int_loopback && dd->multi_xfr &&
                        (read_count > dd->input_fifo_size)) {
                if (dd->read_len && dd->write_len)
                        pr_err(
                        "%s: Internal loopback does not support > fifo size "
                        "for write-then-read transactions\n",
                        __func__);
                else if (dd->write_len && !dd->read_len)
                        pr_err(
                        "%s: Internal loopback does not support > fifo size "
                        "for write-then-write transactions\n",
                        __func__);
                return;
        }

        if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
                dev_err(dd->dev,
                        "%s: Error setting QUP to reset-state",
                        __func__);

        msm_spi_set_transfer_mode(dd, bpw, read_count);
        msm_spi_set_mx_counts(dd, read_count);
        if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
                if (msm_spi_dma_map_buffers(dd) < 0) {
                        pr_err("%s: failed to map DMA buffers\n", __func__);
                        return;
                }
        msm_spi_set_qup_io_modes(dd);
        msm_spi_set_spi_config(dd, bpw);
        msm_spi_set_qup_config(dd, bpw);
        spi_ioc = msm_spi_set_spi_io_control(dd);
        msm_spi_set_qup_op_mask(dd);

        if (dd->mode == SPI_DMOV_MODE) {
                msm_spi_setup_dm_transfer(dd);
                msm_spi_enqueue_dm_commands(dd);
        }
        /* The output fifo interrupt handler will handle all writes after
           the first. Restricting this to one write avoids contention
           issues and race conditions between this thread and the int handler
        */
        else if (dd->mode == SPI_FIFO_MODE) {
                if (msm_spi_prepare_for_write(dd))
                        goto transfer_end;
                msm_spi_start_write(dd, read_count);
        } else if (dd->mode == SPI_BAM_MODE) {
                if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
                        dev_err(dd->dev, "%s: BAM transfer setup failed\n",
                                __func__);
        }

        /*
         * In BAM mode, the state at this point is already run.
         * Only enter the RUN state after the first word is written into
         * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
         * might fire before the first word is written resulting in a
         * possible race condition.
         */
        if (dd->mode != SPI_BAM_MODE)
                if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
                        dev_warn(dd->dev,
                                "%s: Failed to set QUP to run-state. Mode:%d",
                                __func__, dd->mode);
                        goto transfer_end;
                }

        /* Assume success, this might change later upon transaction result */
        dd->cur_msg->status = 0;
        do {
                if (!wait_for_completion_timeout(&dd->transfer_complete,
                                                timeout)) {
                        dev_err(dd->dev,
                                "%s: SPI transaction timeout\n",
                                __func__);
                        dd->cur_msg->status = -EIO;
                        if (dd->mode == SPI_DMOV_MODE) {
                                msm_dmov_flush(dd->tx_dma_chan, 1);
                                msm_dmov_flush(dd->rx_dma_chan, 1);
                        }
                        break;
                }
        } while (msm_spi_dm_send_next(dd));

        msm_spi_udelay(dd->cur_transfer->delay_usecs);
transfer_end:
        msm_spi_dma_unmap_buffers(dd);
        dd->mode = SPI_MODE_NONE;

        msm_spi_set_state(dd, SPI_OP_STATE_RESET);
        writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
                        dd->base + SPI_IO_CONTROL);
}

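/* Scan the message once to compute the total length and, for two-transfer messages starting with a write, the read/write split */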
static void get_transfer_length(struct msm_spi *dd)
{
        struct spi_transfer *tr;
        int num_xfrs = 0;
        int readlen = 0;
        int writelen = 0;

        dd->cur_msg_len = 0;
        dd->multi_xfr = 0;
        dd->read_len = dd->write_len = 0;

        list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
                if (tr->tx_buf)
                        writelen += tr->len;
                if (tr->rx_buf)
                        readlen += tr->len;
                dd->cur_msg_len += tr->len;
                num_xfrs++;
        }

        if (num_xfrs == 2) {
                struct spi_transfer *first_xfr = dd->cur_transfer;

                dd->multi_xfr = 1;
                tr = list_entry(first_xfr->transfer_list.next,
                                struct spi_transfer,
                                transfer_list);
                /*
                 * We update dd->read_len and dd->write_len only
                 * for WR-WR and WR-RD transfers.
                 */
                if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
                        if (((tr->tx_buf) && (!tr->rx_buf)) ||
                            ((!tr->tx_buf) && (tr->rx_buf))) {
                                dd->read_len = readlen;
                                dd->write_len = writelen;
                        }
                }
        } else if (num_xfrs > 1)
                dd->multi_xfr = 1;
}

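/* Group consecutive transfers that share the same cs_change setting; returns the number of transfers grouped */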
static inline int combine_transfers(struct msm_spi *dd)
{
        struct spi_transfer *t = dd->cur_transfer;
        struct spi_transfer *nxt;
        int xfrs_grped = 1;

        dd->cur_msg_len = dd->cur_transfer->len;
        while (t->transfer_list.next != &dd->cur_msg->transfers) {
                nxt = list_entry(t->transfer_list.next,
                                struct spi_transfer,
                                transfer_list);
                if (t->cs_change != nxt->cs_change)
                        return xfrs_grped;
                dd->cur_msg_len += nxt->len;
                xfrs_grped++;
                t = nxt;
        }
        return xfrs_grped;
}

static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
        u32 spi_ioc;
        u32 spi_ioc_orig;

        spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
        spi_ioc_orig = spi_ioc;
        if (set_flag)
                spi_ioc |= SPI_IO_C_FORCE_CS;
        else
                spi_ioc &= ~SPI_IO_C_FORCE_CS;

        if (spi_ioc != spi_ioc_orig)
                writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

static void msm_spi_process_message(struct msm_spi *dd)
{
        int xfrs_grped = 0;
        int cs_num;
        int rc;
        bool xfer_delay = false;
        struct spi_transfer *tr;

        dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
        cs_num = dd->cur_msg->spi->chip_select;
        if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
                (!(dd->cs_gpios[cs_num].valid)) &&
                (dd->cs_gpios[cs_num].gpio_num >= 0)) {
                rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
                                spi_cs_rsrcs[cs_num]);
                if (rc) {
                        dev_err(dd->dev, "gpio_request for pin %d failed with "
                                "error %d\n", dd->cs_gpios[cs_num].gpio_num,
                                rc);
                        return;
                }
                dd->cs_gpios[cs_num].valid = 1;
        }

        list_for_each_entry(tr,
                        &dd->cur_msg->transfers,
                        transfer_list) {
                if (tr->delay_usecs) {
                        dev_info(dd->dev, "SPI slave requests delay per txn: %d",
                                tr->delay_usecs);
                        xfer_delay = true;
                        break;
                }
        }

        /* Don't combine xfers if delay is needed after every xfer */
        if (dd->qup_ver || xfer_delay) {
                if (dd->qup_ver)
                        write_force_cs(dd, 0);
                list_for_each_entry(dd->cur_transfer,
                                &dd->cur_msg->transfers,
                                transfer_list) {
                        struct spi_transfer *t = dd->cur_transfer;
                        struct spi_transfer *nxt;

                        if (t->transfer_list.next != &dd->cur_msg->transfers) {
                                nxt = list_entry(t->transfer_list.next,
                                                struct spi_transfer,
                                                transfer_list);

                                if (dd->qup_ver &&
                                        t->cs_change == nxt->cs_change)
                                        write_force_cs(dd, 1);
                                else if (dd->qup_ver)
                                        write_force_cs(dd, 0);
                        }

                        dd->cur_msg_len = dd->cur_transfer->len;
                        msm_spi_process_transfer(dd);
                }
        } else {
                dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
                                                struct spi_transfer,
                                                transfer_list);
                get_transfer_length(dd);
                if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
                        /*
                         * Handling of multi-transfers.
                         * FIFO mode is used by default
                         */
                        list_for_each_entry(dd->cur_transfer,
                                        &dd->cur_msg->transfers,
                                        transfer_list) {
                                if (!dd->cur_transfer->len)
                                        goto error;
                                if (xfrs_grped) {
                                        xfrs_grped--;
                                        continue;
                                } else {
                                        dd->read_len = dd->write_len = 0;
                                        xfrs_grped = combine_transfers(dd);
                                }

                                dd->cur_tx_transfer = dd->cur_transfer;
                                dd->cur_rx_transfer = dd->cur_transfer;
                                msm_spi_process_transfer(dd);
                                xfrs_grped--;
                        }
                } else {
                        /* Handling of a single transfer or
                         * WR-WR or WR-RD transfers
                         */
                        if ((!dd->cur_msg->is_dma_mapped) &&
                                (msm_spi_use_dma(dd, dd->cur_transfer,
                                        dd->cur_transfer->bits_per_word))) {
                                /* Mapping of DMA buffers */
                                int ret = msm_spi_dma_map_buffers(dd);
                                if (ret < 0) {
                                        dd->cur_msg->status = ret;
                                        goto error;
                                }
                        }

                        dd->cur_tx_transfer = dd->cur_transfer;
                        dd->cur_rx_transfer = dd->cur_transfer;
                        msm_spi_process_transfer(dd);
                }
        }

        return;

error:
        if (dd->cs_gpios[cs_num].valid) {
                gpio_free(dd->cs_gpios[cs_num].gpio_num);
                dd->cs_gpios[cs_num].valid = 0;
        }
}

1685/* workqueue - pull messages from queue & process */
1686static void msm_spi_workq(struct work_struct *work)
1687{
1688 struct msm_spi *dd =
1689 container_of(work, struct msm_spi, work_data);
1690 unsigned long flags;
1691 u32 status_error = 0;
Alok Chauhan66554a12012-08-22 19:54:45 +05301692 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001693
1694 mutex_lock(&dd->core_lock);
1695
1696 /* Don't allow power collapse until we release mutex */
1697 if (pm_qos_request_active(&qos_req_list))
1698 pm_qos_update_request(&qos_req_list,
1699 dd->pm_lat);
1700 if (dd->use_rlock)
1701 remote_mutex_lock(&dd->r_lock);
1702
Alok Chauhan66554a12012-08-22 19:54:45 +05301703 /* Configure the spi clk, miso, mosi and cs gpio */
1704 if (dd->pdata->gpio_config) {
1705 rc = dd->pdata->gpio_config();
1706 if (rc) {
1707 dev_err(dd->dev,
1708 "%s: error configuring GPIOs\n",
1709 __func__);
1710 status_error = 1;
1711 }
1712 }
1713
1714 rc = msm_spi_request_gpios(dd);
1715 if (rc)
1716 status_error = 1;
1717
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001718 clk_prepare_enable(dd->clk);
1719 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001720 msm_spi_enable_irqs(dd);
1721
1722 if (!msm_spi_is_valid_state(dd)) {
1723 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1724 __func__);
1725 status_error = 1;
1726 }
1727
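/*
 * Drain the message queue. The queue lock is dropped while each message is
 * processed so that msm_spi_transfer() can keep queueing new messages.
 */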
1728 spin_lock_irqsave(&dd->queue_lock, flags);
1729 while (!list_empty(&dd->queue)) {
1730 dd->cur_msg = list_entry(dd->queue.next,
1731 struct spi_message, queue);
1732 list_del_init(&dd->cur_msg->queue);
1733 spin_unlock_irqrestore(&dd->queue_lock, flags);
1734 if (status_error)
1735 dd->cur_msg->status = -EIO;
1736 else
1737 msm_spi_process_message(dd);
1738 if (dd->cur_msg->complete)
1739 dd->cur_msg->complete(dd->cur_msg->context);
1740 spin_lock_irqsave(&dd->queue_lock, flags);
1741 }
1742 dd->transfer_pending = 0;
1743 spin_unlock_irqrestore(&dd->queue_lock, flags);
1744
1745 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001746 clk_disable_unprepare(dd->clk);
1747 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001748
Alok Chauhan66554a12012-08-22 19:54:45 +05301749 /* Free the spi clk, miso, mosi, cs gpio */
1750 if (!rc && dd->pdata && dd->pdata->gpio_release)
1751 dd->pdata->gpio_release();
1752 if (!rc)
1753 msm_spi_free_gpios(dd);
1754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755 if (dd->use_rlock)
1756 remote_mutex_unlock(&dd->r_lock);
1757
1758 if (pm_qos_request_active(&qos_req_list))
1759 pm_qos_update_request(&qos_req_list,
1760 PM_QOS_DEFAULT_VALUE);
1761
1762 mutex_unlock(&dd->core_lock);
1763 /* If needed, this can be done after the current message is complete,
1764 and work can be continued upon resume. No motivation for now. */
1765 if (dd->suspended)
1766 wake_up_interruptible(&dd->continue_suspend);
1767}
1768
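/*
 * spi_master transfer callback: validate the message parameters, queue the
 * message and kick the workqueue; the transfer itself runs in msm_spi_workq().
 */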
1769static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1770{
1771 struct msm_spi *dd;
1772 unsigned long flags;
1773 struct spi_transfer *tr;
1774
1775 dd = spi_master_get_devdata(spi->master);
1776 if (dd->suspended)
1777 return -EBUSY;
1778
1779 if (list_empty(&msg->transfers) || !msg->complete)
1780 return -EINVAL;
1781
1782 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1783 /* Check message parameters */
1784 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1785 (tr->bits_per_word &&
1786 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1787 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1788 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1789 "tx=%p, rx=%p\n",
1790 tr->speed_hz, tr->bits_per_word,
1791 tr->tx_buf, tr->rx_buf);
1792 return -EINVAL;
1793 }
1794 }
1795
1796 spin_lock_irqsave(&dd->queue_lock, flags);
1797 if (dd->suspended) {
1798 spin_unlock_irqrestore(&dd->queue_lock, flags);
1799 return -EBUSY;
1800 }
1801 dd->transfer_pending = 1;
1802 list_add_tail(&msg->queue, &dd->queue);
1803 spin_unlock_irqrestore(&dd->queue_lock, flags);
1804 queue_work(dd->workqueue, &dd->work_data);
1805 return 0;
1806}
1807
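/*
 * spi_master setup callback: validate bits_per_word and chip select, then
 * program chip-select polarity, clock mode and loopback bits with the core
 * clocks temporarily enabled.
 */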
1808static int msm_spi_setup(struct spi_device *spi)
1809{
1810 struct msm_spi *dd;
1811 int rc = 0;
1812 u32 spi_ioc;
1813 u32 spi_config;
1814 u32 mask;
1815
1816 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1817 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1818 __func__, spi->bits_per_word);
1819 rc = -EINVAL;
1820 }
1821 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1822 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1823 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1824 rc = -EINVAL;
1825 }
1826
1827 if (rc)
1828 goto err_setup_exit;
1829
1830 dd = spi_master_get_devdata(spi->master);
1831
1832 mutex_lock(&dd->core_lock);
1833 if (dd->suspended) {
1834 mutex_unlock(&dd->core_lock);
1835 return -EBUSY;
1836 }
1837
1838 if (dd->use_rlock)
1839 remote_mutex_lock(&dd->r_lock);
1840
Alok Chauhan66554a12012-08-22 19:54:45 +05301841 /* Configure the spi clk, miso, mosi, cs gpio */
1842 if (dd->pdata->gpio_config) {
1843 rc = dd->pdata->gpio_config();
1844 if (rc) {
1845 dev_err(&spi->dev,
1846 "%s: error configuring GPIOs\n",
1847 __func__);
1848 rc = -ENXIO;
1849 goto err_setup_gpio;
1850 }
1851 }
1852
1853 rc = msm_spi_request_gpios(dd);
1854 if (rc) {
1855 rc = -ENXIO;
1856 goto err_setup_gpio;
1857 }
1858
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001859 clk_prepare_enable(dd->clk);
1860 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001861
1862 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1863 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1864 if (spi->mode & SPI_CS_HIGH)
1865 spi_ioc |= mask;
1866 else
1867 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06001868 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869
1870 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1871
1872 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06001873 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
1874 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001875 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1876
1877 /* Ensure previous write completed before disabling the clocks */
1878 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001879 clk_disable_unprepare(dd->clk);
1880 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881
Alok Chauhan66554a12012-08-22 19:54:45 +05301882 /* Free the spi clk, miso, mosi, cs gpio */
1883 if (dd->pdata && dd->pdata->gpio_release)
1884 dd->pdata->gpio_release();
1885 msm_spi_free_gpios(dd);
1886
1887err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888 if (dd->use_rlock)
1889 remote_mutex_unlock(&dd->r_lock);
1890 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001891err_setup_exit:
1892 return rc;
1893}
1894
1895#ifdef CONFIG_DEBUG_FS
1896static int debugfs_iomem_x32_set(void *data, u64 val)
1897{
1898 writel_relaxed(val, data);
1899 /* Ensure the previous write completed. */
1900 mb();
1901 return 0;
1902}
1903
1904static int debugfs_iomem_x32_get(void *data, u64 *val)
1905{
1906 *val = readl_relaxed(data);
1907 /* Ensure the previous read completed. */
1908 mb();
1909 return 0;
1910}
1911
1912DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1913 debugfs_iomem_x32_set, "0x%08llx\n");
1914
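/*
 * Expose each register listed in debugfs_spi_regs[] as a file in a
 * per-device debugfs directory, backed by the x32 accessors above.
 */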
1915static void spi_debugfs_init(struct msm_spi *dd)
1916{
1917 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1918 if (dd->dent_spi) {
1919 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001920
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001921 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1922 dd->debugfs_spi_regs[i] =
1923 debugfs_create_file(
1924 debugfs_spi_regs[i].name,
1925 debugfs_spi_regs[i].mode,
1926 dd->dent_spi,
1927 dd->base + debugfs_spi_regs[i].offset,
1928 &fops_iomem_x32);
1929 }
1930 }
1931}
1932
1933static void spi_debugfs_exit(struct msm_spi *dd)
1934{
1935 if (dd->dent_spi) {
1936 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001937
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938 debugfs_remove_recursive(dd->dent_spi);
1939 dd->dent_spi = NULL;
1940 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1941 dd->debugfs_spi_regs[i] = NULL;
1942 }
1943}
1944#else
1945static void spi_debugfs_init(struct msm_spi *dd) {}
1946static void spi_debugfs_exit(struct msm_spi *dd) {}
1947#endif
1948
1949/* ===Device attributes begin=== */
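/*
 * "stats" sysfs attribute: reading dumps FIFO/DMA configuration and interrupt
 * counters; writing anything to it resets the counters (see set_stats below).
 */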
1950static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1951 char *buf)
1952{
1953 struct spi_master *master = dev_get_drvdata(dev);
1954 struct msm_spi *dd = spi_master_get_devdata(master);
1955
1956 return snprintf(buf, PAGE_SIZE,
1957 "Device %s\n"
1958 "rx fifo_size = %d spi words\n"
1959 "tx fifo_size = %d spi words\n"
1960 "use_dma ? %s\n"
1961 "rx block size = %d bytes\n"
1962 "tx block size = %d bytes\n"
1963 "burst size = %d bytes\n"
1964 "DMA configuration:\n"
1965 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1966 "--statistics--\n"
1967 "Rx isrs = %d\n"
1968 "Tx isrs = %d\n"
1969 "DMA error = %d\n"
1970 "--debug--\n"
1971 "NA yet\n",
1972 dev_name(dev),
1973 dd->input_fifo_size,
1974 dd->output_fifo_size,
1975 dd->use_dma ? "yes" : "no",
1976 dd->input_block_size,
1977 dd->output_block_size,
1978 dd->burst_size,
1979 dd->tx_dma_chan,
1980 dd->rx_dma_chan,
1981 dd->tx_dma_crci,
1982 dd->rx_dma_crci,
1983 dd->stat_rx + dd->stat_dmov_rx,
1984 dd->stat_tx + dd->stat_dmov_tx,
1985 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
1986 );
1987}
1988
1989/* Reset statistics on write */
1990static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1991 const char *buf, size_t count)
1992{
1993 struct msm_spi *dd = dev_get_drvdata(dev);
1994 dd->stat_rx = 0;
1995 dd->stat_tx = 0;
1996 dd->stat_dmov_rx = 0;
1997 dd->stat_dmov_tx = 0;
1998 dd->stat_dmov_rx_err = 0;
1999 dd->stat_dmov_tx_err = 0;
2000 return count;
2001}
2002
2003static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
2004
2005static struct attribute *dev_attrs[] = {
2006 &dev_attr_stats.attr,
2007 NULL,
2008};
2009
2010static struct attribute_group dev_attr_grp = {
2011 .attrs = dev_attrs,
2012};
2013/* ===Device attributes end=== */
2014
2015/**
2016 * spi_dmov_tx_complete_func - DataMover tx completion callback
2017 *
2018 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
2019 * spinlock @msm_dmov_lock held.
2020 */
2021static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
2022 unsigned int result,
2023 struct msm_dmov_errdata *err)
2024{
2025 struct msm_spi *dd;
2026
2027 if (!(result & DMOV_RSLT_VALID)) {
2028 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
2029 return;
2030 }
2031 /* restore original context */
2032 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302033 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002034 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302035 if (atomic_inc_return(&dd->tx_irq_called) == 1)
2036 return;
2037 complete(&dd->transfer_complete);
2038 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002039 /* Error or flush */
2040 if (result & DMOV_RSLT_ERROR) {
2041 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2042 dd->stat_dmov_tx_err++;
2043 }
2044 if (result & DMOV_RSLT_FLUSH) {
2045 /*
2046 * Flushing normally happens in process of
2047 * removing, when we are waiting for outstanding
2048 * DMA commands to be flushed.
2049 */
2050 dev_info(dd->dev,
2051 "DMA channel flushed (0x%08x)\n", result);
2052 }
2053 if (err)
2054 dev_err(dd->dev,
2055 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2056 err->flush[0], err->flush[1], err->flush[2],
2057 err->flush[3], err->flush[4], err->flush[5]);
2058 dd->cur_msg->status = -EIO;
2059 complete(&dd->transfer_complete);
2060 }
2061}
2062
2063/**
2064 * spi_dmov_rx_complete_func - DataMover rx completion callback
2065 *
2066 * Executed in IRQ context (Data Mover's IRQ) with
2067 * DataMover's spinlock @msm_dmov_lock held.
2068 */
2069static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2070 unsigned int result,
2071 struct msm_dmov_errdata *err)
2072{
2073 struct msm_spi *dd;
2074
2075 if (!(result & DMOV_RSLT_VALID)) {
2076 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
2077 result, cmd);
2078 return;
2079 }
2080 /* restore original context */
2081 dd = container_of(cmd, struct msm_spi, rx_hdr);
2082 if (result & DMOV_RSLT_DONE) {
2083 dd->stat_dmov_rx++;
2084 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2085 return;
2086 complete(&dd->transfer_complete);
2087 } else {
2088 /* Error or flush */
2089 if (result & DMOV_RSLT_ERROR) {
2090 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2091 dd->stat_dmov_rx_err++;
2092 }
2093 if (result & DMOV_RSLT_FLUSH) {
2094 dev_info(dd->dev,
2095 "DMA channel flushed(0x%08x)\n", result);
2096 }
2097 if (err)
2098 dev_err(dd->dev,
2099 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2100 err->flush[0], err->flush[1], err->flush[2],
2101 err->flush[3], err->flush[4], err->flush[5]);
2102 dd->cur_msg->status = -EIO;
2103 complete(&dd->transfer_complete);
2104 }
2105}
2106
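/*
 * Size of the single coherent allocation used for DMOV: two command
 * structures plus the tx and rx padding buffers, each rounded up for
 * alignment.
 */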
2107static inline u32 get_chunk_size(struct msm_spi *dd)
2108{
2109 u32 cache_line = dma_get_cache_alignment();
2110
2111 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
2112 roundup(dd->burst_size, cache_line))*2;
2113}
2114
Gilad Avidovd0262342012-10-24 16:52:30 -06002115static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116{
2117 int limit = 0;
2118
2119 if (!dd->use_dma)
2120 return;
2121
2122 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002123 msm_dmov_flush(dd->tx_dma_chan, 1);
2124 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002125 msleep(10);
2126 }
2127
2128 dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
2129 dd->tx_dmov_cmd_dma);
2130 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2131 dd->tx_padding = dd->rx_padding = NULL;
2132}
2133
Gilad Avidovd0262342012-10-24 16:52:30 -06002134static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2135 enum msm_spi_pipe_direction pipe_dir)
2136{
2137 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2138 (&dd->bam.prod) : (&dd->bam.cons);
2139 if (!pipe->teardown_required)
2140 return;
2141
2142 sps_disconnect(pipe->handle);
2143 dma_free_coherent(dd->dev, pipe->config.desc.size,
2144 pipe->config.desc.base, pipe->config.desc.phys_base);
2145 sps_free_endpoint(pipe->handle);
2146 pipe->handle = 0;
2147 pipe->teardown_required = false;
2148}
2149
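/*
 * Allocate an SPS endpoint for one BAM pipe (producer or consumer), set up
 * its descriptor FIFO, connect it and register for end-of-transfer events.
 */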
2150static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2151 enum msm_spi_pipe_direction pipe_dir)
2152{
2153 int rc = 0;
2154 struct sps_pipe *pipe_handle;
2155 struct sps_register_event event = {0};
2156 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2157 (&dd->bam.prod) : (&dd->bam.cons);
2158 struct sps_connect *pipe_conf = &pipe->config;
2159
2160 pipe->handle = 0;
2161 pipe_handle = sps_alloc_endpoint();
2162 if (!pipe_handle) {
2163 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2164 , __func__);
2165 return -ENOMEM;
2166 }
2167
2168 memset(pipe_conf, 0, sizeof(*pipe_conf));
2169 rc = sps_get_config(pipe_handle, pipe_conf);
2170 if (rc) {
2171 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2172 , __func__);
2173 goto config_err;
2174 }
2175
2176 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2177 pipe_conf->source = dd->bam.handle;
2178 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2179 pipe_conf->mode = SPS_MODE_SRC;
2180 pipe_conf->src_pipe_index =
2181 dd->pdata->bam_producer_pipe_index;
2182 pipe_conf->dest_pipe_index = 0;
2183 } else {
2184 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2185 pipe_conf->destination = dd->bam.handle;
2186 pipe_conf->mode = SPS_MODE_DEST;
2187 pipe_conf->src_pipe_index = 0;
2188 pipe_conf->dest_pipe_index =
2189 dd->pdata->bam_consumer_pipe_index;
2190 }
2191 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2192 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2193 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2194 pipe_conf->desc.size,
2195 &pipe_conf->desc.phys_base,
2196 GFP_KERNEL);
2197 if (!pipe_conf->desc.base) {
2198 dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory\n"
2199 , __func__);
2200 rc = -ENOMEM;
2201 goto config_err;
2202 }
2203
2204 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2205
2206 rc = sps_connect(pipe_handle, pipe_conf);
2207 if (rc) {
2208 dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
2209 goto connect_err;
2210 }
2211
2212 event.mode = SPS_TRIGGER_WAIT;
2213 event.options = SPS_O_EOT;
2214 event.xfer_done = &dd->transfer_complete;
2215 event.user = (void *)dd;
2216 rc = sps_register_event(pipe_handle, &event);
2217 if (rc) {
2218 dev_err(dd->dev, "%s: Failed to register BAM EOT event",
2219 __func__);
2220 goto register_err;
2221 }
2222
2223 pipe->handle = pipe_handle;
2224 pipe->teardown_required = true;
2225 return 0;
2226
2227register_err:
2228 sps_disconnect(pipe_handle);
2229connect_err:
2230 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2231 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2232config_err:
2233 sps_free_endpoint(pipe_handle);
2234
2235 return rc;
2236}
2237
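/* Tear down both BAM pipes and deregister the BAM device if we registered it */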
2238static void msm_spi_bam_teardown(struct msm_spi *dd)
2239{
2240 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2241 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2242
2243 if (dd->bam.deregister_required) {
2244 sps_deregister_bam_device(dd->bam.handle);
2245 dd->bam.deregister_required = false;
2246 }
2247}
2248
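/*
 * Register the BAM device with the SPS driver (unless a handle already
 * exists for it) and initialize the producer and consumer pipes.
 */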
2249static int msm_spi_bam_init(struct msm_spi *dd)
2250{
2251 struct sps_bam_props bam_props = {0};
2252 u32 bam_handle;
2253 int rc = 0;
2254
2255 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2256 if (rc || !bam_handle) {
2257 bam_props.phys_addr = dd->bam.phys_addr;
2258 bam_props.virt_addr = dd->bam.base;
2259 bam_props.irq = dd->bam.irq;
2260 bam_props.manage = SPS_BAM_MGR_LOCAL;
2261 bam_props.summing_threshold = 0x10;
2262
2263 rc = sps_register_bam_device(&bam_props, &bam_handle);
2264 if (rc) {
2265 dev_err(dd->dev,
2266 "%s: Failed to register BAM device",
2267 __func__);
2268 return rc;
2269 }
2270 dd->bam.deregister_required = true;
2271 }
2272
2273 dd->bam.handle = bam_handle;
2274
2275 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2276 if (rc) {
2277 dev_err(dd->dev,
2278 "%s: Failed to init producer BAM-pipe",
2279 __func__);
2280 goto bam_init_error;
2281 }
2282
2283 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2284 if (rc) {
2285 dev_err(dd->dev,
2286 "%s: Failed to init consumer BAM-pipe",
2287 __func__);
2288 goto bam_init_error;
2289 }
2290
2291 return 0;
2292
2293bam_init_error:
2294 msm_spi_bam_teardown(dd);
2295 return rc;
2296}
2297
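/*
 * One-time Data Mover setup: carve a single coherent chunk into tx/rx command
 * structures and padding buffers, and pre-build the box-mode commands that
 * feed the SPI input/output FIFOs.
 */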
2298static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002299{
2300 dmov_box *box;
2301 u32 cache_line = dma_get_cache_alignment();
2302
2303 /* Allocate all as one chunk, since all is smaller than page size */
2304
2305 /* We pass a NULL device, since a device would need coherent_dma_mask in
2306 its definition; we're okay with using the system pool */
2307 dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
2308 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
2309 if (dd->tx_dmov_cmd == NULL)
2310 return -ENOMEM;
2311
2312 /* DMA addresses should be 64-bit aligned */
2313 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2314 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2315 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2316 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2317
2318 /* Buffers should be aligned to cache line */
2319 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2320 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2321 sizeof(struct spi_dmov_cmd), cache_line);
2322 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
2323 cache_line);
2324 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
2325 cache_line);
2326
2327 /* Setup DM commands */
2328 box = &(dd->rx_dmov_cmd->box);
2329 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2330 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2331 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2332 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2333 offsetof(struct spi_dmov_cmd, cmd_ptr));
2334 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335
2336 box = &(dd->tx_dmov_cmd->box);
2337 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2338 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2339 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2340 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2341 offsetof(struct spi_dmov_cmd, cmd_ptr));
2342 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002343
2344 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2345 CMD_DST_CRCI(dd->tx_dma_crci);
2346 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2347 SPI_OUTPUT_FIFO;
2348 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2349 CMD_SRC_CRCI(dd->rx_dma_crci);
2350 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2351 SPI_INPUT_FIFO;
2352
2353 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002354 msm_dmov_flush(dd->tx_dma_chan, 1);
2355 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002356
2357 return 0;
2358}
2359
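/*
 * Illustrative device-tree node based on the properties parsed below; the
 * values shown are placeholders and are board-specific:
 *
 *	spi {
 *		compatible = "qcom,spi-qup-v2";
 *		spi-max-frequency = <19200000>;
 *		qcom,ver-reg-exists;
 *		qcom,use-bam;
 *		qcom,bam-consumer-pipe-index = <12>;
 *		qcom,bam-producer-pipe-index = <13>;
 *	};
 */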
Gilad Avidovd0262342012-10-24 16:52:30 -06002360/**
2361 * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
2362 */
2363struct msm_spi_platform_data *
2364__init msm_spi_dt_to_pdata(struct platform_device *pdev)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002365{
2366 struct device_node *node = pdev->dev.of_node;
2367 struct msm_spi_platform_data *pdata;
Gilad Avidovd0262342012-10-24 16:52:30 -06002368 int rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002369
2370 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2371 if (!pdata) {
2372 pr_err("Unable to allocate platform data\n");
2373 return NULL;
2374 }
2375
2376 of_property_read_u32(node, "spi-max-frequency",
2377 &pdata->max_clock_speed);
Gilad Avidov0697ea62013-02-11 16:46:38 -07002378 of_property_read_u32(node, "qcom,infinite-mode",
Kiran Gundae8f16742012-06-27 10:06:32 +05302379 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002380
Gilad Avidovd0262342012-10-24 16:52:30 -06002381 pdata->ver_reg_exists = of_property_read_bool(node
2382 , "qcom,ver-reg-exists");
2383
2384 pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
2385
2386 if (pdata->use_bam) {
2387 rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
2388 &pdata->bam_consumer_pipe_index);
2389 if (rc) {
2390 dev_warn(&pdev->dev,
2391 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2392 pdata->use_bam = false;
2393 }
2394
2395 rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
2396 &pdata->bam_producer_pipe_index);
2397 if (rc) {
2398 dev_warn(&pdev->dev,
2399 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2400 pdata->use_bam = false;
2401 }
2402 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002403 return pdata;
2404}
2405
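/*
 * Read the QUP hardware version register; 2.1.1 or later indicates a
 * B-family QUP (BAM-capable).
 */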
Gilad Avidovd0262342012-10-24 16:52:30 -06002406static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2407{
2408 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2409 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2410 : SPI_QUP_VERSION_NONE;
2411}
2412
2413static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2414 struct platform_device *pdev, struct spi_master *master)
2415{
2416 struct resource *resource;
2417 size_t bam_mem_size;
2418
2419 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2420 "spi_bam_physical");
2421 if (!resource) {
2422 dev_warn(&pdev->dev,
2423 "%s: Missing spi_bam_physical entry in DT",
2424 __func__);
2425 return -ENXIO;
2426 }
2427
2428 dd->bam.phys_addr = resource->start;
2429 bam_mem_size = resource_size(resource);
2430 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2431 bam_mem_size);
2432 if (!dd->bam.base) {
2433 dev_warn(&pdev->dev,
2434 "%s: Failed to ioremap(spi_bam_physical)",
2435 __func__);
2436 return -ENXIO;
2437 }
2438
2439 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2440 if (dd->bam.irq < 0) {
2441 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2442 __func__);
2443 return -EINVAL;
2444 }
2445
2446 dd->dma_init = msm_spi_bam_init;
2447 dd->dma_teardown = msm_spi_bam_teardown;
2448 return 0;
2449}
2450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002451static int __init msm_spi_probe(struct platform_device *pdev)
2452{
2453 struct spi_master *master;
2454 struct msm_spi *dd;
2455 struct resource *resource;
2456 int rc = -ENXIO;
2457 int locked = 0;
2458 int i = 0;
2459 int clk_enabled = 0;
2460 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002461 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002462 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002463
2464 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2465 if (!master) {
2466 rc = -ENOMEM;
2467 dev_err(&pdev->dev, "master allocation failed\n");
2468 goto err_probe_exit;
2469 }
2470
2471 master->bus_num = pdev->id;
2472 master->mode_bits = SPI_SUPPORTED_MODES;
2473 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2474 master->setup = msm_spi_setup;
2475 master->transfer = msm_spi_transfer;
2476 platform_set_drvdata(pdev, master);
2477 dd = spi_master_get_devdata(master);
2478
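/* Gather board data either from device-tree or from legacy platform_data */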
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002479 if (pdev->dev.of_node) {
2480 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2481 master->dev.of_node = pdev->dev.of_node;
2482 pdata = msm_spi_dt_to_pdata(pdev);
2483 if (!pdata) {
2484 rc = -ENOMEM;
2485 goto err_probe_exit;
2486 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002487
Gilad Avidov0697ea62013-02-11 16:46:38 -07002488 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2489 if (rc < 0)
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002490 dev_warn(&pdev->dev,
2491 "using default bus_num %d\n", pdev->id);
2492 else
Gilad Avidov0697ea62013-02-11 16:46:38 -07002493 master->bus_num = pdev->id = rc;
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002494
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002495 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2496 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
2497 i, &flags);
2498 }
2499
2500 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2501 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
2502 pdev->dev.of_node, "cs-gpios",
2503 i, &flags);
2504 dd->cs_gpios[i].valid = 0;
2505 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002506 } else {
2507 pdata = pdev->dev.platform_data;
2508 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002509
2510 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2511 resource = platform_get_resource(pdev, IORESOURCE_IO,
2512 i);
2513 dd->spi_gpios[i] = resource ? resource->start : -1;
2514 }
2515
2516 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2517 resource = platform_get_resource(pdev, IORESOURCE_IO,
2518 i + ARRAY_SIZE(spi_rsrcs));
2519 dd->cs_gpios[i].gpio_num = resource ?
2520 resource->start : -1;
2521 dd->cs_gpios[i].valid = 0;
2522 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002523 }
2524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002525 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002526 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002527 if (!resource) {
2528 rc = -ENXIO;
2529 goto err_probe_res;
2530 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002531
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002532 dd->mem_phys_addr = resource->start;
2533 dd->mem_size = resource_size(resource);
2534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002535 if (pdata) {
2536 if (pdata->dma_config) {
2537 rc = pdata->dma_config();
2538 if (rc) {
2539 dev_warn(&pdev->dev,
2540 "%s: DM mode not supported\n",
2541 __func__);
2542 dd->use_dma = 0;
2543 goto skip_dma_resources;
2544 }
2545 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002546 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2547 resource = platform_get_resource(pdev,
2548 IORESOURCE_DMA, 0);
2549 if (resource) {
2550 dd->rx_dma_chan = resource->start;
2551 dd->tx_dma_chan = resource->end;
2552 resource = platform_get_resource(pdev,
2553 IORESOURCE_DMA, 1);
2554 if (!resource) {
2555 rc = -ENXIO;
2556 goto err_probe_res;
2557 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002558
Gilad Avidovd0262342012-10-24 16:52:30 -06002559 dd->rx_dma_crci = resource->start;
2560 dd->tx_dma_crci = resource->end;
2561 dd->use_dma = 1;
2562 master->dma_alignment =
2563 dma_get_cache_alignment();
2564 dd->dma_init = msm_spi_dmov_init;
2565 dd->dma_teardown = msm_spi_dmov_teardown;
2566 }
2567 } else {
2568 if (!dd->pdata->use_bam)
2569 goto skip_dma_resources;
2570
2571 rc = msm_spi_bam_get_resources(dd, pdev, master);
2572 if (rc) {
2573 dev_warn(dd->dev,
2574 "%s: Faild to get BAM resources",
2575 __func__);
2576 goto skip_dma_resources;
2577 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002578 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002579 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002580 }
2581
Alok Chauhan66554a12012-08-22 19:54:45 +05302582skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002583
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002584 spin_lock_init(&dd->queue_lock);
2585 mutex_init(&dd->core_lock);
2586 INIT_LIST_HEAD(&dd->queue);
2587 INIT_WORK(&dd->work_data, msm_spi_workq);
2588 init_waitqueue_head(&dd->continue_suspend);
2589 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002590 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002591 if (!dd->workqueue)
2592 goto err_probe_workq;
2593
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002594 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2595 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002596 rc = -ENXIO;
2597 goto err_probe_reqmem;
2598 }
2599
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002600 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2601 if (!dd->base) {
2602 rc = -ENOMEM;
2603 goto err_probe_reqmem;
2604 }
2605
Gilad Avidovd0262342012-10-24 16:52:30 -06002606 if (pdata && pdata->ver_reg_exists) {
2607 enum msm_spi_qup_version ver =
2608 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2609 if (dd->qup_ver != ver)
2610 dev_warn(&pdev->dev,
2611 "%s: HW version different then initially assumed by probe",
2612 __func__);
2613 }
2614
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002615 if (pdata && pdata->rsl_id) {
2616 struct remote_mutex_id rmid;
2617 rmid.r_spinlock_id = pdata->rsl_id;
2618 rmid.delay_us = SPI_TRYLOCK_DELAY;
2619
2620 rc = remote_mutex_init(&dd->r_lock, &rmid);
2621 if (rc) {
2622 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2623 "(%s), (rc=%d)\n", rmid.r_spinlock_id,
2624 __func__, rc);
2625 goto err_probe_rlock_init;
2626 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002627
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002628 dd->use_rlock = 1;
2629 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302630 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002631 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002632 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002633
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002634 mutex_lock(&dd->core_lock);
2635 if (dd->use_rlock)
2636 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002637
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002638 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002639 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002640 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002641 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002642 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002643 rc = PTR_ERR(dd->clk);
2644 goto err_probe_clk_get;
2645 }
2646
Matt Wagantallac294852011-08-17 15:44:58 -07002647 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002648 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002649 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002650 rc = PTR_ERR(dd->pclk);
2651 goto err_probe_pclk_get;
2652 }
2653
2654 if (pdata && pdata->max_clock_speed)
2655 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2656
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002657 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002658 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002659 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002660 __func__);
2661 goto err_probe_clk_enable;
2662 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002663
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002664 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002665 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002666 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002667 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002668 __func__);
2669 goto err_probe_pclk_enable;
2670 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002671
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002672 pclk_enabled = 1;
Gilad Avidovd0262342012-10-24 16:52:30 -06002673 /* GSBI does not exist on B-family MSM chips */
2674 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2675 rc = msm_spi_configure_gsbi(dd, pdev);
2676 if (rc)
2677 goto err_probe_gsbi;
2678 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002679
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002680 msm_spi_calculate_fifo_size(dd);
2681 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002682 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002683 if (rc)
2684 goto err_probe_dma;
2685 }
2686
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002687 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002688 /*
2689 * The SPI core generates a bogus input overrun error on some targets,
2690 * when a transition from run to reset state occurs and if the FIFO has
2691 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2692 * bit.
2693 */
2694 msm_spi_enable_error_flags(dd);
2695
2696 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2697 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2698 if (rc)
2699 goto err_probe_state;
2700
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002701 clk_disable_unprepare(dd->clk);
2702 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002703 clk_enabled = 0;
2704 pclk_enabled = 0;
2705
2706 dd->suspended = 0;
2707 dd->transfer_pending = 0;
2708 dd->multi_xfr = 0;
2709 dd->mode = SPI_MODE_NONE;
2710
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002711 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002712 if (rc)
2713 goto err_probe_irq;
2714
2715 msm_spi_disable_irqs(dd);
2716 if (dd->use_rlock)
2717 remote_mutex_unlock(&dd->r_lock);
2718
2719 mutex_unlock(&dd->core_lock);
2720 locked = 0;
2721
2722 rc = spi_register_master(master);
2723 if (rc)
2724 goto err_probe_reg_master;
2725
2726 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2727 if (rc) {
2728 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2729 goto err_attrs;
2730 }
2731
2732 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05302733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002734 return 0;
2735
2736err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002737 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002738err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002739err_probe_irq:
2740err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002741 if (dd->dma_teardown)
2742 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002743err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002744err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002745 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002746 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002747err_probe_pclk_enable:
2748 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002749 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002750err_probe_clk_enable:
2751 clk_put(dd->pclk);
2752err_probe_pclk_get:
2753 clk_put(dd->clk);
2754err_probe_clk_get:
2755 if (locked) {
2756 if (dd->use_rlock)
2757 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002758
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002759 mutex_unlock(&dd->core_lock);
2760 }
2761err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002762err_probe_reqmem:
2763 destroy_workqueue(dd->workqueue);
2764err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002765err_probe_res:
2766 spi_master_put(master);
2767err_probe_exit:
2768 return rc;
2769}
2770
2771#ifdef CONFIG_PM
2772static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2773{
2774 struct spi_master *master = platform_get_drvdata(pdev);
2775 struct msm_spi *dd;
2776 unsigned long flags;
2777
2778 if (!master)
2779 goto suspend_exit;
2780 dd = spi_master_get_devdata(master);
2781 if (!dd)
2782 goto suspend_exit;
2783
2784 /* Make sure nothing is added to the queue while we're suspending */
2785 spin_lock_irqsave(&dd->queue_lock, flags);
2786 dd->suspended = 1;
2787 spin_unlock_irqrestore(&dd->queue_lock, flags);
2788
2789 /* Wait for transactions to end, or time out */
2790 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002791
2792suspend_exit:
2793 return 0;
2794}
2795
2796static int msm_spi_resume(struct platform_device *pdev)
2797{
2798 struct spi_master *master = platform_get_drvdata(pdev);
2799 struct msm_spi *dd;
2800
2801 if (!master)
2802 goto resume_exit;
2803 dd = spi_master_get_devdata(master);
2804 if (!dd)
2805 goto resume_exit;
2806
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002807 dd->suspended = 0;
2808resume_exit:
2809 return 0;
2810}
2811#else
2812#define msm_spi_suspend NULL
2813#define msm_spi_resume NULL
2814#endif /* CONFIG_PM */
2815
2816static int __devexit msm_spi_remove(struct platform_device *pdev)
2817{
2818 struct spi_master *master = platform_get_drvdata(pdev);
2819 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002820
2821 pm_qos_remove_request(&qos_req_list);
2822 spi_debugfs_exit(dd);
2823 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2824
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002825 if (dd->dma_teardown)
2826 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002827 clk_put(dd->clk);
2828 clk_put(dd->pclk);
2829 destroy_workqueue(dd->workqueue);
2830 platform_set_drvdata(pdev, NULL);
2831 spi_unregister_master(master);
2832 spi_master_put(master);
2833
2834 return 0;
2835}
2836
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002837static struct of_device_id msm_spi_dt_match[] = {
2838 {
2839 .compatible = "qcom,spi-qup-v2",
2840 },
2841 {}
2842};
2843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002844static struct platform_driver msm_spi_driver = {
2845 .driver = {
2846 .name = SPI_DRV_NAME,
2847 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002848 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002849 },
2850 .suspend = msm_spi_suspend,
2851 .resume = msm_spi_resume,
2852 .remove = __exit_p(msm_spi_remove),
2853};
2854
2855static int __init msm_spi_init(void)
2856{
2857 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2858}
2859module_init(msm_spi_init);
2860
2861static void __exit msm_spi_exit(void)
2862{
2863 platform_driver_unregister(&msm_spi_driver);
2864}
2865module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002866
2867MODULE_LICENSE("GPL v2");
2868MODULE_VERSION("0.4");
2869MODULE_ALIAS("platform:"SPI_DRV_NAME);