Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
17#include <linux/version.h>
18#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070019#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <linux/init.h>
21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25#include <linux/spi/spi.h>
26#include <linux/interrupt.h>
27#include <linux/err.h>
28#include <linux/clk.h>
29#include <linux/delay.h>
30#include <linux/workqueue.h>
31#include <linux/io.h>
32#include <linux/debugfs.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033#include <linux/gpio.h>
34#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070035#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070036#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070037#include <linux/of_gpio.h>
Gilad Avidovd0262342012-10-24 16:52:30 -060038#include <linux/dma-mapping.h>
39#include <linux/sched.h>
40#include <linux/mutex.h>
41#include <linux/atomic.h>
42#include <mach/msm_spi.h>
43#include <mach/sps.h>
44#include <mach/dma.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070045#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070047static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
48 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049{
50 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070051 unsigned long gsbi_mem_phys_addr;
52 size_t gsbi_mem_size;
53 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070055 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070057 return 0;
58
59 gsbi_mem_phys_addr = resource->start;
60 gsbi_mem_size = resource_size(resource);
61 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
62 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070064
65 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
66 gsbi_mem_size);
67 if (!gsbi_base)
68 return -ENXIO;
69
70 /* Set GSBI to SPI mode */
71 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072
73 return 0;
74}
75
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070076static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070077{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070078 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
79 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
80 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
81 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
82 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
83 if (dd->qup_ver)
84 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085}
86
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070087static inline int msm_spi_request_gpios(struct msm_spi *dd)
88{
89 int i;
90 int result = 0;
91
92 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
93 if (dd->spi_gpios[i] >= 0) {
94 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
95 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -060096 dev_err(dd->dev, "%s: gpio_request for pin %d "
97 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070098 dd->spi_gpios[i], result);
99 goto error;
100 }
101 }
102 }
103 return 0;
104
105error:
106 for (; --i >= 0;) {
107 if (dd->spi_gpios[i] >= 0)
108 gpio_free(dd->spi_gpios[i]);
109 }
110 return result;
111}
112
113static inline void msm_spi_free_gpios(struct msm_spi *dd)
114{
115 int i;
116
117 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
118 if (dd->spi_gpios[i] >= 0)
119 gpio_free(dd->spi_gpios[i]);
120 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600121
122 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
123 if (dd->cs_gpios[i].valid) {
124 gpio_free(dd->cs_gpios[i].gpio_num);
125 dd->cs_gpios[i].valid = 0;
126 }
127 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700128}
129
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600130/**
131 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
132 * @clk the clock for which to find nearest lower rate
133 * @rate clock frequency in Hz
134 * @return nearest lower rate or negative error value
135 *
 136 * The public clock API provides clk_round_rate(), which is a ceiling
 137 * function. This function is a floor function, implemented as a binary
 138 * search using that ceiling function.
139 */
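/*
 * Illustrative example (hypothetical rates): if 9.6 MHz is requested but
 * clk_round_rate() rounds it up to 10 MHz, the search starts at 9.6 MHz,
 * halves the step size each time it overshoots, and converges on the
 * nearest supported rate below 9.6 MHz within at most max_steps probes.
 */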
140static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
141{
142 long lowest_available, nearest_low, step_size, cur;
143 long step_direction = -1;
144 long guess = rate;
145 int max_steps = 10;
146
147 cur = clk_round_rate(clk, rate);
148 if (cur == rate)
149 return rate;
150
151 /* if we got here then: cur > rate */
152 lowest_available = clk_round_rate(clk, 0);
153 if (lowest_available > rate)
154 return -EINVAL;
155
156 step_size = (rate - lowest_available) >> 1;
157 nearest_low = lowest_available;
158
159 while (max_steps-- && step_size) {
160 guess += step_size * step_direction;
161
162 cur = clk_round_rate(clk, guess);
163
164 if ((cur < rate) && (cur > nearest_low))
165 nearest_low = cur;
166
167 /*
168 * if we stepped too far, then start stepping in the other
169 * direction with half the step size
170 */
171 if (((cur > rate) && (step_direction > 0))
172 || ((cur < rate) && (step_direction < 0))) {
173 step_direction = -step_direction;
174 step_size >>= 1;
175 }
176 }
177 return nearest_low;
178}
179
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180static void msm_spi_clock_set(struct msm_spi *dd, int speed)
181{
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600182 long rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700183 int rc;
184
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600185 rate = msm_spi_clk_max_rate(dd->clk, speed);
186 if (rate < 0) {
187 dev_err(dd->dev,
188 "%s: no match found for requested clock frequency:%d",
189 __func__, speed);
190 return;
191 }
192
193 rc = clk_set_rate(dd->clk, rate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194 if (!rc)
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600195 dd->clock_speed = rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700196}
197
198static int msm_spi_calculate_size(int *fifo_size,
199 int *block_size,
200 int block,
201 int mult)
202{
203 int words;
204
205 switch (block) {
206 case 0:
207 words = 1; /* 4 bytes */
208 break;
209 case 1:
210 words = 4; /* 16 bytes */
211 break;
212 case 2:
213 words = 8; /* 32 bytes */
214 break;
215 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700216 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700218
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700219 switch (mult) {
220 case 0:
221 *fifo_size = words * 2;
222 break;
223 case 1:
224 *fifo_size = words * 4;
225 break;
226 case 2:
227 *fifo_size = words * 8;
228 break;
229 case 3:
230 *fifo_size = words * 16;
231 break;
232 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700233 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700235
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236 *block_size = words * sizeof(u32); /* in bytes */
237 return 0;
238}
239
240static void get_next_transfer(struct msm_spi *dd)
241{
242 struct spi_transfer *t = dd->cur_transfer;
243
244 if (t->transfer_list.next != &dd->cur_msg->transfers) {
245 dd->cur_transfer = list_entry(t->transfer_list.next,
246 struct spi_transfer,
247 transfer_list);
248 dd->write_buf = dd->cur_transfer->tx_buf;
249 dd->read_buf = dd->cur_transfer->rx_buf;
250 }
251}
252
253static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
254{
255 u32 spi_iom;
256 int block;
257 int mult;
258
259 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
260
261 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
262 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
263 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
264 block, mult)) {
265 goto fifo_size_err;
266 }
267
268 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
269 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
270 if (msm_spi_calculate_size(&dd->output_fifo_size,
271 &dd->output_block_size, block, mult)) {
272 goto fifo_size_err;
273 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600274 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
275 /* DM mode is not available for this block size */
276 if (dd->input_block_size == 4 || dd->output_block_size == 4)
277 dd->use_dma = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530279 if (dd->use_dma) {
280 dd->input_burst_size = max(dd->input_block_size,
281 DM_BURST_SIZE);
282 dd->output_burst_size = max(dd->output_block_size,
283 DM_BURST_SIZE);
284 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600285 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286
287 return;
288
289fifo_size_err:
290 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700291 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292 return;
293}
294
295static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
296{
297 u32 data_in;
298 int i;
299 int shift;
300
301 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
302 if (dd->read_buf) {
303 for (i = 0; (i < dd->bytes_per_word) &&
304 dd->rx_bytes_remaining; i++) {
305 /* The data format depends on bytes_per_word:
306 4 bytes: 0x12345678
307 3 bytes: 0x00123456
308 2 bytes: 0x00001234
309 1 byte : 0x00000012
310 */
311 shift = 8 * (dd->bytes_per_word - i - 1);
312 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
313 dd->rx_bytes_remaining--;
314 }
315 } else {
316 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
317 dd->rx_bytes_remaining -= dd->bytes_per_word;
318 else
319 dd->rx_bytes_remaining = 0;
320 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700321
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700322 dd->read_xfr_cnt++;
323 if (dd->multi_xfr) {
324 if (!dd->rx_bytes_remaining)
325 dd->read_xfr_cnt = 0;
326 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
327 dd->read_len) {
328 struct spi_transfer *t = dd->cur_rx_transfer;
329 if (t->transfer_list.next != &dd->cur_msg->transfers) {
330 t = list_entry(t->transfer_list.next,
331 struct spi_transfer,
332 transfer_list);
333 dd->read_buf = t->rx_buf;
334 dd->read_len = t->len;
335 dd->read_xfr_cnt = 0;
336 dd->cur_rx_transfer = t;
337 }
338 }
339 }
340}
341
342static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
343{
344 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
345
346 return spi_op & SPI_OP_STATE_VALID;
347}
348
Sagar Dharia525593d2012-11-02 18:26:01 -0600349static inline void msm_spi_udelay(unsigned long delay_usecs)
350{
351 /*
352 * For smaller values of delay, context switch time
353 * would negate the usage of usleep
354 */
355 if (delay_usecs > 20)
356 usleep_range(delay_usecs, delay_usecs);
357 else if (delay_usecs)
358 udelay(delay_usecs);
359}
360
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361static inline int msm_spi_wait_valid(struct msm_spi *dd)
362{
363 unsigned long delay = 0;
364 unsigned long timeout = 0;
365
366 if (dd->clock_speed == 0)
367 return -EINVAL;
368 /*
369 * Based on the SPI clock speed, sufficient time
370 * should be given for the SPI state transition
371 * to occur
372 */
373 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
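	/* e.g. at a 1 MHz SPI clock this evaluates to a 10 us poll interval */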
374 /*
375 * For small delay values, the default timeout would
376 * be one jiffy
377 */
378 if (delay < SPI_DELAY_THRESHOLD)
379 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600380
381 /* Adding one to round off to the nearest jiffy */
382 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700383 while (!msm_spi_is_valid_state(dd)) {
384 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600385 if (!msm_spi_is_valid_state(dd)) {
386 if (dd->cur_msg)
387 dd->cur_msg->status = -EIO;
388 dev_err(dd->dev, "%s: SPI operational state"
389 "not valid\n", __func__);
390 return -ETIMEDOUT;
391 } else
392 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700393 }
Sagar Dharia525593d2012-11-02 18:26:01 -0600394 msm_spi_udelay(delay);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700395 }
396 return 0;
397}
398
399static inline int msm_spi_set_state(struct msm_spi *dd,
400 enum msm_spi_state state)
401{
402 enum msm_spi_state cur_state;
403 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700404 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700405 cur_state = readl_relaxed(dd->base + SPI_STATE);
406 /* Per spec:
407 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
408 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
409 (state == SPI_OP_STATE_RESET)) {
410 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
411 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
412 } else {
413 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
414 dd->base + SPI_STATE);
415 }
416 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700417 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418
419 return 0;
420}
421
Gilad Avidovd0262342012-10-24 16:52:30 -0600422/**
423 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
424 */
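/* e.g. for an 8 bits-per-word transfer the caller passes n = 7, i.e. N - 1 */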
425static inline void
426msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427{
428 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
429
430 if (n != (*config & SPI_CFG_N))
431 *config = (*config & ~SPI_CFG_N) | n;
432
Gilad Avidovd0262342012-10-24 16:52:30 -0600433 if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
434 || (dd->mode == SPI_BAM_MODE)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700435 if (dd->read_buf == NULL)
436 *config |= SPI_NO_INPUT;
437 if (dd->write_buf == NULL)
438 *config |= SPI_NO_OUTPUT;
439 }
440}
441
Gilad Avidovd0262342012-10-24 16:52:30 -0600442/**
443 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
444 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 445 * @return calculated value for SPI_CONFIG
446 */
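/* e.g. SPI mode 0 (CPHA = 0) sets SPI_CFG_INPUT_FIRST; mode 1 (CPHA = 1) clears it */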
447static u32
448msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700449{
Gilad Avidovd0262342012-10-24 16:52:30 -0600450 if (mode & SPI_LOOP)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451 spi_config |= SPI_CFG_LOOPBACK;
452 else
453 spi_config &= ~SPI_CFG_LOOPBACK;
Gilad Avidovd0262342012-10-24 16:52:30 -0600454
455 if (mode & SPI_CPHA)
456 spi_config &= ~SPI_CFG_INPUT_FIRST;
457 else
458 spi_config |= SPI_CFG_INPUT_FIRST;
459
460 return spi_config;
461}
462
463/**
464 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
465 * next transfer
466 */
467static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
468{
469 u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
470 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
471 spi_config, dd->cur_msg->spi->mode);
472
473 if (dd->qup_ver == SPI_QUP_VERSION_NONE)
474 /* flags removed from SPI_CONFIG in QUP version-2 */
475 msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
476 else if (dd->mode == SPI_BAM_MODE)
477 spi_config |= SPI_CFG_INPUT_FIRST;
478
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -0600480}
481
482/**
 483 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count
 484 * (msm_spi_set_write_count) for FIFO mode. Set SPI_MX_INPUT_COUNT and
 485 * SPI_MX_OUTPUT_COUNT for BAM and DMOV modes.
486 * @n_words The number of reads/writes of size N.
487 */
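/*
 * e.g. a FIFO-mode transfer of 100 words on a 32-word input FIFO leaves
 * both counts at zero, since only one READ COUNT interrupt would fire.
 */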
488static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
489{
490 /*
491 * n_words cannot exceed fifo_size, and only one READ COUNT
492 * interrupt is generated per transaction, so for transactions
493 * larger than fifo size READ COUNT must be disabled.
494 * For those transactions we usually move to Data Mover mode.
495 */
496 if (dd->mode == SPI_FIFO_MODE) {
497 if (n_words <= dd->input_fifo_size) {
498 writel_relaxed(n_words,
499 dd->base + SPI_MX_READ_COUNT);
500 msm_spi_set_write_count(dd, n_words);
501 } else {
502 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
503 msm_spi_set_write_count(dd, 0);
504 }
505 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
506 /* must be zero for FIFO */
507 writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
508 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
509 }
510 } else {
511 /* must be zero for BAM and DMOV */
512 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
513 msm_spi_set_write_count(dd, 0);
514
515 /*
516 * for DMA transfers, both QUP_MX_INPUT_COUNT and
 517 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
518 * That case is a non-balanced transfer when there is
519 * only a read_buf.
520 */
521 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
522 if (dd->write_buf)
523 writel_relaxed(0,
524 dd->base + SPI_MX_INPUT_COUNT);
525 else
526 writel_relaxed(n_words,
527 dd->base + SPI_MX_INPUT_COUNT);
528
529 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
530 }
531 }
532}
533
534/**
535 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
536 * using BAM.
 537 * @brief BAM can transfer at most SPI_MAX_TRFR_BTWN_RESETS bytes in a
 538 * single transfer. Between transfers the QUP must change to reset state.
 539 * A loop issues one BAM transfer at a time. If another transfer is
 540 * required, it waits for the current transfer to finish, then moves to
 541 * reset state and back to run state to issue the next transfer.
 542 * The function does not wait for the last transfer to end; likewise, if
 543 * only a single transfer is required, the function does not wait for it
 544 * to end.
545 * @timeout max time in jiffies to wait for a transfer to finish.
546 * @return zero on success
547 */
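/*
 * Illustrative example (assuming SPI_MAX_TRFR_BTWN_RESETS is 64K-1): a
 * 150000-byte transfer is queued as three BAM sub-transfers of 65535,
 * 65535 and 18930 bytes, with a reset->run cycle between them.
 */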
548static int
549msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
550{
551 u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
552 int ret;
553 /*
554 * QUP must move to reset mode every 64K-1 bytes of transfer
555 * (counter is 16 bit)
556 */
557 if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
558 /* assert chip select unconditionally */
559 u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
560 if (!(spi_ioc & SPI_IO_C_FORCE_CS))
561 writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
562 dd->base + SPI_IO_CONTROL);
563 }
564
565 /* Following flags are required since we are waiting on all transfers */
566 cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
567 /*
568 * on a balanced transaction, BAM will set the flags on the producer
569 * pipe based on the flags set on the consumer pipe
570 */
571 prod_flags = (dd->write_buf) ? 0 : cons_flags;
572
573 while (dd->tx_bytes_remaining > 0) {
574 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
575 bytes_to_send = min_t(u32, dd->tx_bytes_remaining
576 , SPI_MAX_TRFR_BTWN_RESETS);
577 n_words_xfr = DIV_ROUND_UP(bytes_to_send
578 , dd->bytes_per_word);
579
580 msm_spi_set_mx_counts(dd, n_words_xfr);
581
582 ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
583 if (ret < 0) {
584 dev_err(dd->dev,
585 "%s: Failed to set QUP state to run",
586 __func__);
587 goto xfr_err;
588 }
589
590 /* enqueue read buffer in BAM */
591 if (dd->read_buf) {
592 ret = sps_transfer_one(dd->bam.prod.handle,
593 dd->cur_transfer->rx_dma + bytes_sent,
594 bytes_to_send, dd, prod_flags);
595 if (ret < 0) {
596 dev_err(dd->dev,
597 "%s: Failed to queue producer BAM transfer",
598 __func__);
599 goto xfr_err;
600 }
601 }
602
603 /* enqueue write buffer in BAM */
604 if (dd->write_buf) {
605 ret = sps_transfer_one(dd->bam.cons.handle,
606 dd->cur_transfer->tx_dma + bytes_sent,
607 bytes_to_send, dd, cons_flags);
608 if (ret < 0) {
609 dev_err(dd->dev,
610 "%s: Failed to queue consumer BAM transfer",
611 __func__);
612 goto xfr_err;
613 }
614 }
615
616 dd->tx_bytes_remaining -= bytes_to_send;
617
618 /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
619 if (dd->tx_bytes_remaining > 0) {
620 if (!wait_for_completion_timeout(
621 &dd->transfer_complete, timeout)) {
622 dev_err(dd->dev,
623 "%s: SPI transaction timeout",
624 __func__);
625 dd->cur_msg->status = -EIO;
626 ret = -EIO;
627 goto xfr_err;
628 }
629 ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
630 if (ret < 0) {
631 dev_err(dd->dev,
632 "%s: Failed to set QUP state to reset",
633 __func__);
634 goto xfr_err;
635 }
636 init_completion(&dd->transfer_complete);
637 }
638 }
639 return 0;
640
641xfr_err:
642 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700643}
644
645static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
646{
647 dmov_box *box;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530648 int bytes_to_send, bytes_sent;
649 int tx_num_rows, rx_num_rows;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700650 u32 num_transfers;
651
652 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530653 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700654 if (dd->write_len && !dd->read_len) {
655 /* WR-WR transfer */
656 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
657 dd->write_buf = dd->temp_buf;
658 } else {
659 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
660 /* For WR-RD transfer, bytes_sent can be negative */
661 if (bytes_sent < 0)
662 bytes_sent = 0;
663 }
664 /* We'll send in chunks of SPI_MAX_LEN if the transfer is
 665 * larger than 4K bytes, for targets that have only 12 bits
 666 * in the QUP_MAX_OUTPUT_CNT register. If the target supports
 667 * more than 12 bits, then we send the data in chunks of the
 668 * infinite_mode value that is defined in the corresponding
 669 * board file.
670 */
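	/*
	 * e.g. with infinite_mode == 0, a 10000-byte transfer is sent in
	 * SPI_MAX_LEN-sized chunks (assumed to be 4K on 12-bit targets),
	 * and msm_spi_dm_send_next() queues each follow-on chunk.
	 */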
671 if (!dd->pdata->infinite_mode)
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530672 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530673 else
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530674 dd->max_trfr_len = (dd->pdata->infinite_mode) *
675 (dd->bytes_per_word);
676
677 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
678 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530679
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700680 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530681 dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
682 dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
683 tx_num_rows = bytes_to_send / dd->output_burst_size;
684 rx_num_rows = bytes_to_send / dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700685
686 dd->mode = SPI_DMOV_MODE;
687
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530688 if (tx_num_rows) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700689 /* src in 16 MSB, dst in 16 LSB */
690 box = &dd->tx_dmov_cmd->box;
691 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530692 box->src_dst_len
693 = (dd->output_burst_size << 16) | dd->output_burst_size;
694 box->num_rows = (tx_num_rows << 16) | tx_num_rows;
695 box->row_offset = (dd->output_burst_size << 16) | 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700696
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530697 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
698 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
699 offsetof(struct spi_dmov_cmd, box));
700 } else {
701 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
702 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
703 offsetof(struct spi_dmov_cmd, single_pad));
704 }
705
706 if (rx_num_rows) {
707 /* src in 16 MSB, dst in 16 LSB */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700708 box = &dd->rx_dmov_cmd->box;
709 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530710 box->src_dst_len
711 = (dd->input_burst_size << 16) | dd->input_burst_size;
712 box->num_rows = (rx_num_rows << 16) | rx_num_rows;
713 box->row_offset = (0 << 16) | dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700715 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
716 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
717 offsetof(struct spi_dmov_cmd, box));
718 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700719 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
720 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
721 offsetof(struct spi_dmov_cmd, single_pad));
722 }
723
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530724 if (!dd->tx_unaligned_len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700725 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700726 } else {
727 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530728 u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700729
730 if ((dd->multi_xfr) && (dd->read_len <= 0))
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530731 tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700732
733 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700734
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530735 memset(dd->tx_padding, 0, dd->output_burst_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700736 if (dd->write_buf)
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530737 memcpy(dd->tx_padding, dd->write_buf + tx_offset,
738 dd->tx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700739
740 tx_cmd->src = dd->tx_padding_dma;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530741 tx_cmd->len = dd->output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700742 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530743
744 if (!dd->rx_unaligned_len) {
745 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
746 } else {
747 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
748 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
749
750 memset(dd->rx_padding, 0, dd->input_burst_size);
751 rx_cmd->dst = dd->rx_padding_dma;
752 rx_cmd->len = dd->input_burst_size;
753 }
754
755 /* This also takes care of the padding dummy buf.
 756 Since this is set to the correct length, the
 757 dummy bytes won't actually be sent. */
758 if (dd->multi_xfr) {
759 u32 write_transfers = 0;
760 u32 read_transfers = 0;
761
762 if (dd->write_len > 0) {
763 write_transfers = DIV_ROUND_UP(dd->write_len,
764 dd->bytes_per_word);
765 writel_relaxed(write_transfers,
766 dd->base + SPI_MX_OUTPUT_COUNT);
767 }
768 if (dd->read_len > 0) {
769 /*
 770 * The read following a write transfer must take
 771 * into account that the bytes pertaining to
 772 * the write transfer need to be discarded
 773 * before the actual read begins.
774 */
775 read_transfers = DIV_ROUND_UP(dd->read_len +
776 dd->write_len,
777 dd->bytes_per_word);
778 writel_relaxed(read_transfers,
779 dd->base + SPI_MX_INPUT_COUNT);
780 }
781 } else {
782 if (dd->write_buf)
783 writel_relaxed(num_transfers,
784 dd->base + SPI_MX_OUTPUT_COUNT);
785 if (dd->read_buf)
786 writel_relaxed(num_transfers,
787 dd->base + SPI_MX_INPUT_COUNT);
788 }
789}
790
791static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
792{
793 dma_coherent_pre_ops();
794 if (dd->write_buf)
795 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
796 if (dd->read_buf)
797 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
798}
799
800/* The SPI core on targets that do not support infinite mode can send a
 801 maximum of 4K or 64K bytes per transfer, depending on the size of the
 802 MAX_OUTPUT_COUNT register. Therefore, we send the data in several
 803 chunks. Upon completion we send the next chunk, or complete the
 804 transfer if everything is finished. On targets that support
805 infinite mode, we send all the bytes as a single chunk.
806*/
807static int msm_spi_dm_send_next(struct msm_spi *dd)
808{
 809 /* By now we should have sent all the bytes in FIFO mode.
 810 * However, to make things right, we'll check anyway.
811 */
812 if (dd->mode != SPI_DMOV_MODE)
813 return 0;
814
815 /* On targets that do not support infinite mode,
 816 we need to send more chunks if we sent the maximum last time */
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530817 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
818 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
820 return 0;
821 dd->read_len = dd->write_len = 0;
822 msm_spi_setup_dm_transfer(dd);
823 msm_spi_enqueue_dm_commands(dd);
824 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
825 return 0;
826 return 1;
827 } else if (dd->read_len && dd->write_len) {
828 dd->tx_bytes_remaining -= dd->cur_transfer->len;
829 if (list_is_last(&dd->cur_transfer->transfer_list,
830 &dd->cur_msg->transfers))
831 return 0;
832 get_next_transfer(dd);
833 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
834 return 0;
835 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
836 dd->read_buf = dd->temp_buf;
837 dd->read_len = dd->write_len = -1;
838 msm_spi_setup_dm_transfer(dd);
839 msm_spi_enqueue_dm_commands(dd);
840 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
841 return 0;
842 return 1;
843 }
844 return 0;
845}
846
847static inline void msm_spi_ack_transfer(struct msm_spi *dd)
848{
849 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
850 SPI_OP_MAX_OUTPUT_DONE_FLAG,
851 dd->base + SPI_OPERATIONAL);
852 /* Ensure done flag was cleared before proceeding further */
853 mb();
854}
855
856/* Figure out which IRQ occurred and call the relevant functions */
857static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
858{
859 u32 op, ret = IRQ_NONE;
860 struct msm_spi *dd = dev_id;
861
862 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
863 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
864 struct spi_master *master = dev_get_drvdata(dd->dev);
865 ret |= msm_spi_error_irq(irq, master);
866 }
867
868 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
869 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
870 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
871 dd->base + SPI_OPERATIONAL);
872 /*
873 * Ensure service flag was cleared before further
874 * processing of interrupt.
875 */
876 mb();
877 ret |= msm_spi_input_irq(irq, dev_id);
878 }
879
880 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
881 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
882 dd->base + SPI_OPERATIONAL);
883 /*
884 * Ensure service flag was cleared before further
885 * processing of interrupt.
886 */
887 mb();
888 ret |= msm_spi_output_irq(irq, dev_id);
889 }
890
891 if (dd->done) {
892 complete(&dd->transfer_complete);
893 dd->done = 0;
894 }
895 return ret;
896}
897
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
899{
900 struct msm_spi *dd = dev_id;
901
902 dd->stat_rx++;
903
904 if (dd->mode == SPI_MODE_NONE)
905 return IRQ_HANDLED;
906
907 if (dd->mode == SPI_DMOV_MODE) {
908 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
909 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
910 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
911 msm_spi_ack_transfer(dd);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530912 if (dd->rx_unaligned_len == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700913 if (atomic_inc_return(&dd->rx_irq_called) == 1)
914 return IRQ_HANDLED;
915 }
916 msm_spi_complete(dd);
917 return IRQ_HANDLED;
918 }
919 return IRQ_NONE;
920 }
921
922 if (dd->mode == SPI_FIFO_MODE) {
923 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
924 SPI_OP_IP_FIFO_NOT_EMPTY) &&
925 (dd->rx_bytes_remaining > 0)) {
926 msm_spi_read_word_from_fifo(dd);
927 }
928 if (dd->rx_bytes_remaining == 0)
929 msm_spi_complete(dd);
930 }
931
932 return IRQ_HANDLED;
933}
934
935static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
936{
937 u32 word;
938 u8 byte;
939 int i;
940
941 word = 0;
942 if (dd->write_buf) {
943 for (i = 0; (i < dd->bytes_per_word) &&
944 dd->tx_bytes_remaining; i++) {
945 dd->tx_bytes_remaining--;
946 byte = *dd->write_buf++;
947 word |= (byte << (BITS_PER_BYTE * (3 - i)));
948 }
949 } else
950 if (dd->tx_bytes_remaining > dd->bytes_per_word)
951 dd->tx_bytes_remaining -= dd->bytes_per_word;
952 else
953 dd->tx_bytes_remaining = 0;
954 dd->write_xfr_cnt++;
955 if (dd->multi_xfr) {
956 if (!dd->tx_bytes_remaining)
957 dd->write_xfr_cnt = 0;
958 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
959 dd->write_len) {
960 struct spi_transfer *t = dd->cur_tx_transfer;
961 if (t->transfer_list.next != &dd->cur_msg->transfers) {
962 t = list_entry(t->transfer_list.next,
963 struct spi_transfer,
964 transfer_list);
965 dd->write_buf = t->tx_buf;
966 dd->write_len = t->len;
967 dd->write_xfr_cnt = 0;
968 dd->cur_tx_transfer = t;
969 }
970 }
971 }
972 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
973}
974
975static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
976{
977 int count = 0;
978
979 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
980 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
981 SPI_OP_OUTPUT_FIFO_FULL)) {
982 msm_spi_write_word_to_fifo(dd);
983 count++;
984 }
985}
986
987static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
988{
989 struct msm_spi *dd = dev_id;
990
991 dd->stat_tx++;
992
993 if (dd->mode == SPI_MODE_NONE)
994 return IRQ_HANDLED;
995
996 if (dd->mode == SPI_DMOV_MODE) {
 997 /* A TX_ONLY transaction is handled here. This is the only place
 998 where we signal completion from the TX path rather than the RX path. */
999 if (dd->read_buf == NULL &&
1000 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1001 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1002 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301003 if (atomic_inc_return(&dd->tx_irq_called) == 1)
1004 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001005 msm_spi_complete(dd);
1006 return IRQ_HANDLED;
1007 }
1008 return IRQ_NONE;
1009 }
1010
1011 /* Output FIFO is empty. Transmit any outstanding write data. */
1012 if (dd->mode == SPI_FIFO_MODE)
1013 msm_spi_write_rmn_to_fifo(dd);
1014
1015 return IRQ_HANDLED;
1016}
1017
1018static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1019{
1020 struct spi_master *master = dev_id;
1021 struct msm_spi *dd = spi_master_get_devdata(master);
1022 u32 spi_err;
1023
1024 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1025 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1026 dev_warn(master->dev.parent, "SPI output overrun error\n");
1027 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1028 dev_warn(master->dev.parent, "SPI input underrun error\n");
1029 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1030 dev_warn(master->dev.parent, "SPI output underrun error\n");
1031 msm_spi_get_clk_err(dd, &spi_err);
1032 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1033 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1034 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1035 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1036 msm_spi_clear_error_flags(dd);
1037 msm_spi_ack_clk_err(dd);
1038 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1039 mb();
1040 return IRQ_HANDLED;
1041}
1042
Gilad Avidovd0262342012-10-24 16:52:30 -06001043/**
1044 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
1045 * @return zero on success or negative error code
1046 *
1047 * calls dma_map_single() on the read/write buffers, effectively invalidating
 1048 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 1049 * buffer and copies the data to/from the client buffers.
1050 */
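/*
 * e.g. a WR-WR message made of a 4-byte and a 12-byte write is copied into
 * a single 16-byte temp_buf so it can be mapped as one DMA_TO_DEVICE region.
 */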
1051static int msm_spi_dma_map_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001052{
1053 struct device *dev;
1054 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -06001055 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001056 void *tx_buf, *rx_buf;
1057 unsigned tx_len, rx_len;
1058 int ret = -EINVAL;
1059
1060 dev = &dd->cur_msg->spi->dev;
1061 first_xfr = dd->cur_transfer;
1062 tx_buf = (void *)first_xfr->tx_buf;
1063 rx_buf = first_xfr->rx_buf;
1064 tx_len = rx_len = first_xfr->len;
1065
1066 /*
1067 * For WR-WR and WR-RD transfers, we allocate our own temporary
1068 * buffer and copy the data to/from the client buffers.
1069 */
1070 if (dd->multi_xfr) {
1071 dd->temp_buf = kzalloc(dd->cur_msg_len,
1072 GFP_KERNEL | __GFP_DMA);
1073 if (!dd->temp_buf)
1074 return -ENOMEM;
1075 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1076 struct spi_transfer, transfer_list);
1077
1078 if (dd->write_len && !dd->read_len) {
1079 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1080 goto error;
1081
1082 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1083 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1084 nxt_xfr->len);
1085 tx_buf = dd->temp_buf;
1086 tx_len = dd->cur_msg_len;
1087 } else {
1088 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1089 goto error;
1090
1091 rx_buf = dd->temp_buf;
1092 rx_len = dd->cur_msg_len;
1093 }
1094 }
1095 if (tx_buf != NULL) {
1096 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1097 tx_len, DMA_TO_DEVICE);
1098 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1099 dev_err(dev, "dma %cX %d bytes error\n",
1100 'T', tx_len);
1101 ret = -ENOMEM;
1102 goto error;
1103 }
1104 }
1105 if (rx_buf != NULL) {
1106 dma_addr_t dma_handle;
1107 dma_handle = dma_map_single(dev, rx_buf,
1108 rx_len, DMA_FROM_DEVICE);
1109 if (dma_mapping_error(NULL, dma_handle)) {
1110 dev_err(dev, "dma %cX %d bytes error\n",
1111 'R', rx_len);
1112 if (tx_buf != NULL)
1113 dma_unmap_single(NULL, first_xfr->tx_dma,
1114 tx_len, DMA_TO_DEVICE);
1115 ret = -ENOMEM;
1116 goto error;
1117 }
1118 if (dd->multi_xfr)
1119 nxt_xfr->rx_dma = dma_handle;
1120 else
1121 first_xfr->rx_dma = dma_handle;
1122 }
1123 return 0;
1124
1125error:
1126 kfree(dd->temp_buf);
1127 dd->temp_buf = NULL;
1128 return ret;
1129}
1130
Gilad Avidovd0262342012-10-24 16:52:30 -06001131static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001132{
1133 struct device *dev;
1134 u32 offset;
1135
1136 dev = &dd->cur_msg->spi->dev;
1137 if (dd->cur_msg->is_dma_mapped)
1138 goto unmap_end;
1139
1140 if (dd->multi_xfr) {
1141 if (dd->write_len && !dd->read_len) {
1142 dma_unmap_single(dev,
1143 dd->cur_transfer->tx_dma,
1144 dd->cur_msg_len,
1145 DMA_TO_DEVICE);
1146 } else {
1147 struct spi_transfer *prev_xfr;
1148 prev_xfr = list_entry(
1149 dd->cur_transfer->transfer_list.prev,
1150 struct spi_transfer,
1151 transfer_list);
1152 if (dd->cur_transfer->rx_buf) {
1153 dma_unmap_single(dev,
1154 dd->cur_transfer->rx_dma,
1155 dd->cur_msg_len,
1156 DMA_FROM_DEVICE);
1157 }
1158 if (prev_xfr->tx_buf) {
1159 dma_unmap_single(dev,
1160 prev_xfr->tx_dma,
1161 prev_xfr->len,
1162 DMA_TO_DEVICE);
1163 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301164 if (dd->rx_unaligned_len && dd->read_buf) {
1165 offset = dd->cur_msg_len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001166 dma_coherent_post_ops();
1167 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301168 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169 memcpy(dd->cur_transfer->rx_buf,
1170 dd->read_buf + prev_xfr->len,
1171 dd->cur_transfer->len);
1172 }
1173 }
1174 kfree(dd->temp_buf);
1175 dd->temp_buf = NULL;
1176 return;
1177 } else {
1178 if (dd->cur_transfer->rx_buf)
1179 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1180 dd->cur_transfer->len,
1181 DMA_FROM_DEVICE);
1182 if (dd->cur_transfer->tx_buf)
1183 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1184 dd->cur_transfer->len,
1185 DMA_TO_DEVICE);
1186 }
1187
1188unmap_end:
1189 /* If we padded the transfer, we copy it from the padding buf */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301190 if (dd->rx_unaligned_len && dd->read_buf) {
1191 offset = dd->cur_transfer->len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001192 dma_coherent_post_ops();
1193 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301194 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001195 }
1196}
1197
Gilad Avidovd0262342012-10-24 16:52:30 -06001198static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
1199{
1200 struct device *dev;
1201
1202 /* mapped by client */
1203 if (dd->cur_msg->is_dma_mapped)
1204 return;
1205
1206 dev = &dd->cur_msg->spi->dev;
1207 if (dd->cur_transfer->rx_buf)
1208 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1209 dd->cur_transfer->len,
1210 DMA_FROM_DEVICE);
1211
1212 if (dd->cur_transfer->tx_buf)
1213 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1214 dd->cur_transfer->len,
1215 DMA_TO_DEVICE);
1216}
1217
1218static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
1219{
1220 if (dd->mode == SPI_DMOV_MODE)
1221 msm_spi_dmov_unmap_buffers(dd);
1222 else if (dd->mode == SPI_BAM_MODE)
1223 msm_spi_bam_unmap_buffers(dd);
1224}
1225
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001226/**
Gilad Avidovd0262342012-10-24 16:52:30 -06001227 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
1228 * the given transfer
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001229 * @dd: device
1230 * @tr: transfer
1231 *
Gilad Avidovd0262342012-10-24 16:52:30 -06001232 * Start using DMA if:
1233 * 1. Is supported by HW
 1234 * 2. Is not disabled by platform data
1235 * 3. Transfer size is greater than 3*block size.
1236 * 4. Buffers are aligned to cache line.
 1237 * 5. Bits-per-word is 8, 16 or 32.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001238 */
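/*
 * e.g. a 16-byte transfer on a controller with a 16-byte input block stays
 * in FIFO mode, since it falls below the 3 * block-size threshold.
 */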
Gilad Avidovd0262342012-10-24 16:52:30 -06001239static inline bool
1240msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001241{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001242 if (!dd->use_dma)
Gilad Avidovd0262342012-10-24 16:52:30 -06001243 return false;
1244
1245 /* check constraints from platform data */
1246 if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
1247 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001248
1249 if (dd->cur_msg_len < 3*dd->input_block_size)
Gilad Avidovd0262342012-10-24 16:52:30 -06001250 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001251
1252 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
Gilad Avidovd0262342012-10-24 16:52:30 -06001253 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001254
Gilad Avidovd0262342012-10-24 16:52:30 -06001255 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
1256 u32 cache_line = dma_get_cache_alignment();
1257
1258 if (tr->tx_buf) {
1259 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
 1260 return false;
1261 }
1262 if (tr->rx_buf) {
1263 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1264 return false;
1265 }
1266
1267 if (tr->cs_change &&
 1268 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1269 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001270 }
1271
Gilad Avidovd0262342012-10-24 16:52:30 -06001272 return true;
1273}
1274
1275/**
1276 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
1277 * prepares to process a transfer.
1278 */
1279static void
1280msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
1281{
1282 if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
1283 if (dd->qup_ver) {
1284 dd->mode = SPI_BAM_MODE;
1285 } else {
1286 dd->mode = SPI_DMOV_MODE;
1287 if (dd->write_len && dd->read_len) {
1288 dd->tx_bytes_remaining = dd->write_len;
1289 dd->rx_bytes_remaining = dd->read_len;
1290 }
1291 }
1292 } else {
1293 dd->mode = SPI_FIFO_MODE;
1294 if (dd->multi_xfr) {
1295 dd->read_len = dd->cur_transfer->len;
1296 dd->write_len = dd->cur_transfer->len;
1297 }
1298 }
1299}
1300
1301/**
1302 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
1303 * transfer
1304 */
1305static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
1306{
1307 u32 spi_iom;
1308 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1309 /* Set input and output transfer mode: FIFO, DMOV, or BAM */
1310 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1311 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1312 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1313 /* Turn on packing for data mover */
1314 if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
1315 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1316 else
1317 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1318
1319 /*if (dd->mode == SPI_BAM_MODE) {
1320 spi_iom |= SPI_IO_C_NO_TRI_STATE;
1321 spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
1322 }*/
1323 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1324}
1325
1326static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
1327{
1328 if (mode & SPI_CPOL)
1329 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1330 else
1331 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1332 return spi_ioc;
1333}
1334
1335/**
1336 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
1337 * next transfer
1338 * @return the new set value of SPI_IO_CONTROL
1339 */
1340static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
1341{
1342 u32 spi_ioc, spi_ioc_orig, chip_select;
1343
1344 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1345 spi_ioc_orig = spi_ioc;
1346 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
1347 , dd->cur_msg->spi->mode);
1348 /* Set chip-select */
1349 chip_select = dd->cur_msg->spi->chip_select << 2;
1350 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1351 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1352 if (!dd->cur_transfer->cs_change)
1353 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1354
1355 if (spi_ioc != spi_ioc_orig)
1356 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1357
1358 return spi_ioc;
1359}
1360
1361/**
1362 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
1363 * the next transfer
1364 */
1365static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
1366{
 1367 /* mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO status
1368 * change in BAM mode */
1369 u32 mask = (dd->mode == SPI_BAM_MODE) ?
1370 QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
1371 : 0;
1372 writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001373}
1374
1375static void msm_spi_process_transfer(struct msm_spi *dd)
1376{
1377 u8 bpw;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001378 u32 max_speed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001379 u32 read_count;
1380 u32 timeout;
Gilad Avidovd0262342012-10-24 16:52:30 -06001381 u32 spi_ioc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001382 u32 int_loopback = 0;
1383
1384 dd->tx_bytes_remaining = dd->cur_msg_len;
1385 dd->rx_bytes_remaining = dd->cur_msg_len;
1386 dd->read_buf = dd->cur_transfer->rx_buf;
1387 dd->write_buf = dd->cur_transfer->tx_buf;
1388 init_completion(&dd->transfer_complete);
1389 if (dd->cur_transfer->bits_per_word)
1390 bpw = dd->cur_transfer->bits_per_word;
1391 else
1392 if (dd->cur_msg->spi->bits_per_word)
1393 bpw = dd->cur_msg->spi->bits_per_word;
1394 else
1395 bpw = 8;
1396 dd->bytes_per_word = (bpw + 7) / 8;
1397
1398 if (dd->cur_transfer->speed_hz)
1399 max_speed = dd->cur_transfer->speed_hz;
1400 else
1401 max_speed = dd->cur_msg->spi->max_speed_hz;
1402 if (!dd->clock_speed || max_speed != dd->clock_speed)
1403 msm_spi_clock_set(dd, max_speed);
1404
Gilad Avidovd0262342012-10-24 16:52:30 -06001405 timeout = 100 * msecs_to_jiffies(
1406 DIV_ROUND_UP(dd->cur_msg_len * 8,
1407 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
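	/*
	 * e.g. 960 bytes at 9.6 MHz: 7680 bits / 9600 kHz rounds up to 1 ms,
	 * so the timeout is 100 ms worth of jiffies.
	 */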
1408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1410 if (dd->cur_msg->spi->mode & SPI_LOOP)
1411 int_loopback = 1;
1412 if (int_loopback && dd->multi_xfr &&
1413 (read_count > dd->input_fifo_size)) {
1414 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001415 pr_err(
1416 "%s:Internal Loopback does not support > fifo size"
1417 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 __func__);
1419 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001420 pr_err(
1421 "%s:Internal Loopback does not support > fifo size"
1422 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001423 __func__);
1424 return;
1425 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001426
Gilad Avidovd0262342012-10-24 16:52:30 -06001427 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1428 dev_err(dd->dev,
1429 "%s: Error setting QUP to reset-state",
1430 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431
Gilad Avidovd0262342012-10-24 16:52:30 -06001432 msm_spi_set_transfer_mode(dd, bpw, read_count);
1433 msm_spi_set_mx_counts(dd, read_count);
1434 if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
1435 if (msm_spi_dma_map_buffers(dd) < 0) {
 1436 pr_err("Error mapping DMA buffers\n");
1437 return;
1438 }
1439 msm_spi_set_qup_io_modes(dd);
1440 msm_spi_set_spi_config(dd, bpw);
1441 msm_spi_set_qup_config(dd, bpw);
1442 spi_ioc = msm_spi_set_spi_io_control(dd);
1443 msm_spi_set_qup_op_mask(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001444
1445 if (dd->mode == SPI_DMOV_MODE) {
1446 msm_spi_setup_dm_transfer(dd);
1447 msm_spi_enqueue_dm_commands(dd);
1448 }
1449 /* The output fifo interrupt handler will handle all writes after
1450 the first. Restricting this to one write avoids contention
1451 issues and race conditions between this thread and the int handler
1452 */
1453 else if (dd->mode == SPI_FIFO_MODE) {
1454 if (msm_spi_prepare_for_write(dd))
1455 goto transfer_end;
1456 msm_spi_start_write(dd, read_count);
Gilad Avidovd0262342012-10-24 16:52:30 -06001457 } else if (dd->mode == SPI_BAM_MODE) {
1458 if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
1459 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
1460 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001461 }
1462
Gilad Avidovd0262342012-10-24 16:52:30 -06001463 /*
 1464 * In BAM mode, the current state at this point is already RUN.
1465 * Only enter the RUN state after the first word is written into
1466 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1467 * might fire before the first word is written resulting in a
1468 * possible race condition.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001470 if (dd->mode != SPI_BAM_MODE)
1471 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
1472 dev_warn(dd->dev,
1473 "%s: Failed to set QUP to run-state. Mode:%d",
1474 __func__, dd->mode);
1475 goto transfer_end;
1476 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477
1478 /* Assume success, this might change later upon transaction result */
1479 dd->cur_msg->status = 0;
1480 do {
1481 if (!wait_for_completion_timeout(&dd->transfer_complete,
1482 timeout)) {
Gilad Avidovd0262342012-10-24 16:52:30 -06001483 dev_err(dd->dev,
1484 "%s: SPI transaction timeout\n",
1485 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001486 dd->cur_msg->status = -EIO;
1487 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001488 msm_dmov_flush(dd->tx_dma_chan, 1);
1489 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001490 }
1491 break;
1492 }
1493 } while (msm_spi_dm_send_next(dd));
1494
Sagar Dharia525593d2012-11-02 18:26:01 -06001495 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496transfer_end:
Gilad Avidovd0262342012-10-24 16:52:30 -06001497 msm_spi_dma_unmap_buffers(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001498 dd->mode = SPI_MODE_NONE;
1499
1500 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1501 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1502 dd->base + SPI_IO_CONTROL);
1503}
1504
1505static void get_transfer_length(struct msm_spi *dd)
1506{
1507 struct spi_transfer *tr;
1508 int num_xfrs = 0;
1509 int readlen = 0;
1510 int writelen = 0;
1511
1512 dd->cur_msg_len = 0;
1513 dd->multi_xfr = 0;
1514 dd->read_len = dd->write_len = 0;
1515
1516 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1517 if (tr->tx_buf)
1518 writelen += tr->len;
1519 if (tr->rx_buf)
1520 readlen += tr->len;
1521 dd->cur_msg_len += tr->len;
1522 num_xfrs++;
1523 }
1524
1525 if (num_xfrs == 2) {
1526 struct spi_transfer *first_xfr = dd->cur_transfer;
1527
1528 dd->multi_xfr = 1;
1529 tr = list_entry(first_xfr->transfer_list.next,
1530 struct spi_transfer,
1531 transfer_list);
1532 /*
1533 * We update dd->read_len and dd->write_len only
1534 * for WR-WR and WR-RD transfers.
1535 */
1536 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1537 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1538 ((!tr->tx_buf) && (tr->rx_buf))) {
1539 dd->read_len = readlen;
1540 dd->write_len = writelen;
1541 }
1542 }
1543 } else if (num_xfrs > 1)
1544 dd->multi_xfr = 1;
1545}
1546
1547static inline int combine_transfers(struct msm_spi *dd)
1548{
1549 struct spi_transfer *t = dd->cur_transfer;
1550 struct spi_transfer *nxt;
1551 int xfrs_grped = 1;
1552
1553 dd->cur_msg_len = dd->cur_transfer->len;
1554 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1555 nxt = list_entry(t->transfer_list.next,
1556 struct spi_transfer,
1557 transfer_list);
1558 if (t->cs_change != nxt->cs_change)
1559 return xfrs_grped;
1560 dd->cur_msg_len += nxt->len;
1561 xfrs_grped++;
1562 t = nxt;
1563 }
1564 return xfrs_grped;
1565}
1566
Harini Jayaraman093938a2012-04-20 15:33:23 -06001567static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1568{
1569 u32 spi_ioc;
1570 u32 spi_ioc_orig;
1571
1572 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1573 spi_ioc_orig = spi_ioc;
1574 if (set_flag)
1575 spi_ioc |= SPI_IO_C_FORCE_CS;
1576 else
1577 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1578
1579 if (spi_ioc != spi_ioc_orig)
1580 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1581}
1582
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001583static void msm_spi_process_message(struct msm_spi *dd)
1584{
1585 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001586 int cs_num;
1587 int rc;
Sagar Dharia525593d2012-11-02 18:26:01 -06001588 bool xfer_delay = false;
1589 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001592 cs_num = dd->cur_msg->spi->chip_select;
1593 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1594 (!(dd->cs_gpios[cs_num].valid)) &&
1595 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1596 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1597 spi_cs_rsrcs[cs_num]);
1598 if (rc) {
1599 dev_err(dd->dev, "gpio_request for pin %d failed with "
1600 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1601 rc);
1602 return;
1603 }
1604 dd->cs_gpios[cs_num].valid = 1;
1605 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001606
Sagar Dharia525593d2012-11-02 18:26:01 -06001607 list_for_each_entry(tr,
1608 &dd->cur_msg->transfers,
1609 transfer_list) {
1610 if (tr->delay_usecs) {
 1611 dev_info(dd->dev, "SPI slave requests delay per txn: %d",
1612 tr->delay_usecs);
1613 xfer_delay = true;
1614 break;
1615 }
1616 }
1617
1618 /* Don't combine xfers if delay is needed after every xfer */
1619 if (dd->qup_ver || xfer_delay) {
1620 if (dd->qup_ver)
1621 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001622 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001623 &dd->cur_msg->transfers,
1624 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001625 struct spi_transfer *t = dd->cur_transfer;
1626 struct spi_transfer *nxt;
1627
1628 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1629 nxt = list_entry(t->transfer_list.next,
1630 struct spi_transfer,
1631 transfer_list);
1632
Sagar Dharia525593d2012-11-02 18:26:01 -06001633 if (dd->qup_ver &&
1634 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001635 write_force_cs(dd, 1);
Sagar Dharia525593d2012-11-02 18:26:01 -06001636 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001637 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001639
1640 dd->cur_msg_len = dd->cur_transfer->len;
1641 msm_spi_process_transfer(dd);
1642 }
1643 } else {
1644 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1645 struct spi_transfer,
1646 transfer_list);
1647 get_transfer_length(dd);
1648 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1649 /*
1650 * Handling of multi-transfers.
1651 * FIFO mode is used by default
1652 */
1653 list_for_each_entry(dd->cur_transfer,
1654 &dd->cur_msg->transfers,
1655 transfer_list) {
1656 if (!dd->cur_transfer->len)
1657 goto error;
1658 if (xfrs_grped) {
1659 xfrs_grped--;
1660 continue;
1661 } else {
1662 dd->read_len = dd->write_len = 0;
1663 xfrs_grped = combine_transfers(dd);
1664 }
1665
1666 dd->cur_tx_transfer = dd->cur_transfer;
1667 dd->cur_rx_transfer = dd->cur_transfer;
1668 msm_spi_process_transfer(dd);
1669 xfrs_grped--;
1670 }
1671 } else {
1672 /* Handling of a single transfer or
1673 * WR-WR or WR-RD transfers
1674 */
1675 if ((!dd->cur_msg->is_dma_mapped) &&
Gilad Avidovd0262342012-10-24 16:52:30 -06001676 (msm_spi_use_dma(dd, dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001677 dd->cur_transfer->bits_per_word))) {
1678 /* Mapping of DMA buffers */
Gilad Avidovd0262342012-10-24 16:52:30 -06001679 int ret = msm_spi_dma_map_buffers(dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001680 if (ret < 0) {
1681 dd->cur_msg->status = ret;
1682 goto error;
1683 }
1684 }
1685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686 dd->cur_tx_transfer = dd->cur_transfer;
1687 dd->cur_rx_transfer = dd->cur_transfer;
1688 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001690 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001691
1692 return;
1693
1694error:
1695 if (dd->cs_gpios[cs_num].valid) {
1696 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1697 dd->cs_gpios[cs_num].valid = 0;
1698 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001699}
1700
1701/* workqueue - pull messages from queue & process */
1702static void msm_spi_workq(struct work_struct *work)
1703{
1704 struct msm_spi *dd =
1705 container_of(work, struct msm_spi, work_data);
1706 unsigned long flags;
1707 u32 status_error = 0;
Alok Chauhan66554a12012-08-22 19:54:45 +05301708 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709
1710 mutex_lock(&dd->core_lock);
1711
1712 /* Don't allow power collapse until we release mutex */
1713 if (pm_qos_request_active(&qos_req_list))
1714 pm_qos_update_request(&qos_req_list,
1715 dd->pm_lat);
1716 if (dd->use_rlock)
1717 remote_mutex_lock(&dd->r_lock);
1718
Alok Chauhan66554a12012-08-22 19:54:45 +05301719 /* Configure the spi clk, miso, mosi and cs gpio */
1720 if (dd->pdata->gpio_config) {
1721 rc = dd->pdata->gpio_config();
1722 if (rc) {
1723 dev_err(dd->dev,
1724 "%s: error configuring GPIOs\n",
1725 __func__);
1726 status_error = 1;
1727 }
1728 }
1729
1730 rc = msm_spi_request_gpios(dd);
1731 if (rc)
1732 status_error = 1;
1733
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001734 clk_prepare_enable(dd->clk);
1735 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001736 msm_spi_enable_irqs(dd);
1737
1738 if (!msm_spi_is_valid_state(dd)) {
1739 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1740 __func__);
1741 status_error = 1;
1742 }
1743
1744 spin_lock_irqsave(&dd->queue_lock, flags);
1745 while (!list_empty(&dd->queue)) {
1746 dd->cur_msg = list_entry(dd->queue.next,
1747 struct spi_message, queue);
1748 list_del_init(&dd->cur_msg->queue);
1749 spin_unlock_irqrestore(&dd->queue_lock, flags);
1750 if (status_error)
1751 dd->cur_msg->status = -EIO;
1752 else
1753 msm_spi_process_message(dd);
1754 if (dd->cur_msg->complete)
1755 dd->cur_msg->complete(dd->cur_msg->context);
1756 spin_lock_irqsave(&dd->queue_lock, flags);
1757 }
1758 dd->transfer_pending = 0;
1759 spin_unlock_irqrestore(&dd->queue_lock, flags);
1760
1761 msm_spi_disable_irqs(dd);
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001762 clk_disable_unprepare(dd->clk);
1763 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764
Alok Chauhan66554a12012-08-22 19:54:45 +05301765 /* Free the spi clk, miso, mosi, cs gpio */
1766 if (!rc && dd->pdata && dd->pdata->gpio_release)
1767 dd->pdata->gpio_release();
1768 if (!rc)
1769 msm_spi_free_gpios(dd);
1770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 if (dd->use_rlock)
1772 remote_mutex_unlock(&dd->r_lock);
1773
1774 if (pm_qos_request_active(&qos_req_list))
1775 pm_qos_update_request(&qos_req_list,
1776 PM_QOS_DEFAULT_VALUE);
1777
1778 mutex_unlock(&dd->core_lock);
1779 /* If needed, this could be done after the current message is
1780 * complete, and work continued upon resume. Not needed for now. */
1781 if (dd->suspended)
1782 wake_up_interruptible(&dd->continue_suspend);
1783}
1784
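/*
 * spi_master transfer callback: validate the clock speed, bits-per-word and
 * buffers of every transfer, queue the message and kick the workqueue.
 * Returns -EBUSY while the controller is suspended.
 */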
1785static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1786{
1787 struct msm_spi *dd;
1788 unsigned long flags;
1789 struct spi_transfer *tr;
1790
1791 dd = spi_master_get_devdata(spi->master);
1792 if (dd->suspended)
1793 return -EBUSY;
1794
1795 if (list_empty(&msg->transfers) || !msg->complete)
1796 return -EINVAL;
1797
1798 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1799 /* Check message parameters */
1800 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1801 (tr->bits_per_word &&
1802 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1803 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1804 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1805 "tx=%p, rx=%p\n",
1806 tr->speed_hz, tr->bits_per_word,
1807 tr->tx_buf, tr->rx_buf);
1808 return -EINVAL;
1809 }
1810 }
1811
1812 spin_lock_irqsave(&dd->queue_lock, flags);
1813 if (dd->suspended) {
1814 spin_unlock_irqrestore(&dd->queue_lock, flags);
1815 return -EBUSY;
1816 }
1817 dd->transfer_pending = 1;
1818 list_add_tail(&msg->queue, &dd->queue);
1819 spin_unlock_irqrestore(&dd->queue_lock, flags);
1820 queue_work(dd->workqueue, &dd->work_data);
1821 return 0;
1822}
1823
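/*
 * spi_master setup callback: validate bits_per_word and the chip select,
 * then briefly enable the clocks to program chip-select polarity, clock
 * polarity/phase and loopback in SPI_IO_CONTROL and SPI_CONFIG.
 */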
1824static int msm_spi_setup(struct spi_device *spi)
1825{
1826 struct msm_spi *dd;
1827 int rc = 0;
1828 u32 spi_ioc;
1829 u32 spi_config;
1830 u32 mask;
1831
1832 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1833 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1834 __func__, spi->bits_per_word);
1835 rc = -EINVAL;
1836 }
1837 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1838 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1839 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1840 rc = -EINVAL;
1841 }
1842
1843 if (rc)
1844 goto err_setup_exit;
1845
1846 dd = spi_master_get_devdata(spi->master);
1847
1848 mutex_lock(&dd->core_lock);
1849 if (dd->suspended) {
1850 mutex_unlock(&dd->core_lock);
1851 return -EBUSY;
1852 }
1853
1854 if (dd->use_rlock)
1855 remote_mutex_lock(&dd->r_lock);
1856
Alok Chauhan66554a12012-08-22 19:54:45 +05301857 /* Configure the spi clk, miso, mosi, cs gpio */
1858 if (dd->pdata->gpio_config) {
1859 rc = dd->pdata->gpio_config();
1860 if (rc) {
1861 dev_err(&spi->dev,
1862 "%s: error configuring GPIOs\n",
1863 __func__);
1864 rc = -ENXIO;
1865 goto err_setup_gpio;
1866 }
1867 }
1868
1869 rc = msm_spi_request_gpios(dd);
1870 if (rc) {
1871 rc = -ENXIO;
1872 goto err_setup_gpio;
1873 }
1874
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001875 clk_prepare_enable(dd->clk);
1876 clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001877
1878 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1879 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1880 if (spi->mode & SPI_CS_HIGH)
1881 spi_ioc |= mask;
1882 else
1883 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06001884 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885
1886 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1887
1888 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06001889 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
1890 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001891 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1892
1893 /* Ensure previous write completed before disabling the clocks */
1894 mb();
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001895 clk_disable_unprepare(dd->clk);
1896 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001897
Alok Chauhan66554a12012-08-22 19:54:45 +05301898 /* Free the spi clk, miso, mosi, cs gpio */
1899 if (dd->pdata && dd->pdata->gpio_release)
1900 dd->pdata->gpio_release();
1901 msm_spi_free_gpios(dd);
1902
1903err_setup_gpio:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904 if (dd->use_rlock)
1905 remote_mutex_unlock(&dd->r_lock);
1906 mutex_unlock(&dd->core_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001907err_setup_exit:
1908 return rc;
1909}
1910
1911#ifdef CONFIG_DEBUG_FS
1912static int debugfs_iomem_x32_set(void *data, u64 val)
1913{
1914 writel_relaxed(val, data);
1915 /* Ensure the previous write completed. */
1916 mb();
1917 return 0;
1918}
1919
1920static int debugfs_iomem_x32_get(void *data, u64 *val)
1921{
1922 *val = readl_relaxed(data);
1923 /* Ensure the previous read completed. */
1924 mb();
1925 return 0;
1926}
1927
1928DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1929 debugfs_iomem_x32_set, "0x%08llx\n");
1930
1931static void spi_debugfs_init(struct msm_spi *dd)
1932{
1933 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
1934 if (dd->dent_spi) {
1935 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1938 dd->debugfs_spi_regs[i] =
1939 debugfs_create_file(
1940 debugfs_spi_regs[i].name,
1941 debugfs_spi_regs[i].mode,
1942 dd->dent_spi,
1943 dd->base + debugfs_spi_regs[i].offset,
1944 &fops_iomem_x32);
1945 }
1946 }
1947}
1948
1949static void spi_debugfs_exit(struct msm_spi *dd)
1950{
1951 if (dd->dent_spi) {
1952 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001953
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001954 debugfs_remove_recursive(dd->dent_spi);
1955 dd->dent_spi = NULL;
1956 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1957 dd->debugfs_spi_regs[i] = NULL;
1958 }
1959}
1960#else
1961static void spi_debugfs_init(struct msm_spi *dd) {}
1962static void spi_debugfs_exit(struct msm_spi *dd) {}
1963#endif
1964
1965/* ===Device attributes begin=== */
1966static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1967 char *buf)
1968{
1969 struct spi_master *master = dev_get_drvdata(dev);
1970 struct msm_spi *dd = spi_master_get_devdata(master);
1971
1972 return snprintf(buf, PAGE_SIZE,
1973 "Device %s\n"
1974 "rx fifo_size = %d spi words\n"
1975 "tx fifo_size = %d spi words\n"
1976 "use_dma ? %s\n"
1977 "rx block size = %d bytes\n"
1978 "tx block size = %d bytes\n"
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301979 "input burst size = %d bytes\n"
1980 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001981 "DMA configuration:\n"
1982 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1983 "--statistics--\n"
1984 "Rx isrs = %d\n"
1985 "Tx isrs = %d\n"
1986 "DMA error = %d\n"
1987 "--debug--\n"
1988 "NA yet\n",
1989 dev_name(dev),
1990 dd->input_fifo_size,
1991 dd->output_fifo_size,
1992 dd->use_dma ? "yes" : "no",
1993 dd->input_block_size,
1994 dd->output_block_size,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301995 dd->input_burst_size,
1996 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001997 dd->tx_dma_chan,
1998 dd->rx_dma_chan,
1999 dd->tx_dma_crci,
2000 dd->rx_dma_crci,
2001 dd->stat_rx + dd->stat_dmov_rx,
2002 dd->stat_tx + dd->stat_dmov_tx,
2003 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
2004 );
2005}
2006
2007/* Reset statistics on write */
2008static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
2009 const char *buf, size_t count)
2010{
2011 struct msm_spi *dd = dev_get_drvdata(dev);
2012 dd->stat_rx = 0;
2013 dd->stat_tx = 0;
2014 dd->stat_dmov_rx = 0;
2015 dd->stat_dmov_tx = 0;
2016 dd->stat_dmov_rx_err = 0;
2017 dd->stat_dmov_tx_err = 0;
2018 return count;
2019}
2020
2021static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
2022
2023static struct attribute *dev_attrs[] = {
2024 &dev_attr_stats.attr,
2025 NULL,
2026};
2027
2028static struct attribute_group dev_attr_grp = {
2029 .attrs = dev_attrs,
2030};
2031/* ===Device attributes end=== */
2032
2033/**
2034 * spi_dmov_tx_complete_func - DataMover tx completion callback
2035 *
2036 * Executed in IRQ context (Data Mover's IRQ) with DataMover's
2037 * spinlock @msm_dmov_lock held.
2038 */
2039static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
2040 unsigned int result,
2041 struct msm_dmov_errdata *err)
2042{
2043 struct msm_spi *dd;
2044
2045 if (!(result & DMOV_RSLT_VALID)) {
2046 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p\n", result, cmd);
2047 return;
2048 }
2049 /* restore original context */
2050 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302051 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302053 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
2054 return;
2055 complete(&dd->transfer_complete);
2056 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057 /* Error or flush */
2058 if (result & DMOV_RSLT_ERROR) {
2059 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2060 dd->stat_dmov_tx_err++;
2061 }
2062 if (result & DMOV_RSLT_FLUSH) {
2063 /*
2064 * Flushing normally happens in process of
2065 * removing, when we are waiting for outstanding
2066 * DMA commands to be flushed.
2067 */
2068 dev_info(dd->dev,
2069 "DMA channel flushed (0x%08x)\n", result);
2070 }
2071 if (err)
2072 dev_err(dd->dev,
2073 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2074 err->flush[0], err->flush[1], err->flush[2],
2075 err->flush[3], err->flush[4], err->flush[5]);
2076 dd->cur_msg->status = -EIO;
2077 complete(&dd->transfer_complete);
2078 }
2079}
2080
2081/**
2082 * spi_dmov_rx_complete_func - DataMover rx completion callback
2083 *
2084 * Executed in IRQ context (Data Mover's IRQ) with
2085 * DataMover's spinlock @msm_dmov_lock held.
2086 */
2087static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2088 unsigned int result,
2089 struct msm_dmov_errdata *err)
2090{
2091 struct msm_spi *dd;
2092
2093 if (!(result & DMOV_RSLT_VALID)) {
2094 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)\n",
2095 result, cmd);
2096 return;
2097 }
2098 /* restore original context */
2099 dd = container_of(cmd, struct msm_spi, rx_hdr);
2100 if (result & DMOV_RSLT_DONE) {
2101 dd->stat_dmov_rx++;
2102 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2103 return;
2104 complete(&dd->transfer_complete);
2105 } else {
2106 /* Error or flush */
2107 if (result & DMOV_RSLT_ERROR) {
2108 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2109 dd->stat_dmov_rx_err++;
2110 }
2111 if (result & DMOV_RSLT_FLUSH) {
2112 dev_info(dd->dev,
2113 "DMA channel flushed(0x%08x)\n", result);
2114 }
2115 if (err)
2116 dev_err(dd->dev,
2117 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2118 err->flush[0], err->flush[1], err->flush[2],
2119 err->flush[3], err->flush[4], err->flush[5]);
2120 dd->cur_msg->status = -EIO;
2121 complete(&dd->transfer_complete);
2122 }
2123}
2124
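/*
 * Size of the single coherent allocation used for DMOV: two command
 * structures (tx and rx) plus two padding buffers sized to the larger of the
 * input/output burst sizes, each rounded up for alignment.
 */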
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302125static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
2126 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127{
2128 u32 cache_line = dma_get_cache_alignment();
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302129 int burst_size = (input_burst_size > output_burst_size) ?
2130 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002131
2132 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302133 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002134}
2135
Gilad Avidovd0262342012-10-24 16:52:30 -06002136static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002137{
2138 int limit = 0;
2139
2140 if (!dd->use_dma)
2141 return;
2142
2143 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002144 msm_dmov_flush(dd->tx_dma_chan, 1);
2145 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146 msleep(10);
2147 }
2148
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302149 dma_free_coherent(NULL,
2150 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
2151 dd->tx_dmov_cmd,
2152 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002153 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2154 dd->tx_padding = dd->rx_padding = NULL;
2155}
2156
Gilad Avidovd0262342012-10-24 16:52:30 -06002157static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2158 enum msm_spi_pipe_direction pipe_dir)
2159{
2160 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2161 (&dd->bam.prod) : (&dd->bam.cons);
2162 if (!pipe->teardown_required)
2163 return;
2164
2165 sps_disconnect(pipe->handle);
2166 dma_free_coherent(dd->dev, pipe->config.desc.size,
2167 pipe->config.desc.base, pipe->config.desc.phys_base);
2168 sps_free_endpoint(pipe->handle);
2169 pipe->handle = 0;
2170 pipe->teardown_required = false;
2171}
2172
2173static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2174 enum msm_spi_pipe_direction pipe_dir)
2175{
2176 int rc = 0;
2177 struct sps_pipe *pipe_handle;
2178 struct sps_register_event event = {0};
2179 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2180 (&dd->bam.prod) : (&dd->bam.cons);
2181 struct sps_connect *pipe_conf = &pipe->config;
2182
2183 pipe->handle = 0;
2184 pipe_handle = sps_alloc_endpoint();
2185 if (!pipe_handle) {
2186 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2187 , __func__);
2188 return -ENOMEM;
2189 }
2190
2191 memset(pipe_conf, 0, sizeof(*pipe_conf));
2192 rc = sps_get_config(pipe_handle, pipe_conf);
2193 if (rc) {
2194 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2195 , __func__);
2196 goto config_err;
2197 }
2198
2199 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2200 pipe_conf->source = dd->bam.handle;
2201 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2202 pipe_conf->mode = SPS_MODE_SRC;
2203 pipe_conf->src_pipe_index =
2204 dd->pdata->bam_producer_pipe_index;
2205 pipe_conf->dest_pipe_index = 0;
2206 } else {
2207 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2208 pipe_conf->destination = dd->bam.handle;
2209 pipe_conf->mode = SPS_MODE_DEST;
2210 pipe_conf->src_pipe_index = 0;
2211 pipe_conf->dest_pipe_index =
2212 dd->pdata->bam_consumer_pipe_index;
2213 }
2214 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2215 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2216 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2217 pipe_conf->desc.size,
2218 &pipe_conf->desc.phys_base,
2219 GFP_KERNEL);
2220 if (!pipe_conf->desc.base) {
2221 dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
2222 , __func__);
2223 rc = -ENOMEM;
2224 goto config_err;
2225 }
2226
2227 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2228
2229 rc = sps_connect(pipe_handle, pipe_conf);
2230 if (rc) {
2231 dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
2232 goto connect_err;
2233 }
2234
2235 event.mode = SPS_TRIGGER_WAIT;
2236 event.options = SPS_O_EOT;
2237 event.xfer_done = &dd->transfer_complete;
2238 event.user = (void *)dd;
2239 rc = sps_register_event(pipe_handle, &event);
2240 if (rc) {
2241 dev_err(dd->dev, "%s: Failed to register BAM EOT event",
2242 __func__);
2243 goto register_err;
2244 }
2245
2246 pipe->handle = pipe_handle;
2247 pipe->teardown_required = true;
2248 return 0;
2249
2250register_err:
2251 sps_disconnect(pipe_handle);
2252connect_err:
2253 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2254 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2255config_err:
2256 sps_free_endpoint(pipe_handle);
2257
2258 return rc;
2259}
2260
2261static void msm_spi_bam_teardown(struct msm_spi *dd)
2262{
2263 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2264 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2265
2266 if (dd->bam.deregister_required) {
2267 sps_deregister_bam_device(dd->bam.handle);
2268 dd->bam.deregister_required = false;
2269 }
2270}
2271
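/*
 * Look up the BAM by physical address; if sps_phy2h() does not return an
 * existing handle, register the BAM here as a remotely managed device and
 * remember to deregister it on teardown. Then bring up the producer and
 * consumer pipes.
 */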
2272static int msm_spi_bam_init(struct msm_spi *dd)
2273{
2274 struct sps_bam_props bam_props = {0};
2275 u32 bam_handle;
2276 int rc = 0;
2277
2278 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2279 if (rc || !bam_handle) {
2280 bam_props.phys_addr = dd->bam.phys_addr;
2281 bam_props.virt_addr = dd->bam.base;
2282 bam_props.irq = dd->bam.irq;
Gilad Avidovb0968052013-05-03 09:51:37 -06002283 bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Gilad Avidovd0262342012-10-24 16:52:30 -06002284 bam_props.summing_threshold = 0x10;
2285
2286 rc = sps_register_bam_device(&bam_props, &bam_handle);
2287 if (rc) {
2288 dev_err(dd->dev,
2289 "%s: Failed to register BAM device",
2290 __func__);
2291 return rc;
2292 }
2293 dd->bam.deregister_required = true;
2294 }
2295
2296 dd->bam.handle = bam_handle;
2297
2298 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2299 if (rc) {
2300 dev_err(dd->dev,
2301 "%s: Failed to init producer BAM-pipe",
2302 __func__);
2303 goto bam_init_error;
2304 }
2305
2306 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2307 if (rc) {
2308 dev_err(dd->dev,
2309 "%s: Failed to init consumer BAM-pipe",
2310 __func__);
2311 goto bam_init_error;
2312 }
2313
2314 return 0;
2315
2316bam_init_error:
2317 msm_spi_bam_teardown(dd);
2318 return rc;
2319}
2320
2321static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322{
2323 dmov_box *box;
2324 u32 cache_line = dma_get_cache_alignment();
2325
2326 /* Allocate everything as one chunk, since the total is smaller than a page */
2327
2328 /* We pass a NULL device, since a real device would require a
2329 coherent_dma_mask in its definition; we're okay with using the
2330 system pool */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302330 dd->tx_dmov_cmd
2331 = dma_alloc_coherent(NULL,
2332 get_chunk_size(dd, dd->input_burst_size,
2333 dd->output_burst_size),
2334 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335 if (dd->tx_dmov_cmd == NULL)
2336 return -ENOMEM;
2337
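/*
 * Carve the single allocation into: tx command block, rx command block
 * (DM_BYTE_ALIGN aligned), then tx and rx padding buffers, each aligned
 * to a cache line, matching the size computed by get_chunk_size().
 */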
2338 /* DMA addresses should be 64-bit aligned */
2339 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2340 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2341 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2342 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2343
2344 /* Buffers should be aligned to cache line */
2345 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2346 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2347 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302348 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
2349 dd->output_burst_size), cache_line);
2350 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002351 cache_line);
2352
2353 /* Setup DM commands */
2354 box = &(dd->rx_dmov_cmd->box);
2355 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2356 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2357 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2358 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2359 offsetof(struct spi_dmov_cmd, cmd_ptr));
2360 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002361
2362 box = &(dd->tx_dmov_cmd->box);
2363 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2364 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2365 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2366 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2367 offsetof(struct spi_dmov_cmd, cmd_ptr));
2368 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002369
2370 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2371 CMD_DST_CRCI(dd->tx_dma_crci);
2372 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2373 SPI_OUTPUT_FIFO;
2374 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2375 CMD_SRC_CRCI(dd->rx_dma_crci);
2376 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2377 SPI_INPUT_FIFO;
2378
2379 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002380 msm_dmov_flush(dd->tx_dma_chan, 1);
2381 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002382
2383 return 0;
2384}
2385
Gilad Avidovd0262342012-10-24 16:52:30 -06002386/**
2387 * msm_spi_dt_to_pdata: copy device-tree data to platform data struct
2388 */
2389struct msm_spi_platform_data *
2390__init msm_spi_dt_to_pdata(struct platform_device *pdev)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002391{
2392 struct device_node *node = pdev->dev.of_node;
2393 struct msm_spi_platform_data *pdata;
Gilad Avidovd0262342012-10-24 16:52:30 -06002394 int rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002395
2396 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2397 if (!pdata) {
2398 pr_err("Unable to allocate platform data\n");
2399 return NULL;
2400 }
2401
2402 of_property_read_u32(node, "spi-max-frequency",
2403 &pdata->max_clock_speed);
Gilad Avidov0697ea62013-02-11 16:46:38 -07002404 of_property_read_u32(node, "qcom,infinite-mode",
Kiran Gundae8f16742012-06-27 10:06:32 +05302405 &pdata->infinite_mode);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002406
Gilad Avidovd0262342012-10-24 16:52:30 -06002407 pdata->ver_reg_exists = of_property_read_bool(node
2408 , "qcom,ver-reg-exists");
2409
2410 pdata->use_bam = of_property_read_bool(node, "qcom,use-bam");
2411
2412 if (pdata->use_bam) {
2413 rc = of_property_read_u32(node, "qcom,bam-consumer-pipe-index",
2414 &pdata->bam_consumer_pipe_index);
2415 if (rc) {
2416 dev_warn(&pdev->dev,
2417 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2418 pdata->use_bam = false;
2419 }
2420
2421 rc = of_property_read_u32(node, "qcom,bam-producer-pipe-index",
2422 &pdata->bam_producer_pipe_index);
2423 if (rc) {
2424 dev_warn(&pdev->dev,
2425 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2426 pdata->use_bam = false;
2427 }
2428 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002429 return pdata;
2430}
2431
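/*
 * Read QUP_HARDWARE_VER; revisions at or above QUP_HARDWARE_VER_2_1_1 are
 * treated as B-family (BAM-capable) controllers.
 */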
Gilad Avidovd0262342012-10-24 16:52:30 -06002432static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2433{
2434 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2435 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2436 : SPI_QUP_VERSION_NONE;
2437}
2438
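/*
 * Fetch the BAM register space ("spi_bam_physical") and interrupt
 * ("spi_bam_irq") from the device tree and select the BAM init/teardown
 * callbacks.
 */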
2439static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2440 struct platform_device *pdev, struct spi_master *master)
2441{
2442 struct resource *resource;
2443 size_t bam_mem_size;
2444
2445 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2446 "spi_bam_physical");
2447 if (!resource) {
2448 dev_warn(&pdev->dev,
2449 "%s: Missing spi_bam_physical entry in DT",
2450 __func__);
2451 return -ENXIO;
2452 }
2453
2454 dd->bam.phys_addr = resource->start;
2455 bam_mem_size = resource_size(resource);
2456 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2457 bam_mem_size);
2458 if (!dd->bam.base) {
2459 dev_warn(&pdev->dev,
2460 "%s: Failed to ioremap(spi_bam_physical)",
2461 __func__);
2462 return -ENXIO;
2463 }
2464
2465 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2466 if (dd->bam.irq < 0) {
2467 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2468 __func__);
2469 return -EINVAL;
2470 }
2471
2472 dd->dma_init = msm_spi_bam_init;
2473 dd->dma_teardown = msm_spi_bam_teardown;
2474 return 0;
2475}
2476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002477static int __init msm_spi_probe(struct platform_device *pdev)
2478{
2479 struct spi_master *master;
2480 struct msm_spi *dd;
2481 struct resource *resource;
2482 int rc = -ENXIO;
2483 int locked = 0;
2484 int i = 0;
2485 int clk_enabled = 0;
2486 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002487 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002488 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002489
2490 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2491 if (!master) {
2492 rc = -ENOMEM;
2493 dev_err(&pdev->dev, "master allocation failed\n");
2494 goto err_probe_exit;
2495 }
2496
2497 master->bus_num = pdev->id;
2498 master->mode_bits = SPI_SUPPORTED_MODES;
2499 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2500 master->setup = msm_spi_setup;
2501 master->transfer = msm_spi_transfer;
2502 platform_set_drvdata(pdev, master);
2503 dd = spi_master_get_devdata(master);
2504
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002505 if (pdev->dev.of_node) {
2506 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2507 master->dev.of_node = pdev->dev.of_node;
2508 pdata = msm_spi_dt_to_pdata(pdev);
2509 if (!pdata) {
2510 rc = -ENOMEM;
2511 goto err_probe_exit;
2512 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002513
Gilad Avidov0697ea62013-02-11 16:46:38 -07002514 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2515 if (rc < 0)
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002516 dev_warn(&pdev->dev,
2517 "using default bus_num %d\n", pdev->id);
2518 else
Gilad Avidov0697ea62013-02-11 16:46:38 -07002519 master->bus_num = pdev->id = rc;
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002520
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002521 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2522 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
2523 i, &flags);
2524 }
2525
2526 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2527 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
2528 pdev->dev.of_node, "cs-gpios",
2529 i, &flags);
2530 dd->cs_gpios[i].valid = 0;
2531 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002532 } else {
2533 pdata = pdev->dev.platform_data;
2534 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002535
2536 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2537 resource = platform_get_resource(pdev, IORESOURCE_IO,
2538 i);
2539 dd->spi_gpios[i] = resource ? resource->start : -1;
2540 }
2541
2542 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2543 resource = platform_get_resource(pdev, IORESOURCE_IO,
2544 i + ARRAY_SIZE(spi_rsrcs));
2545 dd->cs_gpios[i].gpio_num = resource ?
2546 resource->start : -1;
2547 dd->cs_gpios[i].valid = 0;
2548 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002549 }
2550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002551 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002552 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002553 if (!resource) {
2554 rc = -ENXIO;
2555 goto err_probe_res;
2556 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002557
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002558 dd->mem_phys_addr = resource->start;
2559 dd->mem_size = resource_size(resource);
2560
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002561 if (pdata) {
2562 if (pdata->dma_config) {
2563 rc = pdata->dma_config();
2564 if (rc) {
2565 dev_warn(&pdev->dev,
2566 "%s: DM mode not supported\n",
2567 __func__);
2568 dd->use_dma = 0;
2569 goto skip_dma_resources;
2570 }
2571 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002572 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2573 resource = platform_get_resource(pdev,
2574 IORESOURCE_DMA, 0);
2575 if (resource) {
2576 dd->rx_dma_chan = resource->start;
2577 dd->tx_dma_chan = resource->end;
2578 resource = platform_get_resource(pdev,
2579 IORESOURCE_DMA, 1);
2580 if (!resource) {
2581 rc = -ENXIO;
2582 goto err_probe_res;
2583 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002584
Gilad Avidovd0262342012-10-24 16:52:30 -06002585 dd->rx_dma_crci = resource->start;
2586 dd->tx_dma_crci = resource->end;
2587 dd->use_dma = 1;
2588 master->dma_alignment =
2589 dma_get_cache_alignment();
2590 dd->dma_init = msm_spi_dmov_init;
2591 dd->dma_teardown = msm_spi_dmov_teardown;
2592 }
2593 } else {
2594 if (!dd->pdata->use_bam)
2595 goto skip_dma_resources;
2596
2597 rc = msm_spi_bam_get_resources(dd, pdev, master);
2598 if (rc) {
2599 dev_warn(dd->dev,
2600 "%s: Faild to get BAM resources",
2601 __func__);
2602 goto skip_dma_resources;
2603 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002604 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002605 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002606 }
2607
Alok Chauhan66554a12012-08-22 19:54:45 +05302608skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002610 spin_lock_init(&dd->queue_lock);
2611 mutex_init(&dd->core_lock);
2612 INIT_LIST_HEAD(&dd->queue);
2613 INIT_WORK(&dd->work_data, msm_spi_workq);
2614 init_waitqueue_head(&dd->continue_suspend);
2615 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002616 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002617 if (!dd->workqueue)
2618 goto err_probe_workq;
2619
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002620 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2621 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002622 rc = -ENXIO;
2623 goto err_probe_reqmem;
2624 }
2625
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002626 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2627 if (!dd->base) {
2628 rc = -ENOMEM;
2629 goto err_probe_reqmem;
2630 }
2631
Gilad Avidovd0262342012-10-24 16:52:30 -06002632 if (pdata && pdata->ver_reg_exists) {
2633 enum msm_spi_qup_version ver =
2634 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2635 if (dd->qup_ver != ver)
2636 dev_warn(&pdev->dev,
2637 "%s: HW version different then initially assumed by probe",
2638 __func__);
2639 }
2640
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002641 if (pdata && pdata->rsl_id) {
2642 struct remote_mutex_id rmid;
2643 rmid.r_spinlock_id = pdata->rsl_id;
2644 rmid.delay_us = SPI_TRYLOCK_DELAY;
2645
2646 rc = remote_mutex_init(&dd->r_lock, &rmid);
2647 if (rc) {
2648 dev_err(&pdev->dev,
2649 "%s: unable to init remote_mutex (%s), (rc=%d)\n",
2650 __func__, rmid.r_spinlock_id, rc);
2651 goto err_probe_rlock_init;
2652 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002653
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654 dd->use_rlock = 1;
2655 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302656 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002657 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002658 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002659
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002660 mutex_lock(&dd->core_lock);
2661 if (dd->use_rlock)
2662 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002663
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002664 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002665 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002666 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002667 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002668 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002669 rc = PTR_ERR(dd->clk);
2670 goto err_probe_clk_get;
2671 }
2672
Matt Wagantallac294852011-08-17 15:44:58 -07002673 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002674 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002675 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002676 rc = PTR_ERR(dd->pclk);
2677 goto err_probe_pclk_get;
2678 }
2679
2680 if (pdata && pdata->max_clock_speed)
2681 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2682
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002683 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002684 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002685 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 __func__);
2687 goto err_probe_clk_enable;
2688 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002689
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002690 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002691 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002692 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002693 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002694 __func__);
2695 goto err_probe_pclk_enable;
2696 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002697
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002698 pclk_enabled = 1;
Gilad Avidovd0262342012-10-24 16:52:30 -06002699 /* GSBI does not exist on B-family MSM chips */
2700 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2701 rc = msm_spi_configure_gsbi(dd, pdev);
2702 if (rc)
2703 goto err_probe_gsbi;
2704 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002706 msm_spi_calculate_fifo_size(dd);
2707 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002708 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002709 if (rc)
2710 goto err_probe_dma;
2711 }
2712
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002713 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002714 /*
2715 * The SPI core generates a bogus input overrun error on some targets,
2716 * when a transition from run to reset state occurs and if the FIFO has
2717 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2718 * bit.
2719 */
2720 msm_spi_enable_error_flags(dd);
2721
2722 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2723 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2724 if (rc)
2725 goto err_probe_state;
2726
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002727 clk_disable_unprepare(dd->clk);
2728 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002729 clk_enabled = 0;
2730 pclk_enabled = 0;
2731
2732 dd->suspended = 0;
2733 dd->transfer_pending = 0;
2734 dd->multi_xfr = 0;
2735 dd->mode = SPI_MODE_NONE;
2736
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002737 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002738 if (rc)
2739 goto err_probe_irq;
2740
2741 msm_spi_disable_irqs(dd);
2742 if (dd->use_rlock)
2743 remote_mutex_unlock(&dd->r_lock);
2744
2745 mutex_unlock(&dd->core_lock);
2746 locked = 0;
2747
2748 rc = spi_register_master(master);
2749 if (rc)
2750 goto err_probe_reg_master;
2751
2752 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2753 if (rc) {
2754 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2755 goto err_attrs;
2756 }
2757
2758 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05302759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002760 return 0;
2761
2762err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002763 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002764err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002765err_probe_irq:
2766err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002767 if (dd->dma_teardown)
2768 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002769err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002770err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002771 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002772 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002773err_probe_pclk_enable:
2774 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002775 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002776err_probe_clk_enable:
2777 clk_put(dd->pclk);
2778err_probe_pclk_get:
2779 clk_put(dd->clk);
2780err_probe_clk_get:
2781 if (locked) {
2782 if (dd->use_rlock)
2783 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002784
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002785 mutex_unlock(&dd->core_lock);
2786 }
2787err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002788err_probe_reqmem:
2789 destroy_workqueue(dd->workqueue);
2790err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002791err_probe_res:
2792 spi_master_put(master);
2793err_probe_exit:
2794 return rc;
2795}
2796
2797#ifdef CONFIG_PM
2798static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2799{
2800 struct spi_master *master = platform_get_drvdata(pdev);
2801 struct msm_spi *dd;
2802 unsigned long flags;
2803
2804 if (!master)
2805 goto suspend_exit;
2806 dd = spi_master_get_devdata(master);
2807 if (!dd)
2808 goto suspend_exit;
2809
2810 /* Make sure nothing is added to the queue while we're suspending */
2811 spin_lock_irqsave(&dd->queue_lock, flags);
2812 dd->suspended = 1;
2813 spin_unlock_irqrestore(&dd->queue_lock, flags);
2814
2815 /* Wait for transactions to end, or time out */
2816 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002817
2818suspend_exit:
2819 return 0;
2820}
2821
2822static int msm_spi_resume(struct platform_device *pdev)
2823{
2824 struct spi_master *master = platform_get_drvdata(pdev);
2825 struct msm_spi *dd;
2826
2827 if (!master)
2828 goto resume_exit;
2829 dd = spi_master_get_devdata(master);
2830 if (!dd)
2831 goto resume_exit;
2832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833 dd->suspended = 0;
2834resume_exit:
2835 return 0;
2836}
2837#else
2838#define msm_spi_suspend NULL
2839#define msm_spi_resume NULL
2840#endif /* CONFIG_PM */
2841
2842static int __devexit msm_spi_remove(struct platform_device *pdev)
2843{
2844 struct spi_master *master = platform_get_drvdata(pdev);
2845 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846
2847 pm_qos_remove_request(&qos_req_list);
2848 spi_debugfs_exit(dd);
2849 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2850
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08002851 if (dd->dma_teardown)
2852 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002853 clk_put(dd->clk);
2854 clk_put(dd->pclk);
2855 destroy_workqueue(dd->workqueue);
2856 platform_set_drvdata(pdev, 0);
2857 spi_unregister_master(master);
2858 spi_master_put(master);
2859
2860 return 0;
2861}
2862
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002863static struct of_device_id msm_spi_dt_match[] = {
2864 {
2865 .compatible = "qcom,spi-qup-v2",
2866 },
2867 {}
2868};
2869
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002870static struct platform_driver msm_spi_driver = {
2871 .driver = {
2872 .name = SPI_DRV_NAME,
2873 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002874 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002875 },
2876 .suspend = msm_spi_suspend,
2877 .resume = msm_spi_resume,
2878 .remove = __exit_p(msm_spi_remove),
2879};
2880
2881static int __init msm_spi_init(void)
2882{
2883 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2884}
2885module_init(msm_spi_init);
2886
2887static void __exit msm_spi_exit(void)
2888{
2889 platform_driver_unregister(&msm_spi_driver);
2890}
2891module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002892
2893MODULE_LICENSE("GPL v2");
2894MODULE_VERSION("0.4");
2895MODULE_ALIAS("platform:"SPI_DRV_NAME);