/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <mach/msm_spi.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <mach/dma.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "spi_qsd.h"

static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	int rc;

	rc = clk_set_rate(dd->clk, speed);
	if (!rc)
		dd->clock_speed = speed;
}

static int msm_spi_calculate_size(int *fifo_size,
				  int *block_size,
				  int block,
				  int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

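/*
 * Advance dd->cur_transfer to the next transfer in the current message
 * (if any) and refresh the driver's read/write buffer pointers.
 */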
static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	/* DM mode is not available for this block size */
	if (dd->input_block_size == 4 || dd->output_block_size == 4)
		dd->use_dma = 0;

	/* DM mode is currently unsupported for different block sizes */
	if (dd->input_block_size != dd->output_block_size)
		dd->use_dma = 0;

	if (dd->use_dma)
		dd->burst_size = max(dd->input_block_size, DM_BURST_SIZE);

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		/*
		 * For smaller values of delay, context switch time
		 * would negate the usage of usleep
		 */
		if (delay > 20)
			usleep(delay);
		else if (delay)
			udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

static inline void msm_spi_add_configs(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if ((dd->mode == SPI_DMOV_MODE) && (!dd->read_len)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

static void msm_spi_set_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config;

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);

	if (dd->cur_msg->spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	msm_spi_add_configs(dd, &spi_config, bpw-1);
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
	msm_spi_set_qup_config(dd, bpw);
}

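/*
 * Program the data-mover box (and, for any unaligned tail, single)
 * command descriptors for the next chunk of the current transfer and
 * set the QUP output/input word counts accordingly.
 */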
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, num_rows, bytes_sent;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}

	/* We'll send in chunks of SPI_MAX_LEN if larger */
	bytes_to_send = dd->tx_bytes_remaining / SPI_MAX_LEN ?
			SPI_MAX_LEN : dd->tx_bytes_remaining;
	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->unaligned_len = bytes_to_send % dd->burst_size;
	num_rows = bytes_to_send / dd->burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (dd->burst_size << 16) | 0;

		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len = (dd->burst_size << 16) | dd->burst_size;
		box->num_rows = (num_rows << 16) | num_rows;
		box->row_offset = (0 << 16) | dd->burst_size;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		u32 offset = dd->cur_transfer->len - dd->unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			offset = dd->cur_msg_len - dd->unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->burst_size);
		memset(dd->rx_padding, 0, dd->burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + offset,
			       dd->unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		rx_cmd->dst = dd->rx_padding_dma;
		tx_cmd->len = rx_cmd->len = dd->burst_size;
	}
	/* This also takes care of the padding dummy buf
	   Since this is set to the correct length, the
	   dummy bytes won't be actually sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account, that the bytes pertaining to
			 * the write transfer needs to be discarded,
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* SPI core can send maximum of 4K transfers, because there is HW problem
   with infinite mode.
   Therefore, we are sending several chunks of 3K or less (depending on how
   much is left).
   Upon completion we send the next chunk, or complete the transfer if
   everything is finished.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* We need to send more chunks, if we sent max last time */
	if (dd->tx_bytes_remaining > SPI_MAX_LEN) {
		dd->tx_bytes_remaining -= SPI_MAX_LEN;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

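/*
 * Input (RX) service interrupt: in FIFO mode drain the input FIFO and
 * complete when all bytes are read; in data-mover mode acknowledge the
 * MAX_INPUT/MAX_OUTPUT done flags and complete once the corresponding
 * data-mover callback has also run (tracked via rx_irq_called).
 */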
static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

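/*
 * Pack up to bytes_per_word bytes from the write buffer into a single
 * word (most significant byte first) and push it to the output FIFO.
 */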
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		 SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

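/*
 * DMA-map the client buffers for data-mover use.  For WR-WR and WR-RD
 * messages a private bounce buffer (temp_buf) is allocated so the two
 * transfers can be handled as one contiguous DMA operation.
 */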
static int msm_spi_map_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_unmap_dma_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->unaligned_len);
	}
}

/**
 * msm_use_dm - decides whether to use data mover for this
 * transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DM if:
 * 1. Transfer is longer than 3*block size.
 * 2. Buffers should be aligned to cache line.
 * 3. For WR-RD or WR-WR transfers, if condition (1) and (2) above are met.
 */
static inline int msm_use_dm(struct msm_spi *dd, struct spi_transfer *tr,
			     u8 bpw)
{
	u32 cache_line = dma_get_cache_alignment();

	if (!dd->use_dma)
		return 0;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return 0;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return 0;

	if (tr->tx_buf) {
		if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
			return 0;
	}
	if (tr->rx_buf) {
		if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
			return 0;
	}

	/* With cs_change, DM is only usable for 8/16/32 bits per word */
	if (tr->cs_change &&
	    ((bpw != 8) && (bpw != 16) && (bpw != 32)))
		return 0;
	return 1;
}

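/*
 * Configure the core for the current transfer (bits per word, clock,
 * FIFO vs. data-mover mode, chip select) and run it, waiting for
 * completion with a timeout scaled to the transfer length and speed.
 */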
static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 spi_ioc;
	u32 spi_iom;
	u32 spi_ioc_orig;
	u32 max_speed;
	u32 chip_select;
	u32 read_count;
	u32 timeout;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
	if (dd->cur_msg->spi->mode & SPI_LOOP)
		int_loopback = 1;
	if (int_loopback && dd->multi_xfr &&
			(read_count > dd->input_fifo_size)) {
		if (dd->read_len && dd->write_len)
			pr_err(
			"%s: Internal Loopback does not support > fifo size "
			"for write-then-read transactions\n",
				__func__);
		else if (dd->write_len && !dd->read_len)
			pr_err(
			"%s: Internal Loopback does not support > fifo size "
			"for write-then-write transactions\n",
				__func__);
		return;
	}
	if (!msm_use_dm(dd, dd->cur_transfer, bpw)) {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
		/* read_count cannot exceed fifo_size, and only one READ COUNT
		   interrupt is generated per transaction, so for transactions
		   larger than fifo size READ COUNT must be disabled.
		   For those transactions we usually move to Data Mover mode.
		*/
		if (read_count <= dd->input_fifo_size) {
			writel_relaxed(read_count,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, read_count);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
	} else {
		dd->mode = SPI_DMOV_MODE;
		if (dd->write_len && dd->read_len) {
			dd->tx_bytes_remaining = dd->write_len;
			dd->rx_bytes_remaining = dd->read_len;
		}
	}

	/* Write mode - fifo or data mover */
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if (dd->mode == SPI_DMOV_MODE)
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);

	msm_spi_set_config(dd, bpw);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (dd->cur_msg->spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;
	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	if (dd->mode == SPI_DMOV_MODE) {
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
	}
	/* The output fifo interrupt handler will handle all writes after
	   the first. Restricting this to one write avoids contention
	   issues and race conditions between this thread and the int handler
	*/
	else if (dd->mode == SPI_FIFO_MODE) {
		if (msm_spi_prepare_for_write(dd))
			goto transfer_end;
		msm_spi_start_write(dd, read_count);
	}

	/* Only enter the RUN state after the first word is written into
	   the output FIFO. Otherwise, the output FIFO EMPTY interrupt
	   might fire before the first word is written resulting in a
	   possible race condition.
	*/
	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
		goto transfer_end;

	timeout = 100 * msecs_to_jiffies(
			DIV_ROUND_UP(dd->cur_msg_len * 8,
			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));

	/* Assume success, this might change later upon transaction result */
	dd->cur_msg->status = 0;
	do {
		if (!wait_for_completion_timeout(&dd->transfer_complete,
						 timeout)) {
			dev_err(dd->dev, "%s: SPI transaction "
					 "timeout\n", __func__);
			dd->cur_msg->status = -EIO;
			if (dd->mode == SPI_DMOV_MODE) {
				msm_dmov_flush(dd->tx_dma_chan, 1);
				msm_dmov_flush(dd->rx_dma_chan, 1);
			}
			break;
		}
	} while (msm_spi_dm_send_next(dd));

transfer_end:
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_unmap_dma_buffers(dd);
	dd->mode = SPI_MODE_NONE;

	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
		       dd->base + SPI_IO_CONTROL);
}

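/*
 * Walk the message to compute the total length and, for two-transfer
 * WR-WR or WR-RD messages, the combined read/write lengths used by the
 * data-mover path; also flags multi-transfer messages.
 */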
static void get_transfer_length(struct msm_spi *dd)
{
	struct spi_transfer *tr;
	int num_xfrs = 0;
	int readlen = 0;
	int writelen = 0;

	dd->cur_msg_len = 0;
	dd->multi_xfr = 0;
	dd->read_len = dd->write_len = 0;

	list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
		if (tr->tx_buf)
			writelen += tr->len;
		if (tr->rx_buf)
			readlen += tr->len;
		dd->cur_msg_len += tr->len;
		num_xfrs++;
	}

	if (num_xfrs == 2) {
		struct spi_transfer *first_xfr = dd->cur_transfer;

		dd->multi_xfr = 1;
		tr = list_entry(first_xfr->transfer_list.next,
				struct spi_transfer,
				transfer_list);
		/*
		 * We update dd->read_len and dd->write_len only
		 * for WR-WR and WR-RD transfers.
		 */
		if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
			if (((tr->tx_buf) && (!tr->rx_buf)) ||
			    ((!tr->tx_buf) && (tr->rx_buf))) {
				dd->read_len = readlen;
				dd->write_len = writelen;
			}
		}
	} else if (num_xfrs > 1)
		dd->multi_xfr = 1;
}

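/*
 * Group consecutive transfers that share the same cs_change setting so
 * they can be processed back to back; returns the number of transfers
 * grouped and accumulates their total length in cur_msg_len.
 */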
static inline int combine_transfers(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;
	struct spi_transfer *nxt;
	int xfrs_grped = 1;

	dd->cur_msg_len = dd->cur_transfer->len;
	while (t->transfer_list.next != &dd->cur_msg->transfers) {
		nxt = list_entry(t->transfer_list.next,
				 struct spi_transfer,
				 transfer_list);
		if (t->cs_change != nxt->cs_change)
			return xfrs_grped;
		dd->cur_msg_len += nxt->len;
		xfrs_grped++;
		t = nxt;
	}
	return xfrs_grped;
}

static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
{
	u32 spi_ioc;
	u32 spi_ioc_orig;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	if (set_flag)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
}

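/*
 * Run all transfers of the current message.  A chip-select GPIO is
 * requested on demand; QUP v2 controllers toggle FORCE_CS between
 * adjacent transfers, while older controllers either combine transfers
 * or use the WR-WR/WR-RD data-mover handling.
 */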
static void msm_spi_process_message(struct msm_spi *dd)
{
	int xfrs_grped = 0;
	int cs_num;
	int rc;

	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
	cs_num = dd->cur_msg->spi->chip_select;
	if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
		(!(dd->cs_gpios[cs_num].valid)) &&
		(dd->cs_gpios[cs_num].gpio_num >= 0)) {
		rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
				spi_cs_rsrcs[cs_num]);
		if (rc) {
			dev_err(dd->dev, "gpio_request for pin %d failed with "
				"error %d\n", dd->cs_gpios[cs_num].gpio_num,
				rc);
			return;
		}
		dd->cs_gpios[cs_num].valid = 1;
	}

	if (dd->qup_ver) {
		write_force_cs(dd, 0);
		list_for_each_entry(dd->cur_transfer,
				    &dd->cur_msg->transfers,
				    transfer_list) {
			struct spi_transfer *t = dd->cur_transfer;
			struct spi_transfer *nxt;

			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				nxt = list_entry(t->transfer_list.next,
						 struct spi_transfer,
						 transfer_list);

				if (t->cs_change == nxt->cs_change)
					write_force_cs(dd, 1);
				else
					write_force_cs(dd, 0);
			}

			dd->cur_msg_len = dd->cur_transfer->len;
			msm_spi_process_transfer(dd);
		}
	} else {
		dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
						    struct spi_transfer,
						    transfer_list);
		get_transfer_length(dd);
		if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
			/*
			 * Handling of multi-transfers.
			 * FIFO mode is used by default
			 */
			list_for_each_entry(dd->cur_transfer,
					    &dd->cur_msg->transfers,
					    transfer_list) {
				if (!dd->cur_transfer->len)
					goto error;
				if (xfrs_grped) {
					xfrs_grped--;
					continue;
				} else {
					dd->read_len = dd->write_len = 0;
					xfrs_grped = combine_transfers(dd);
				}

				dd->cur_tx_transfer = dd->cur_transfer;
				dd->cur_rx_transfer = dd->cur_transfer;
				msm_spi_process_transfer(dd);
				xfrs_grped--;
			}
		} else {
			/* Handling of a single transfer or
			 * WR-WR or WR-RD transfers
			 */
			if ((!dd->cur_msg->is_dma_mapped) &&
			    (msm_use_dm(dd, dd->cur_transfer,
					dd->cur_transfer->bits_per_word))) {
				/* Mapping of DMA buffers */
				int ret = msm_spi_map_dma_buffers(dd);
				if (ret < 0) {
					dd->cur_msg->status = ret;
					goto error;
				}
			}

			dd->cur_tx_transfer = dd->cur_transfer;
			dd->cur_rx_transfer = dd->cur_transfer;
			msm_spi_process_transfer(dd);
		}
	}

	return;

error:
	if (dd->cs_gpios[cs_num].valid) {
		gpio_free(dd->cs_gpios[cs_num].gpio_num);
		dd->cs_gpios[cs_num].valid = 0;
	}
}

/* workqueue - pull messages from queue & process */
static void msm_spi_workq(struct work_struct *work)
{
	struct msm_spi *dd =
		container_of(work, struct msm_spi, work_data);
	unsigned long flags;
	u32 status_error = 0;

	mutex_lock(&dd->core_lock);

	/* Don't allow power collapse until we release mutex */
	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      dd->pm_lat);
	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);

	if (!msm_spi_is_valid_state(dd)) {
		dev_err(dd->dev, "%s: SPI operational state not valid\n",
			__func__);
		status_error = 1;
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	while (!list_empty(&dd->queue)) {
		dd->cur_msg = list_entry(dd->queue.next,
					 struct spi_message, queue);
		list_del_init(&dd->cur_msg->queue);
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		if (status_error)
			dd->cur_msg->status = -EIO;
		else
			msm_spi_process_message(dd);
		if (dd->cur_msg->complete)
			dd->cur_msg->complete(dd->cur_msg->context);
		spin_lock_irqsave(&dd->queue_lock, flags);
	}
	dd->transfer_pending = 0;
	spin_unlock_irqrestore(&dd->queue_lock, flags);

	msm_spi_disable_irqs(dd);
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				      PM_QOS_DEFAULT_VALUE);

	mutex_unlock(&dd->core_lock);
	/* If needed, this can be done after the current message is complete,
	   and work can be continued upon resume. No motivation for now. */
	if (dd->suspended)
		wake_up_interruptible(&dd->continue_suspend);
}

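/*
 * spi_master->transfer hook: validate the message parameters and queue
 * the message for the driver workqueue; actual processing happens in
 * msm_spi_workq().
 */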
static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct msm_spi *dd;
	unsigned long flags;
	struct spi_transfer *tr;

	dd = spi_master_get_devdata(spi->master);
	if (dd->suspended)
		return -EBUSY;

	if (list_empty(&msg->transfers) || !msg->complete)
		return -EINVAL;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		/* Check message parameters */
		if (tr->speed_hz > dd->pdata->max_clock_speed ||
		    (tr->bits_per_word &&
		     (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
		    (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
				"tx=%p, rx=%p\n",
				tr->speed_hz, tr->bits_per_word,
				tr->tx_buf, tr->rx_buf);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dd->queue_lock, flags);
	if (dd->suspended) {
		spin_unlock_irqrestore(&dd->queue_lock, flags);
		return -EBUSY;
	}
	dd->transfer_pending = 1;
	list_add_tail(&msg->queue, &dd->queue);
	spin_unlock_irqrestore(&dd->queue_lock, flags);
	queue_work(dd->workqueue, &dd->work_data);
	return 0;
}

static int msm_spi_setup(struct spi_device *spi)
{
	struct msm_spi *dd;
	int rc = 0;
	u32 spi_ioc;
	u32 spi_config;
	u32 mask;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
			__func__, spi->bits_per_word);
		rc = -EINVAL;
	}
	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
		rc = -EINVAL;
	}

	if (rc)
		goto err_setup_exit;

	dd = spi_master_get_devdata(spi->master);

	mutex_lock(&dd->core_lock);
	if (dd->suspended) {
		mutex_unlock(&dd->core_lock);
		return -EBUSY;
	}

	if (dd->use_rlock)
		remote_mutex_lock(&dd->r_lock);

	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
	if (spi->mode & SPI_CS_HIGH)
		spi_ioc |= mask;
	else
		spi_ioc &= ~mask;
	if (spi->mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	if (spi->mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;
	if (spi->mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;
	writel_relaxed(spi_config, dd->base + SPI_CONFIG);

	/* Ensure previous write completed before disabling the clocks */
	mb();
	clk_disable_unprepare(dd->clk);
	clk_disable_unprepare(dd->pclk);

	if (dd->use_rlock)
		remote_mutex_unlock(&dd->r_lock);
	mutex_unlock(&dd->core_lock);

err_setup_exit:
	return rc;
}

#ifdef CONFIG_DEBUG_FS
static int debugfs_iomem_x32_set(void *data, u64 val)
{
	writel_relaxed(val, data);
	/* Ensure the previous write completed. */
	mb();
	return 0;
}

static int debugfs_iomem_x32_get(void *data, u64 *val)
{
	*val = readl_relaxed(data);
	/* Ensure the previous read completed. */
	mb();
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
			debugfs_iomem_x32_set, "0x%08llx\n");

static void spi_debugfs_init(struct msm_spi *dd)
{
	dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
	if (dd->dent_spi) {
		int i;

		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
			dd->debugfs_spi_regs[i] =
				debugfs_create_file(
					debugfs_spi_regs[i].name,
					debugfs_spi_regs[i].mode,
					dd->dent_spi,
					dd->base + debugfs_spi_regs[i].offset,
					&fops_iomem_x32);
		}
	}
}

static void spi_debugfs_exit(struct msm_spi *dd)
{
	if (dd->dent_spi) {
		int i;

		debugfs_remove_recursive(dd->dent_spi);
		dd->dent_spi = NULL;
		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
			dd->debugfs_spi_regs[i] = NULL;
	}
}
#else
static void spi_debugfs_init(struct msm_spi *dd) {}
static void spi_debugfs_exit(struct msm_spi *dd) {}
#endif

/* ===Device attributes begin=== */
static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	return snprintf(buf, PAGE_SIZE,
			"Device %s\n"
			"rx fifo_size = %d spi words\n"
			"tx fifo_size = %d spi words\n"
			"use_dma ? %s\n"
			"rx block size = %d bytes\n"
			"tx block size = %d bytes\n"
			"burst size = %d bytes\n"
			"DMA configuration:\n"
			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
			"--statistics--\n"
			"Rx isrs = %d\n"
			"Tx isrs = %d\n"
			"DMA error = %d\n"
			"--debug--\n"
			"NA yet\n",
			dev_name(dev),
			dd->input_fifo_size,
			dd->output_fifo_size,
			dd->use_dma ? "yes" : "no",
			dd->input_block_size,
			dd->output_block_size,
			dd->burst_size,
			dd->tx_dma_chan,
			dd->rx_dma_chan,
			dd->tx_dma_crci,
			dd->rx_dma_crci,
			dd->stat_rx + dd->stat_dmov_rx,
			dd->stat_tx + dd->stat_dmov_tx,
			dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
			);
}

/* Reset statistics on write */
static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct msm_spi *dd = dev_get_drvdata(dev);
	dd->stat_rx = 0;
	dd->stat_tx = 0;
	dd->stat_dmov_rx = 0;
	dd->stat_dmov_tx = 0;
	dd->stat_dmov_rx_err = 0;
	dd->stat_dmov_tx_err = 0;
	return count;
}

static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);

static struct attribute *dev_attrs[] = {
	&dev_attr_stats.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* ===Device attributes end=== */

/**
 * spi_dmov_tx_complete_func - DataMover tx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ) DataMover's
 * spinlock @msm_dmov_lock held.
 */
static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, tx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_tx++;
		if ((atomic_inc_return(&dd->tx_irq_called) == 1))
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error (0x%08x)\n", result);
			dd->stat_dmov_tx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			/*
			 * Flushing normally happens in process of
			 * removing, when we are waiting for outstanding
			 * DMA commands to be flushed.
			 */
			dev_info(dd->dev,
				 "DMA channel flushed (0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

/**
 * spi_dmov_rx_complete_func - DataMover rx completion callback
 *
 * Executed in IRQ context (Data Mover's IRQ)
 * DataMover's spinlock @msm_dmov_lock held.
 */
static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
				      unsigned int result,
				      struct msm_dmov_errdata *err)
{
	struct msm_spi *dd;

	if (!(result & DMOV_RSLT_VALID)) {
		pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
		       result, cmd);
		return;
	}
	/* restore original context */
	dd = container_of(cmd, struct msm_spi, rx_hdr);
	if (result & DMOV_RSLT_DONE) {
		dd->stat_dmov_rx++;
		if (atomic_inc_return(&dd->rx_irq_called) == 1)
			return;
		complete(&dd->transfer_complete);
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR) {
			dev_err(dd->dev, "DMA error(0x%08x)\n", result);
			dd->stat_dmov_rx_err++;
		}
		if (result & DMOV_RSLT_FLUSH) {
			dev_info(dd->dev,
				 "DMA channel flushed(0x%08x)\n", result);
		}
		if (err)
			dev_err(dd->dev,
				"Flush data(%08x %08x %08x %08x %08x %08x)\n",
				err->flush[0], err->flush[1], err->flush[2],
				err->flush[3], err->flush[4], err->flush[5]);
		dd->cur_msg->status = -EIO;
		complete(&dd->transfer_complete);
	}
}

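/*
 * Size of the single coherent allocation that holds both data-mover
 * command structures and the TX/RX padding buffers; everything fits in
 * one chunk (smaller than a page), so it is allocated in one go.
 */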
static inline u32 get_chunk_size(struct msm_spi *dd)
{
	u32 cache_line = dma_get_cache_alignment();

	return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
		roundup(dd->burst_size, cache_line))*2;
}

static void msm_spi_teardown_dma(struct msm_spi *dd)
{
	int limit = 0;

	if (!dd->use_dma)
		return;

	while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
		msm_dmov_flush(dd->tx_dma_chan, 1);
		msm_dmov_flush(dd->rx_dma_chan, 1);
		msleep(10);
	}

	dma_free_coherent(NULL, get_chunk_size(dd), dd->tx_dmov_cmd,
			  dd->tx_dmov_cmd_dma);
	dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
	dd->tx_padding = dd->rx_padding = NULL;
}

static __init int msm_spi_init_dma(struct msm_spi *dd)
{
	dmov_box *box;
	u32 cache_line = dma_get_cache_alignment();

	/* Allocate all as one chunk, since all is smaller than page size */

	/* We pass a NULL device, since it would require a coherent_dma_mask
	   in the device definition; we're okay with using the system pool */
	dd->tx_dmov_cmd = dma_alloc_coherent(NULL, get_chunk_size(dd),
					     &dd->tx_dmov_cmd_dma, GFP_KERNEL);
	if (dd->tx_dmov_cmd == NULL)
		return -ENOMEM;

	/* DMA addresses should be 64-bit aligned */
	dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
			  ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
	dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);

	/* Buffers should be aligned to cache line */
	dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
	dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
			      sizeof(struct spi_dmov_cmd), cache_line);
	dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding + dd->burst_size),
				     cache_line);
	dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->burst_size,
				   cache_line);

	/* Setup DM commands */
	box = &(dd->rx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
	box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
	dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;

	box = &(dd->tx_dmov_cmd->box);
	box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
	box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
	dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
			    DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
			    offsetof(struct spi_dmov_cmd, cmd_ptr));
	dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;

	dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_DST_CRCI(dd->tx_dma_crci);
	dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
					  SPI_OUTPUT_FIFO;
	dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
					  CMD_SRC_CRCI(dd->rx_dma_crci);
	dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
					  SPI_INPUT_FIFO;

	/* Clear remaining activities on channel */
	msm_dmov_flush(dd->tx_dma_chan, 1);
	msm_dmov_flush(dd->rx_dma_chan, 1);

	return 0;
}

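/*
 * Build platform data from the device-tree node; currently only the
 * spi-max-frequency property is read.
 */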
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001756struct msm_spi_platform_data *msm_spi_dt_to_pdata(struct platform_device *pdev)
1757{
1758 struct device_node *node = pdev->dev.of_node;
1759 struct msm_spi_platform_data *pdata;
1760
1761 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1762 if (!pdata) {
1763 pr_err("Unable to allocate platform data\n");
1764 return NULL;
1765 }
1766
1767 of_property_read_u32(node, "spi-max-frequency",
1768 &pdata->max_clock_speed);
1769
1770 return pdata;
1771}
1772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001773static int __init msm_spi_probe(struct platform_device *pdev)
1774{
1775 struct spi_master *master;
1776 struct msm_spi *dd;
1777 struct resource *resource;
1778 int rc = -ENXIO;
1779 int locked = 0;
1780 int i = 0;
1781 int clk_enabled = 0;
1782 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001783 struct msm_spi_platform_data *pdata;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001784 enum of_gpio_flags flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785
1786 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
1787 if (!master) {
1788 rc = -ENOMEM;
1789 dev_err(&pdev->dev, "master allocation failed\n");
1790 goto err_probe_exit;
1791 }
1792
1793 master->bus_num = pdev->id;
1794 master->mode_bits = SPI_SUPPORTED_MODES;
1795 master->num_chipselect = SPI_NUM_CHIPSELECTS;
1796 master->setup = msm_spi_setup;
1797 master->transfer = msm_spi_transfer;
1798 platform_set_drvdata(pdev, master);
1799 dd = spi_master_get_devdata(master);
1800
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001801 if (pdev->dev.of_node) {
1802 dd->qup_ver = SPI_QUP_VERSION_BFAM;
1803 master->dev.of_node = pdev->dev.of_node;
1804 pdata = msm_spi_dt_to_pdata(pdev);
1805 if (!pdata) {
1806 rc = -ENOMEM;
1807 goto err_probe_exit;
1808 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001809
1810 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1811 dd->spi_gpios[i] = of_get_gpio_flags(pdev->dev.of_node,
1812 i, &flags);
1813 }
1814
1815 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1816 dd->cs_gpios[i].gpio_num = of_get_named_gpio_flags(
1817 pdev->dev.of_node, "cs-gpios",
1818 i, &flags);
1819 dd->cs_gpios[i].valid = 0;
1820 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001821 } else {
1822 pdata = pdev->dev.platform_data;
1823 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07001824
1825 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
1826 resource = platform_get_resource(pdev, IORESOURCE_IO,
1827 i);
1828 dd->spi_gpios[i] = resource ? resource->start : -1;
1829 }
1830
1831 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
1832 resource = platform_get_resource(pdev, IORESOURCE_IO,
1833 i + ARRAY_SIZE(spi_rsrcs));
1834 dd->cs_gpios[i].gpio_num = resource ?
1835 resource->start : -1;
1836 dd->cs_gpios[i].valid = 0;
1837 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001838 }
1839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001841 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842 if (!resource) {
1843 rc = -ENXIO;
1844 goto err_probe_res;
1845 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001846
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001847 dd->mem_phys_addr = resource->start;
1848 dd->mem_size = resource_size(resource);
1849
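 /*
  * Optional board resources: IORESOURCE_DMA 0 supplies the rx/tx
  * data-mover channels and IORESOURCE_DMA 1 the matching CRCIs; when
  * both are present, DMA mode is enabled. pdata->gpio_config(), if
  * provided, performs board-specific pin setup.
  */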
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001850 if (pdata) {
1851 if (pdata->dma_config) {
1852 rc = pdata->dma_config();
1853 if (rc) {
1854 dev_warn(&pdev->dev,
1855 "%s: DM mode not supported\n",
1856 __func__);
1857 dd->use_dma = 0;
1858 goto skip_dma_resources;
1859 }
1860 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001861 resource = platform_get_resource(pdev, IORESOURCE_DMA, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001862 if (resource) {
1863 dd->rx_dma_chan = resource->start;
1864 dd->tx_dma_chan = resource->end;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001865 resource = platform_get_resource(pdev, IORESOURCE_DMA,
1866 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867 if (!resource) {
1868 rc = -ENXIO;
1869 goto err_probe_res;
1870 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001871
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001872 dd->rx_dma_crci = resource->start;
1873 dd->tx_dma_crci = resource->end;
1874 dd->use_dma = 1;
1875 master->dma_alignment = dma_get_cache_alignment();
1876 }
1877
1878skip_dma_resources:
1879 if (pdata->gpio_config) {
1880 rc = pdata->gpio_config();
1881 if (rc) {
1882 dev_err(&pdev->dev,
1883 "%s: error configuring GPIOs\n",
1884 __func__);
1885 goto err_probe_gpio;
1886 }
1887 }
1888 }
1889
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001890 rc = msm_spi_request_gpios(dd);
1891 if (rc)
1892 goto err_probe_gpio;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001894 spin_lock_init(&dd->queue_lock);
1895 mutex_init(&dd->core_lock);
1896 INIT_LIST_HEAD(&dd->queue);
1897 INIT_WORK(&dd->work_data, msm_spi_workq);
1898 init_waitqueue_head(&dd->continue_suspend);
1899 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001900 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901 if (!dd->workqueue) {
 rc = -ENOMEM;
1902 goto err_probe_workq;
 }
1903
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001904 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
1905 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001906 rc = -ENXIO;
1907 goto err_probe_reqmem;
1908 }
1909
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001910 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
1911 if (!dd->base) {
1912 rc = -ENOMEM;
1913 goto err_probe_reqmem;
1914 }
1915
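 /*
  * If the controller is shared with a remote processor, arbitrate access
  * through a remote mutex; a CPU/DMA latency pm_qos request is registered
  * here and presumably updated with pdata->pm_lat around transfers.
  */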
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001916 if (pdata && pdata->rsl_id) {
1917 struct remote_mutex_id rmid;
1918 rmid.r_spinlock_id = pdata->rsl_id;
1919 rmid.delay_us = SPI_TRYLOCK_DELAY;
1920
1921 rc = remote_mutex_init(&dd->r_lock, &rmid);
1922 if (rc) {
1923 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
1924 "(%s), (rc=%d)\n", __func__,
1925 rmid.r_spinlock_id, rc);
1926 goto err_probe_rlock_init;
1927 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001928
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001929 dd->use_rlock = 1;
1930 dd->pm_lat = pdata->pm_lat;
1931 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
1932 PM_QOS_DEFAULT_VALUE);
1933 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001934
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001935 mutex_lock(&dd->core_lock);
1936 if (dd->use_rlock)
1937 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001938
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001939 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940 dd->dev = &pdev->dev;
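 /*
  * Acquire and enable the core and interface clocks so the controller
  * registers can be programmed; both are disabled again once probe-time
  * setup completes.
  */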
Matt Wagantallac294852011-08-17 15:44:58 -07001941 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001942 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001943 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944 rc = PTR_ERR(dd->clk);
1945 goto err_probe_clk_get;
1946 }
1947
Matt Wagantallac294852011-08-17 15:44:58 -07001948 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07001950 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001951 rc = PTR_ERR(dd->pclk);
1952 goto err_probe_pclk_get;
1953 }
1954
1955 if (pdata && pdata->max_clock_speed)
1956 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
1957
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001958 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001959 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001960 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001961 __func__);
1962 goto err_probe_clk_enable;
1963 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001964
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001965 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001966 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001967 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07001968 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969 __func__);
1970 goto err_probe_pclk_enable;
1971 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001972
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001973 pclk_enabled = 1;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001974 rc = msm_spi_configure_gsbi(dd, pdev);
1975 if (rc)
1976 goto err_probe_gsbi;
1977
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978 msm_spi_calculate_fifo_size(dd);
1979 if (dd->use_dma) {
1980 rc = msm_spi_init_dma(dd);
1981 if (rc)
1982 goto err_probe_dma;
1983 }
1984
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001985 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001986 /*
1987 * The SPI core generates a bogus input overrun error on some targets,
1988 * when a transition from run to reset state occurs and if the FIFO has
1989 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
1990 * bit.
1991 */
1992 msm_spi_enable_error_flags(dd);
1993
1994 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
1995 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1996 if (rc)
1997 goto err_probe_state;
1998
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07001999 clk_disable_unprepare(dd->clk);
2000 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002001 clk_enabled = 0;
2002 pclk_enabled = 0;
2003
2004 dd->suspended = 0;
2005 dd->transfer_pending = 0;
2006 dd->multi_xfr = 0;
2007 dd->mode = SPI_MODE_NONE;
2008
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002009 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 if (rc)
2011 goto err_probe_irq;
2012
2013 msm_spi_disable_irqs(dd);
2014 if (dd->use_rlock)
2015 remote_mutex_unlock(&dd->r_lock);
2016
2017 mutex_unlock(&dd->core_lock);
2018 locked = 0;
2019
2020 rc = spi_register_master(master);
2021 if (rc)
2022 goto err_probe_reg_master;
2023
2024 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2025 if (rc) {
2026 dev_err(&pdev->dev, "failed to create dev. attrs: %d\n", rc);
2027 goto err_attrs;
2028 }
2029
2030 spi_debugfs_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002031 return 0;
2032
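 /* Error path: each label below undoes only the steps completed before the failure */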
2033err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002034 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002035err_probe_reg_master:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002036err_probe_irq:
2037err_probe_state:
2038 msm_spi_teardown_dma(dd);
2039err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002040err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002041 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002042 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043err_probe_pclk_enable:
2044 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002045 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002046err_probe_clk_enable:
2047 clk_put(dd->pclk);
2048err_probe_pclk_get:
2049 clk_put(dd->clk);
2050err_probe_clk_get:
2051 if (locked) {
2052 if (dd->use_rlock)
2053 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002055 mutex_unlock(&dd->core_lock);
2056 }
2057err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058err_probe_reqmem:
2059 destroy_workqueue(dd->workqueue);
2060err_probe_workq:
2061 msm_spi_free_gpios(dd);
2062err_probe_gpio:
2063 if (pdata && pdata->gpio_release)
2064 pdata->gpio_release();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065err_probe_res:
2066 spi_master_put(master);
2067err_probe_exit:
2068 return rc;
2069}
2070
2071#ifdef CONFIG_PM
2072static int msm_spi_suspend(struct platform_device *pdev, pm_message_t state)
2073{
2074 struct spi_master *master = platform_get_drvdata(pdev);
2075 struct msm_spi *dd;
2076 unsigned long flags;
2077
2078 if (!master)
2079 goto suspend_exit;
2080 dd = spi_master_get_devdata(master);
2081 if (!dd)
2082 goto suspend_exit;
2083
2084 /* Make sure nothing is added to the queue while we're suspending */
2085 spin_lock_irqsave(&dd->queue_lock, flags);
2086 dd->suspended = 1;
2087 spin_unlock_irqrestore(&dd->queue_lock, flags);
2088
2089 /* Wait (interruptibly) for any pending transactions to complete */
2090 wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending);
2091 msm_spi_free_gpios(dd);
2092
2093suspend_exit:
2094 return 0;
2095}
2096
2097static int msm_spi_resume(struct platform_device *pdev)
2098{
2099 struct spi_master *master = platform_get_drvdata(pdev);
2100 struct msm_spi *dd;
2101
2102 if (!master)
2103 goto resume_exit;
2104 dd = spi_master_get_devdata(master);
2105 if (!dd)
2106 goto resume_exit;
2107
2108 BUG_ON(msm_spi_request_gpios(dd) != 0);
2109 dd->suspended = 0;
2110resume_exit:
2111 return 0;
2112}
2113#else
2114#define msm_spi_suspend NULL
2115#define msm_spi_resume NULL
2116#endif /* CONFIG_PM */
2117
2118static int __devexit msm_spi_remove(struct platform_device *pdev)
2119{
2120 struct spi_master *master = platform_get_drvdata(pdev);
2121 struct msm_spi *dd = spi_master_get_devdata(master);
2122 struct msm_spi_platform_data *pdata = pdev->dev.platform_data;
2123
2124 if (dd->use_rlock)
 pm_qos_remove_request(&qos_req_list);
2125 spi_debugfs_exit(dd);
2126 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2127
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128 msm_spi_teardown_dma(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002129 if (pdata && pdata->gpio_release)
2130 pdata->gpio_release();
2131
2132 msm_spi_free_gpios(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002133 clk_put(dd->clk);
2134 clk_put(dd->pclk);
2135 destroy_workqueue(dd->workqueue);
2136 platform_set_drvdata(pdev, NULL);
2137 spi_unregister_master(master);
2138 spi_master_put(master);
2139
2140 return 0;
2141}
2142
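/*
 * Example (hypothetical) device-tree node matched by this table:
 *
 *	spi@<base-address> {
 *		compatible = "qcom,spi-qup-v2";
 *		spi-max-frequency = <25000000>;
 *	};
 *
 * Only "spi-max-frequency" is read by msm_spi_dt_to_pdata().
 */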
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002143static struct of_device_id msm_spi_dt_match[] = {
2144 {
2145 .compatible = "qcom,spi-qup-v2",
2146 },
2147 {}
2148};
2149
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002150static struct platform_driver msm_spi_driver = {
2151 .driver = {
2152 .name = SPI_DRV_NAME,
2153 .owner = THIS_MODULE,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002154 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002155 },
2156 .suspend = msm_spi_suspend,
2157 .resume = msm_spi_resume,
2158 .remove = __devexit_p(msm_spi_remove),
2159};
2160
2161static int __init msm_spi_init(void)
2162{
2163 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
2164}
2165module_init(msm_spi_init);
2166
2167static void __exit msm_spi_exit(void)
2168{
2169 platform_driver_unregister(&msm_spi_driver);
2170}
2171module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002172
2173MODULE_LICENSE("GPL v2");
2174MODULE_VERSION("0.4");
2175MODULE_ALIAS("platform:"SPI_DRV_NAME);