Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
Gilad Avidov002dba02013-05-21 18:06:32 -060017
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/version.h>
19#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070020#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <linux/init.h>
22#include <linux/spinlock.h>
23#include <linux/list.h>
24#include <linux/irq.h>
25#include <linux/platform_device.h>
26#include <linux/spi/spi.h>
27#include <linux/interrupt.h>
28#include <linux/err.h>
29#include <linux/clk.h>
30#include <linux/delay.h>
31#include <linux/workqueue.h>
32#include <linux/io.h>
33#include <linux/debugfs.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034#include <linux/gpio.h>
35#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070036#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070037#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070038#include <linux/of_gpio.h>
Gilad Avidovd0262342012-10-24 16:52:30 -060039#include <linux/dma-mapping.h>
40#include <linux/sched.h>
41#include <linux/mutex.h>
42#include <linux/atomic.h>
Alok Chauhan7fd3add2013-03-12 18:34:43 +053043#include <linux/pm_runtime.h>
Gilad Avidovd0262342012-10-24 16:52:30 -060044#include <mach/msm_spi.h>
45#include <mach/sps.h>
46#include <mach/dma.h>
Gilad Avidov23350552013-05-21 09:26:46 -060047#include <mach/msm_bus.h>
48#include <mach/msm_bus_board.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070049#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050
Alok Chauhan7fd3add2013-03-12 18:34:43 +053051static int msm_spi_pm_resume_runtime(struct device *device);
52static int msm_spi_pm_suspend_runtime(struct device *device);
53
54
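/**
 * msm_spi_configure_gsbi: put the GSBI wrapper, when present, into SPI mode
 *
 * Maps the optional GSBI register resource and writes GSBI_SPI_CONFIG to
 * GSBI_CTRL_REG. Returns 0 if the platform provides no GSBI resource.
 */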
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070055static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
56 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057{
58 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070059 unsigned long gsbi_mem_phys_addr;
60 size_t gsbi_mem_size;
61 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070065 return 0;
66
67 gsbi_mem_phys_addr = resource->start;
68 gsbi_mem_size = resource_size(resource);
69 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
70 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070072
73 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
74 gsbi_mem_size);
75 if (!gsbi_base)
76 return -ENXIO;
77
78 /* Set GSBI to SPI mode */
79 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080
81 return 0;
82}
83
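/**
 * msm_spi_register_init: bring the QUP core to a known state
 *
 * Issues a software reset, moves the core to the RESET state and clears the
 * operational, config and I/O-mode registers (plus the operational mask on
 * QUP v2 hardware).
 */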
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070084static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070086 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
87 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
88 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
89 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
90 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
91 if (dd->qup_ver)
92 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093}
94
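/* Claim all SPI GPIOs listed in spi_rsrcs[]; on failure, release any pins
 * that were already requested and return the error code.
 */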
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static inline int msm_spi_request_gpios(struct msm_spi *dd)
96{
97 int i;
98 int result = 0;
99
100 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
101 if (dd->spi_gpios[i] >= 0) {
102 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
103 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -0600104 dev_err(dd->dev, "%s: gpio_request for pin %d "
105 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700106 dd->spi_gpios[i], result);
107 goto error;
108 }
109 }
110 }
111 return 0;
112
113error:
114 for (; --i >= 0;) {
115 if (dd->spi_gpios[i] >= 0)
116 gpio_free(dd->spi_gpios[i]);
117 }
118 return result;
119}
120
121static inline void msm_spi_free_gpios(struct msm_spi *dd)
122{
123 int i;
124
125 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
126 if (dd->spi_gpios[i] >= 0)
127 gpio_free(dd->spi_gpios[i]);
128 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600129
130 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
131 if (dd->cs_gpios[i].valid) {
132 gpio_free(dd->cs_gpios[i].gpio_num);
133 dd->cs_gpios[i].valid = 0;
134 }
135 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136}
137
/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk: the clock for which to find the nearest lower rate
 * @rate: desired clock frequency in Hz
 * @return: nearest lower rate, or a negative error value
 *
 * The public clock API's clk_round_rate() is a ceiling function. This
 * function is the corresponding floor, implemented as a binary search
 * on top of the ceiling function.
 */
148static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
149{
150 long lowest_available, nearest_low, step_size, cur;
151 long step_direction = -1;
152 long guess = rate;
153 int max_steps = 10;
154
155 cur = clk_round_rate(clk, rate);
156 if (cur == rate)
157 return rate;
158
159 /* if we got here then: cur > rate */
160 lowest_available = clk_round_rate(clk, 0);
161 if (lowest_available > rate)
162 return -EINVAL;
163
164 step_size = (rate - lowest_available) >> 1;
165 nearest_low = lowest_available;
166
167 while (max_steps-- && step_size) {
168 guess += step_size * step_direction;
169
170 cur = clk_round_rate(clk, guess);
171
172 if ((cur < rate) && (cur > nearest_low))
173 nearest_low = cur;
174
175 /*
176 * if we stepped too far, then start stepping in the other
177 * direction with half the step size
178 */
179 if (((cur > rate) && (step_direction > 0))
180 || ((cur < rate) && (step_direction < 0))) {
181 step_direction = -step_direction;
182 step_size >>= 1;
183 }
184 }
185 return nearest_low;
186}
187
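/* Set the SPI core clock to the nearest supported rate at or below 'speed'
 * and cache the resulting rate in dd->clock_speed.
 */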
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188static void msm_spi_clock_set(struct msm_spi *dd, int speed)
189{
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600190 long rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191 int rc;
192
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600193 rate = msm_spi_clk_max_rate(dd->clk, speed);
194 if (rate < 0) {
195 dev_err(dd->dev,
196 "%s: no match found for requested clock frequency:%d",
197 __func__, speed);
198 return;
199 }
200
201 rc = clk_set_rate(dd->clk, rate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 if (!rc)
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600203 dd->clock_speed = rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204}
205
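/* Bus-bandwidth voting helpers for the SPI clock path: switch the registered
 * bus-scaling client between the resume (active) and suspend (idle) vectors.
 */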
Gilad Avidov23350552013-05-21 09:26:46 -0600206static void msm_spi_clk_path_vote(struct msm_spi *dd)
207{
208 if (dd->clk_path_vote.client_hdl)
209 msm_bus_scale_client_update_request(
210 dd->clk_path_vote.client_hdl,
211 MSM_SPI_CLK_PATH_RESUME_VEC);
212}
213
214static void msm_spi_clk_path_unvote(struct msm_spi *dd)
215{
216 if (dd->clk_path_vote.client_hdl)
217 msm_bus_scale_client_update_request(
218 dd->clk_path_vote.client_hdl,
219 MSM_SPI_CLK_PATH_SUSPEND_VEC);
220}
221
222static void msm_spi_clk_path_teardown(struct msm_spi *dd)
223{
224 if (dd->pdata->active_only)
225 msm_spi_clk_path_unvote(dd);
226
227 if (dd->clk_path_vote.client_hdl) {
228 msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl);
229 dd->clk_path_vote.client_hdl = 0;
230 }
231}
232
/**
 * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init
 *
 * allocates and initializes the bus scaling vectors.
 */
238static int msm_spi_clk_path_init_structs(struct msm_spi *dd)
239{
240 struct msm_bus_vectors *paths = NULL;
241 struct msm_bus_paths *usecases = NULL;
242
243 dev_dbg(dd->dev, "initialises path clock voting structs");
244
245 paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL);
246 if (!paths) {
247 dev_err(dd->dev,
248 "msm_bus_paths.paths memory allocation failed");
249 return -ENOMEM;
250 }
251
252 usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL);
253 if (!usecases) {
254 dev_err(dd->dev,
255 "msm_bus_scale_pdata.usecases memory allocation failed");
256 goto path_init_err;
257 }
258
259 dd->clk_path_vote.pdata = devm_kzalloc(dd->dev,
260 sizeof(*dd->clk_path_vote.pdata),
261 GFP_KERNEL);
262 if (!dd->clk_path_vote.pdata) {
263 dev_err(dd->dev,
264 "msm_bus_scale_pdata memory allocation failed");
265 goto path_init_err;
266 }
267
268 paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
269 .src = dd->pdata->master_id,
270 .dst = MSM_BUS_SLAVE_EBI_CH0,
271 .ab = 0,
272 .ib = 0,
273 };
274
275 paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
276 .src = dd->pdata->master_id,
277 .dst = MSM_BUS_SLAVE_EBI_CH0,
278 .ab = MSM_SPI_CLK_PATH_AVRG_BW(dd),
279 .ib = MSM_SPI_CLK_PATH_BRST_BW(dd),
280 };
281
282 usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
283 .num_paths = 1,
284 .vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC],
285 };
286
287 usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
288 .num_paths = 1,
289 .vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC],
290 };
291
292 *dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
293 .active_only = dd->pdata->active_only,
294 .name = dev_name(dd->dev),
295 .num_usecases = 2,
296 .usecase = usecases,
297 };
298
299 return 0;
300
301path_init_err:
302 devm_kfree(dd->dev, paths);
303 devm_kfree(dd->dev, usecases);
304 devm_kfree(dd->dev, dd->clk_path_vote.pdata);
305 dd->clk_path_vote.pdata = NULL;
306 return -ENOMEM;
307}
308
/**
 * msm_spi_clk_path_postponed_register: register with bus-scaling after it is
 * probed
 *
 * @return: zero on success
 *
 * Workaround: the SPI driver may be probed before the bus scaling driver.
 * Calling msm_bus_scale_register_client() will fail if the bus scaling driver
 * is not ready yet. Thus, this function should be called not from probe but
 * from a later context. It may also be called more than once before
 * registration succeeds, in which case only one error message is logged. At
 * boot time all clocks are on, so earlier SPI transactions should succeed.
 */
321static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
322{
323 dd->clk_path_vote.client_hdl = msm_bus_scale_register_client(
324 dd->clk_path_vote.pdata);
325
326 if (dd->clk_path_vote.client_hdl) {
327 if (dd->clk_path_vote.reg_err) {
328 /* log a success message if an error msg was logged */
329 dd->clk_path_vote.reg_err = false;
330 dev_info(dd->dev,
331 "msm_bus_scale_register_client(mstr-id:%d "
332 "actv-only:%d):0x%x",
333 dd->pdata->master_id, dd->pdata->active_only,
334 dd->clk_path_vote.client_hdl);
335 }
336
337 if (dd->pdata->active_only)
338 msm_spi_clk_path_vote(dd);
339 } else {
		/* guard to log only one error on multiple failures */
341 if (!dd->clk_path_vote.reg_err) {
342 dd->clk_path_vote.reg_err = true;
343
344 dev_info(dd->dev,
345 "msm_bus_scale_register_client(mstr-id:%d "
346 "actv-only:%d):0",
347 dd->pdata->master_id, dd->pdata->active_only);
348 }
349 }
350
351 return dd->clk_path_vote.client_hdl ? 0 : -EAGAIN;
352}
353
354static void msm_spi_clk_path_init(struct msm_spi *dd)
355{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
360 if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl)
361 return;
362
	/* if this failed once, do not try again */
	if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) {
		dd->pdata->master_id = 0;
		return;
	}
368
369 /* on failure try again later */
370 if (msm_spi_clk_path_postponed_register(dd))
371 return;
372
373 if (dd->pdata->active_only)
374 msm_spi_clk_path_vote(dd);
375}
376
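/* Decode the block-size and FIFO-size fields read from SPI_IO_MODES into the
 * FIFO depth (in words) and the block size (in bytes).
 */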
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377static int msm_spi_calculate_size(int *fifo_size,
378 int *block_size,
379 int block,
380 int mult)
381{
382 int words;
383
384 switch (block) {
385 case 0:
386 words = 1; /* 4 bytes */
387 break;
388 case 1:
389 words = 4; /* 16 bytes */
390 break;
391 case 2:
392 words = 8; /* 32 bytes */
393 break;
394 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700395 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700396 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 switch (mult) {
399 case 0:
400 *fifo_size = words * 2;
401 break;
402 case 1:
403 *fifo_size = words * 4;
404 break;
405 case 2:
406 *fifo_size = words * 8;
407 break;
408 case 3:
409 *fifo_size = words * 16;
410 break;
411 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700412 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415 *block_size = words * sizeof(u32); /* in bytes */
416 return 0;
417}
418
419static void get_next_transfer(struct msm_spi *dd)
420{
421 struct spi_transfer *t = dd->cur_transfer;
422
423 if (t->transfer_list.next != &dd->cur_msg->transfers) {
424 dd->cur_transfer = list_entry(t->transfer_list.next,
425 struct spi_transfer,
426 transfer_list);
427 dd->write_buf = dd->cur_transfer->tx_buf;
428 dd->read_buf = dd->cur_transfer->rx_buf;
429 }
430}
431
432static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
433{
434 u32 spi_iom;
435 int block;
436 int mult;
437
438 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
439
440 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
441 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
442 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
443 block, mult)) {
444 goto fifo_size_err;
445 }
446
447 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
448 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
449 if (msm_spi_calculate_size(&dd->output_fifo_size,
450 &dd->output_block_size, block, mult)) {
451 goto fifo_size_err;
452 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600453 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
454 /* DM mode is not available for this block size */
455 if (dd->input_block_size == 4 || dd->output_block_size == 4)
456 dd->use_dma = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530458 if (dd->use_dma) {
459 dd->input_burst_size = max(dd->input_block_size,
460 DM_BURST_SIZE);
461 dd->output_burst_size = max(dd->output_block_size,
462 DM_BURST_SIZE);
463 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600464 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465
466 return;
467
468fifo_size_err:
469 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700470 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 return;
472}
473
474static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
475{
476 u32 data_in;
477 int i;
478 int shift;
479
480 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
481 if (dd->read_buf) {
482 for (i = 0; (i < dd->bytes_per_word) &&
483 dd->rx_bytes_remaining; i++) {
484 /* The data format depends on bytes_per_word:
485 4 bytes: 0x12345678
486 3 bytes: 0x00123456
487 2 bytes: 0x00001234
488 1 byte : 0x00000012
489 */
490 shift = 8 * (dd->bytes_per_word - i - 1);
491 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
492 dd->rx_bytes_remaining--;
493 }
494 } else {
495 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
496 dd->rx_bytes_remaining -= dd->bytes_per_word;
497 else
498 dd->rx_bytes_remaining = 0;
499 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700501 dd->read_xfr_cnt++;
502 if (dd->multi_xfr) {
503 if (!dd->rx_bytes_remaining)
504 dd->read_xfr_cnt = 0;
505 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
506 dd->read_len) {
507 struct spi_transfer *t = dd->cur_rx_transfer;
508 if (t->transfer_list.next != &dd->cur_msg->transfers) {
509 t = list_entry(t->transfer_list.next,
510 struct spi_transfer,
511 transfer_list);
512 dd->read_buf = t->rx_buf;
513 dd->read_len = t->len;
514 dd->read_xfr_cnt = 0;
515 dd->cur_rx_transfer = t;
516 }
517 }
518 }
519}
520
521static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
522{
523 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
524
525 return spi_op & SPI_OP_STATE_VALID;
526}
527
Sagar Dharia525593d2012-11-02 18:26:01 -0600528static inline void msm_spi_udelay(unsigned long delay_usecs)
529{
530 /*
531 * For smaller values of delay, context switch time
532 * would negate the usage of usleep
533 */
534 if (delay_usecs > 20)
535 usleep_range(delay_usecs, delay_usecs);
536 else if (delay_usecs)
537 udelay(delay_usecs);
538}
539
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700540static inline int msm_spi_wait_valid(struct msm_spi *dd)
541{
542 unsigned long delay = 0;
543 unsigned long timeout = 0;
544
545 if (dd->clock_speed == 0)
546 return -EINVAL;
547 /*
548 * Based on the SPI clock speed, sufficient time
549 * should be given for the SPI state transition
550 * to occur
551 */
552 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
553 /*
554 * For small delay values, the default timeout would
555 * be one jiffy
556 */
557 if (delay < SPI_DELAY_THRESHOLD)
558 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600559
560 /* Adding one to round off to the nearest jiffy */
561 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562 while (!msm_spi_is_valid_state(dd)) {
563 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600564 if (!msm_spi_is_valid_state(dd)) {
565 if (dd->cur_msg)
566 dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state not valid\n",
					__func__);
569 return -ETIMEDOUT;
570 } else
571 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 }
Sagar Dharia525593d2012-11-02 18:26:01 -0600573 msm_spi_udelay(delay);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574 }
575 return 0;
576}
577
578static inline int msm_spi_set_state(struct msm_spi *dd,
579 enum msm_spi_state state)
580{
581 enum msm_spi_state cur_state;
582 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700583 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700584 cur_state = readl_relaxed(dd->base + SPI_STATE);
585 /* Per spec:
586 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
587 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
588 (state == SPI_OP_STATE_RESET)) {
589 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
590 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
591 } else {
592 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
593 dd->base + SPI_STATE);
594 }
595 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700596 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700597
598 return 0;
599}
600
Gilad Avidovd0262342012-10-24 16:52:30 -0600601/**
602 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
603 */
604static inline void
605msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606{
607 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
608
609 if (n != (*config & SPI_CFG_N))
610 *config = (*config & ~SPI_CFG_N) | n;
611
Gilad Avidovd0262342012-10-24 16:52:30 -0600612 if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
613 || (dd->mode == SPI_BAM_MODE)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700614 if (dd->read_buf == NULL)
615 *config |= SPI_NO_INPUT;
616 if (dd->write_buf == NULL)
617 *config |= SPI_NO_OUTPUT;
618 }
619}
620
Gilad Avidovd0262342012-10-24 16:52:30 -0600621/**
622 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
623 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return: calculated value for SPI_CONFIG
625 */
626static u32
627msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700628{
Gilad Avidovd0262342012-10-24 16:52:30 -0600629 if (mode & SPI_LOOP)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630 spi_config |= SPI_CFG_LOOPBACK;
631 else
632 spi_config &= ~SPI_CFG_LOOPBACK;
Gilad Avidovd0262342012-10-24 16:52:30 -0600633
634 if (mode & SPI_CPHA)
635 spi_config &= ~SPI_CFG_INPUT_FIRST;
636 else
637 spi_config |= SPI_CFG_INPUT_FIRST;
638
639 return spi_config;
640}
641
642/**
643 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
644 * next transfer
645 */
646static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
647{
648 u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
649 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
650 spi_config, dd->cur_msg->spi->mode);
651
652 if (dd->qup_ver == SPI_QUP_VERSION_NONE)
653 /* flags removed from SPI_CONFIG in QUP version-2 */
654 msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
655 else if (dd->mode == SPI_BAM_MODE)
656 spi_config |= SPI_CFG_INPUT_FIRST;
657
Gilad Avidov91c2ab4c2013-03-12 11:01:22 -0600658 /*
659 * HS_MODE improves signal stability for spi-clk high rates
660 * but is invalid in LOOPBACK mode.
661 */
662 if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
663 !(dd->cur_msg->spi->mode & SPI_LOOP))
664 spi_config |= SPI_CFG_HS_MODE;
665 else
666 spi_config &= ~SPI_CFG_HS_MODE;
667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700668 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -0600669}
670
671/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
 * FIFO mode; set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words: the number of reads/writes of size N
676 */
677static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
678{
679 /*
680 * n_words cannot exceed fifo_size, and only one READ COUNT
681 * interrupt is generated per transaction, so for transactions
682 * larger than fifo size READ COUNT must be disabled.
683 * For those transactions we usually move to Data Mover mode.
684 */
685 if (dd->mode == SPI_FIFO_MODE) {
686 if (n_words <= dd->input_fifo_size) {
687 writel_relaxed(n_words,
688 dd->base + SPI_MX_READ_COUNT);
689 msm_spi_set_write_count(dd, n_words);
690 } else {
691 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
692 msm_spi_set_write_count(dd, 0);
693 }
694 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
695 /* must be zero for FIFO */
696 writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
697 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
698 }
699 } else {
700 /* must be zero for BAM and DMOV */
701 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
702 msm_spi_set_write_count(dd, 0);
703
704 /*
705 * for DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
707 * That case is a non-balanced transfer when there is
708 * only a read_buf.
709 */
710 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
711 if (dd->write_buf)
712 writel_relaxed(0,
713 dd->base + SPI_MX_INPUT_COUNT);
714 else
715 writel_relaxed(n_words,
716 dd->base + SPI_MX_INPUT_COUNT);
717
718 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
719 }
720 }
721}
722
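/* BAM pipe helpers: tear down an SPS pipe connection, or establish it and
 * register an EOT event that completes dd->transfer_complete. Used by the
 * BAM error-recovery flush below and wherever a pipe must be re-established.
 */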
Gilad Avidov799cfeb2013-06-26 17:18:36 -0600723static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
724 struct msm_spi_bam_pipe *pipe)
725{
726 int ret = sps_disconnect(pipe->handle);
727 if (ret) {
728 dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
729 __func__, pipe->name);
730 return ret;
731 }
732 return 0;
733}
734
735static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
736 struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
737{
738 int ret;
739 struct sps_register_event event = {
740 .mode = SPS_TRIGGER_WAIT,
741 .options = SPS_O_EOT,
742 .xfer_done = &dd->transfer_complete,
743 };
744
745 ret = sps_connect(pipe->handle, config);
746 if (ret) {
747 dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d",
748 __func__, pipe->name, pipe->handle, ret);
749 return ret;
750 }
751
752 ret = sps_register_event(pipe->handle, &event);
753 if (ret) {
754 dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d",
755 __func__, pipe->handle, pipe->name, ret);
756 msm_spi_bam_pipe_disconnect(dd, pipe);
757 return ret;
758 }
759
760 pipe->teardown_required = true;
761 return 0;
762}
763
764
765static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
766 enum msm_spi_pipe_direction pipe_dir)
767{
768 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
769 (&dd->bam.prod) : (&dd->bam.cons);
770 struct sps_connect config = pipe->config;
771 int ret;
772
773 ret = msm_spi_bam_pipe_disconnect(dd, pipe);
774 if (ret)
775 return;
776
777 ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
778 if (ret)
779 return;
780}
781
782static void msm_spi_bam_flush(struct msm_spi *dd)
783{
784 dev_dbg(dd->dev, "%s flushing bam for recovery\n" , __func__);
785
786 msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
787 msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
788}
789
Gilad Avidovd0262342012-10-24 16:52:30 -0600790/**
791 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
792 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer. Between transfers the QUP must go through the reset state. A
 * loop issues one BAM transfer at a time; if another transfer is required,
 * it waits for the current transfer to finish, moves the QUP to reset
 * state, then back to run state to issue the next transfer. The function
 * does not wait for the last (or only) transfer to end.
 * @timeout: max time in jiffies to wait for a transfer to finish
 * @return: zero on success
803 */
804static int
805msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
806{
807 u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
808 int ret;
809 /*
810 * QUP must move to reset mode every 64K-1 bytes of transfer
811 * (counter is 16 bit)
812 */
813 if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
814 /* assert chip select unconditionally */
815 u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
816 if (!(spi_ioc & SPI_IO_C_FORCE_CS))
817 writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
818 dd->base + SPI_IO_CONTROL);
819 }
820
821 /* Following flags are required since we are waiting on all transfers */
822 cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
823 /*
824 * on a balanced transaction, BAM will set the flags on the producer
825 * pipe based on the flags set on the consumer pipe
826 */
827 prod_flags = (dd->write_buf) ? 0 : cons_flags;
828
829 while (dd->tx_bytes_remaining > 0) {
830 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
831 bytes_to_send = min_t(u32, dd->tx_bytes_remaining
832 , SPI_MAX_TRFR_BTWN_RESETS);
833 n_words_xfr = DIV_ROUND_UP(bytes_to_send
834 , dd->bytes_per_word);
835
836 msm_spi_set_mx_counts(dd, n_words_xfr);
837
838 ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
839 if (ret < 0) {
840 dev_err(dd->dev,
841 "%s: Failed to set QUP state to run",
842 __func__);
843 goto xfr_err;
844 }
845
846 /* enqueue read buffer in BAM */
847 if (dd->read_buf) {
848 ret = sps_transfer_one(dd->bam.prod.handle,
849 dd->cur_transfer->rx_dma + bytes_sent,
850 bytes_to_send, dd, prod_flags);
851 if (ret < 0) {
852 dev_err(dd->dev,
853 "%s: Failed to queue producer BAM transfer",
854 __func__);
855 goto xfr_err;
856 }
857 }
858
859 /* enqueue write buffer in BAM */
860 if (dd->write_buf) {
861 ret = sps_transfer_one(dd->bam.cons.handle,
862 dd->cur_transfer->tx_dma + bytes_sent,
863 bytes_to_send, dd, cons_flags);
864 if (ret < 0) {
865 dev_err(dd->dev,
866 "%s: Failed to queue consumer BAM transfer",
867 __func__);
868 goto xfr_err;
869 }
870 }
871
872 dd->tx_bytes_remaining -= bytes_to_send;
873
874 /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
875 if (dd->tx_bytes_remaining > 0) {
876 if (!wait_for_completion_timeout(
877 &dd->transfer_complete, timeout)) {
878 dev_err(dd->dev,
879 "%s: SPI transaction timeout",
880 __func__);
881 dd->cur_msg->status = -EIO;
882 ret = -EIO;
883 goto xfr_err;
884 }
885 ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
886 if (ret < 0) {
887 dev_err(dd->dev,
888 "%s: Failed to set QUP state to reset",
889 __func__);
890 goto xfr_err;
891 }
892 init_completion(&dd->transfer_complete);
893 }
894 }
895 return 0;
896
897xfr_err:
898 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700899}
900
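/* Prepare the data-mover command lists for the next DMOV chunk: program box
 * descriptors for the burst-aligned portion, fall back to single descriptors
 * backed by the tx/rx padding buffers for unaligned tails, and write the
 * MX output/input counts.
 */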
901static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
902{
903 dmov_box *box;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530904 int bytes_to_send, bytes_sent;
905 int tx_num_rows, rx_num_rows;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700906 u32 num_transfers;
907
908 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530909 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700910 if (dd->write_len && !dd->read_len) {
911 /* WR-WR transfer */
912 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
913 dd->write_buf = dd->temp_buf;
914 } else {
915 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
916 /* For WR-RD transfer, bytes_sent can be negative */
917 if (bytes_sent < 0)
918 bytes_sent = 0;
919 }
	/* We'll send in chunks of SPI_MAX_LEN (4K bytes) on targets whose
	 * QUP_MAX_OUTPUT_CNT register is only 12 bits wide. If the target
	 * supports more than 12 bits, we send the data in chunks of the
	 * infinite_mode value defined in the corresponding board file.
	 */
927 if (!dd->pdata->infinite_mode)
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530928 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530929 else
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530930 dd->max_trfr_len = (dd->pdata->infinite_mode) *
931 (dd->bytes_per_word);
932
933 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
934 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530935
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700936 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530937 dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
938 dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
939 tx_num_rows = bytes_to_send / dd->output_burst_size;
940 rx_num_rows = bytes_to_send / dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700941
942 dd->mode = SPI_DMOV_MODE;
943
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530944 if (tx_num_rows) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700945 /* src in 16 MSB, dst in 16 LSB */
946 box = &dd->tx_dmov_cmd->box;
947 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530948 box->src_dst_len
949 = (dd->output_burst_size << 16) | dd->output_burst_size;
950 box->num_rows = (tx_num_rows << 16) | tx_num_rows;
951 box->row_offset = (dd->output_burst_size << 16) | 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700952
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530953 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
954 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
955 offsetof(struct spi_dmov_cmd, box));
956 } else {
957 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
958 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
959 offsetof(struct spi_dmov_cmd, single_pad));
960 }
961
962 if (rx_num_rows) {
963 /* src in 16 MSB, dst in 16 LSB */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700964 box = &dd->rx_dmov_cmd->box;
965 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530966 box->src_dst_len
967 = (dd->input_burst_size << 16) | dd->input_burst_size;
968 box->num_rows = (rx_num_rows << 16) | rx_num_rows;
969 box->row_offset = (0 << 16) | dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
972 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
973 offsetof(struct spi_dmov_cmd, box));
974 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
976 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
977 offsetof(struct spi_dmov_cmd, single_pad));
978 }
979
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530980 if (!dd->tx_unaligned_len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700982 } else {
983 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530984 u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700985
986 if ((dd->multi_xfr) && (dd->read_len <= 0))
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530987 tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988
989 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700990
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530991 memset(dd->tx_padding, 0, dd->output_burst_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992 if (dd->write_buf)
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530993 memcpy(dd->tx_padding, dd->write_buf + tx_offset,
994 dd->tx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700995
996 tx_cmd->src = dd->tx_padding_dma;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530997 tx_cmd->len = dd->output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700998 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530999
1000 if (!dd->rx_unaligned_len) {
1001 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
1002 } else {
1003 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
1004 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
1005
1006 memset(dd->rx_padding, 0, dd->input_burst_size);
1007 rx_cmd->dst = dd->rx_padding_dma;
1008 rx_cmd->len = dd->input_burst_size;
1009 }
1010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001011 /* This also takes care of the padding dummy buf
1012 Since this is set to the correct length, the
1013 dummy bytes won't be actually sent */
1014 if (dd->multi_xfr) {
1015 u32 write_transfers = 0;
1016 u32 read_transfers = 0;
1017
1018 if (dd->write_len > 0) {
1019 write_transfers = DIV_ROUND_UP(dd->write_len,
1020 dd->bytes_per_word);
1021 writel_relaxed(write_transfers,
1022 dd->base + SPI_MX_OUTPUT_COUNT);
1023 }
1024 if (dd->read_len > 0) {
1025 /*
1026 * The read following a write transfer must take
1027 * into account, that the bytes pertaining to
1028 * the write transfer needs to be discarded,
1029 * before the actual read begins.
1030 */
1031 read_transfers = DIV_ROUND_UP(dd->read_len +
1032 dd->write_len,
1033 dd->bytes_per_word);
1034 writel_relaxed(read_transfers,
1035 dd->base + SPI_MX_INPUT_COUNT);
1036 }
1037 } else {
1038 if (dd->write_buf)
1039 writel_relaxed(num_transfers,
1040 dd->base + SPI_MX_OUTPUT_COUNT);
1041 if (dd->read_buf)
1042 writel_relaxed(num_transfers,
1043 dd->base + SPI_MX_INPUT_COUNT);
1044 }
1045}
1046
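/* Issue the prepared TX and RX command lists to their data-mover channels
 * after a coherency barrier.
 */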
1047static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
1048{
1049 dma_coherent_pre_ops();
1050 if (dd->write_buf)
1051 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
1052 if (dd->read_buf)
1053 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
1054}
1055
/* On targets that do not support infinite mode, the SPI core can send at most
   4K or 64K bytes per transfer, depending on the size of the MAX_OUTPUT_COUNT
   register. Therefore we send the data in several chunks: upon completion we
   send the next chunk, or complete the transfer if everything is finished.
   On targets that support infinite mode, we send all the bytes as a single
   chunk.
*/
1063static int msm_spi_dm_send_next(struct msm_spi *dd)
1064{
1065 /* By now we should have sent all the bytes in FIFO mode,
1066 * However to make things right, we'll check anyway.
1067 */
1068 if (dd->mode != SPI_DMOV_MODE)
1069 return 0;
1070
	/* On targets that do not support infinite mode, we need to send
	 * more chunks if we sent the maximum last time.
	 */
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05301073 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
1074 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001075 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1076 return 0;
1077 dd->read_len = dd->write_len = 0;
1078 msm_spi_setup_dm_transfer(dd);
1079 msm_spi_enqueue_dm_commands(dd);
1080 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1081 return 0;
1082 return 1;
1083 } else if (dd->read_len && dd->write_len) {
1084 dd->tx_bytes_remaining -= dd->cur_transfer->len;
1085 if (list_is_last(&dd->cur_transfer->transfer_list,
1086 &dd->cur_msg->transfers))
1087 return 0;
1088 get_next_transfer(dd);
1089 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
1090 return 0;
1091 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
1092 dd->read_buf = dd->temp_buf;
1093 dd->read_len = dd->write_len = -1;
1094 msm_spi_setup_dm_transfer(dd);
1095 msm_spi_enqueue_dm_commands(dd);
1096 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1097 return 0;
1098 return 1;
1099 }
1100 return 0;
1101}
1102
1103static inline void msm_spi_ack_transfer(struct msm_spi *dd)
1104{
1105 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
1106 SPI_OP_MAX_OUTPUT_DONE_FLAG,
1107 dd->base + SPI_OPERATIONAL);
1108 /* Ensure done flag was cleared before proceeding further */
1109 mb();
1110}
1111
/* Figure out which IRQ occurred and call the relevant handlers */
1113static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
1114{
1115 u32 op, ret = IRQ_NONE;
1116 struct msm_spi *dd = dev_id;
1117
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301118 if (pm_runtime_suspended(dd->dev)) {
1119 dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
1120 return ret;
1121 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001122 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
1123 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
1124 struct spi_master *master = dev_get_drvdata(dd->dev);
1125 ret |= msm_spi_error_irq(irq, master);
1126 }
1127
1128 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1129 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
1130 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
1131 dd->base + SPI_OPERATIONAL);
1132 /*
1133 * Ensure service flag was cleared before further
1134 * processing of interrupt.
1135 */
1136 mb();
1137 ret |= msm_spi_input_irq(irq, dev_id);
1138 }
1139
1140 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
1141 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
1142 dd->base + SPI_OPERATIONAL);
1143 /*
1144 * Ensure service flag was cleared before further
1145 * processing of interrupt.
1146 */
1147 mb();
1148 ret |= msm_spi_output_irq(irq, dev_id);
1149 }
1150
1151 if (dd->done) {
1152 complete(&dd->transfer_complete);
1153 dd->done = 0;
1154 }
1155 return ret;
1156}
1157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001158static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
1159{
1160 struct msm_spi *dd = dev_id;
1161
1162 dd->stat_rx++;
1163
1164 if (dd->mode == SPI_MODE_NONE)
1165 return IRQ_HANDLED;
1166
1167 if (dd->mode == SPI_DMOV_MODE) {
1168 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1169 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
1170 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
1171 msm_spi_ack_transfer(dd);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301172 if (dd->rx_unaligned_len == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1174 return IRQ_HANDLED;
1175 }
1176 msm_spi_complete(dd);
1177 return IRQ_HANDLED;
1178 }
1179 return IRQ_NONE;
1180 }
1181
1182 if (dd->mode == SPI_FIFO_MODE) {
1183 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
1184 SPI_OP_IP_FIFO_NOT_EMPTY) &&
1185 (dd->rx_bytes_remaining > 0)) {
1186 msm_spi_read_word_from_fifo(dd);
1187 }
1188 if (dd->rx_bytes_remaining == 0)
1189 msm_spi_complete(dd);
1190 }
1191
1192 return IRQ_HANDLED;
1193}
1194
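/* Pack up to bytes_per_word bytes from the write buffer into a single word
 * (most significant byte first) and push it to the output FIFO, advancing
 * the multi-transfer bookkeeping when a sub-transfer completes.
 */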
1195static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1196{
1197 u32 word;
1198 u8 byte;
1199 int i;
1200
1201 word = 0;
1202 if (dd->write_buf) {
1203 for (i = 0; (i < dd->bytes_per_word) &&
1204 dd->tx_bytes_remaining; i++) {
1205 dd->tx_bytes_remaining--;
1206 byte = *dd->write_buf++;
1207 word |= (byte << (BITS_PER_BYTE * (3 - i)));
1208 }
1209 } else
1210 if (dd->tx_bytes_remaining > dd->bytes_per_word)
1211 dd->tx_bytes_remaining -= dd->bytes_per_word;
1212 else
1213 dd->tx_bytes_remaining = 0;
1214 dd->write_xfr_cnt++;
1215 if (dd->multi_xfr) {
1216 if (!dd->tx_bytes_remaining)
1217 dd->write_xfr_cnt = 0;
1218 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
1219 dd->write_len) {
1220 struct spi_transfer *t = dd->cur_tx_transfer;
1221 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1222 t = list_entry(t->transfer_list.next,
1223 struct spi_transfer,
1224 transfer_list);
1225 dd->write_buf = t->tx_buf;
1226 dd->write_len = t->len;
1227 dd->write_xfr_cnt = 0;
1228 dd->cur_tx_transfer = t;
1229 }
1230 }
1231 }
1232 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1233}
1234
1235static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1236{
1237 int count = 0;
1238
1239 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
1240 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
1241 SPI_OP_OUTPUT_FIFO_FULL)) {
1242 msm_spi_write_word_to_fifo(dd);
1243 count++;
1244 }
1245}
1246
1247static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1248{
1249 struct msm_spi *dd = dev_id;
1250
1251 dd->stat_tx++;
1252
1253 if (dd->mode == SPI_MODE_NONE)
1254 return IRQ_HANDLED;
1255
1256 if (dd->mode == SPI_DMOV_MODE) {
1257 /* TX_ONLY transaction is handled here
1258 This is the only place we send complete at tx and not rx */
1259 if (dd->read_buf == NULL &&
1260 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1261 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1262 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301263 if (atomic_inc_return(&dd->tx_irq_called) == 1)
1264 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001265 msm_spi_complete(dd);
1266 return IRQ_HANDLED;
1267 }
1268 return IRQ_NONE;
1269 }
1270
1271 /* Output FIFO is empty. Transmit any outstanding write data. */
1272 if (dd->mode == SPI_FIFO_MODE)
1273 msm_spi_write_rmn_to_fifo(dd);
1274
1275 return IRQ_HANDLED;
1276}
1277
1278static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1279{
1280 struct spi_master *master = dev_id;
1281 struct msm_spi *dd = spi_master_get_devdata(master);
1282 u32 spi_err;
1283
1284 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1285 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1286 dev_warn(master->dev.parent, "SPI output overrun error\n");
1287 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1288 dev_warn(master->dev.parent, "SPI input underrun error\n");
1289 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1290 dev_warn(master->dev.parent, "SPI output underrun error\n");
1291 msm_spi_get_clk_err(dd, &spi_err);
1292 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1293 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1294 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1295 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1296 msm_spi_clear_error_flags(dd);
1297 msm_spi_ack_clk_err(dd);
1298 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1299 mb();
1300 return IRQ_HANDLED;
1301}
1302
Gilad Avidovd0262342012-10-24 16:52:30 -06001303/**
1304 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
1305 * @return zero on success or negative error code
1306 *
 * calls dma_map_single() on the read/write buffers, effectively invalidating
 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 * buffer and copies the data to/from the client buffers
1310 */
1311static int msm_spi_dma_map_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312{
1313 struct device *dev;
1314 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -06001315 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001316 void *tx_buf, *rx_buf;
1317 unsigned tx_len, rx_len;
1318 int ret = -EINVAL;
1319
1320 dev = &dd->cur_msg->spi->dev;
1321 first_xfr = dd->cur_transfer;
1322 tx_buf = (void *)first_xfr->tx_buf;
1323 rx_buf = first_xfr->rx_buf;
1324 tx_len = rx_len = first_xfr->len;
1325
1326 /*
1327 * For WR-WR and WR-RD transfers, we allocate our own temporary
1328 * buffer and copy the data to/from the client buffers.
1329 */
1330 if (dd->multi_xfr) {
1331 dd->temp_buf = kzalloc(dd->cur_msg_len,
1332 GFP_KERNEL | __GFP_DMA);
1333 if (!dd->temp_buf)
1334 return -ENOMEM;
1335 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1336 struct spi_transfer, transfer_list);
1337
1338 if (dd->write_len && !dd->read_len) {
1339 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1340 goto error;
1341
1342 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1343 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1344 nxt_xfr->len);
1345 tx_buf = dd->temp_buf;
1346 tx_len = dd->cur_msg_len;
1347 } else {
1348 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1349 goto error;
1350
1351 rx_buf = dd->temp_buf;
1352 rx_len = dd->cur_msg_len;
1353 }
1354 }
1355 if (tx_buf != NULL) {
1356 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1357 tx_len, DMA_TO_DEVICE);
1358 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1359 dev_err(dev, "dma %cX %d bytes error\n",
1360 'T', tx_len);
1361 ret = -ENOMEM;
1362 goto error;
1363 }
1364 }
1365 if (rx_buf != NULL) {
1366 dma_addr_t dma_handle;
1367 dma_handle = dma_map_single(dev, rx_buf,
1368 rx_len, DMA_FROM_DEVICE);
1369 if (dma_mapping_error(NULL, dma_handle)) {
1370 dev_err(dev, "dma %cX %d bytes error\n",
1371 'R', rx_len);
1372 if (tx_buf != NULL)
1373 dma_unmap_single(NULL, first_xfr->tx_dma,
1374 tx_len, DMA_TO_DEVICE);
1375 ret = -ENOMEM;
1376 goto error;
1377 }
1378 if (dd->multi_xfr)
1379 nxt_xfr->rx_dma = dma_handle;
1380 else
1381 first_xfr->rx_dma = dma_handle;
1382 }
1383 return 0;
1384
1385error:
1386 kfree(dd->temp_buf);
1387 dd->temp_buf = NULL;
1388 return ret;
1389}
1390
Gilad Avidovd0262342012-10-24 16:52:30 -06001391static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001392{
1393 struct device *dev;
1394 u32 offset;
1395
1396 dev = &dd->cur_msg->spi->dev;
1397 if (dd->cur_msg->is_dma_mapped)
1398 goto unmap_end;
1399
1400 if (dd->multi_xfr) {
1401 if (dd->write_len && !dd->read_len) {
1402 dma_unmap_single(dev,
1403 dd->cur_transfer->tx_dma,
1404 dd->cur_msg_len,
1405 DMA_TO_DEVICE);
1406 } else {
1407 struct spi_transfer *prev_xfr;
1408 prev_xfr = list_entry(
1409 dd->cur_transfer->transfer_list.prev,
1410 struct spi_transfer,
1411 transfer_list);
1412 if (dd->cur_transfer->rx_buf) {
1413 dma_unmap_single(dev,
1414 dd->cur_transfer->rx_dma,
1415 dd->cur_msg_len,
1416 DMA_FROM_DEVICE);
1417 }
1418 if (prev_xfr->tx_buf) {
1419 dma_unmap_single(dev,
1420 prev_xfr->tx_dma,
1421 prev_xfr->len,
1422 DMA_TO_DEVICE);
1423 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301424 if (dd->rx_unaligned_len && dd->read_buf) {
1425 offset = dd->cur_msg_len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001426 dma_coherent_post_ops();
1427 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301428 dd->rx_unaligned_len);
Gilad Avidov8d99efa2013-06-27 15:33:02 -06001429 if (dd->cur_transfer->rx_buf)
1430 memcpy(dd->cur_transfer->rx_buf,
1431 dd->read_buf + prev_xfr->len,
1432 dd->cur_transfer->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001433 }
1434 }
1435 kfree(dd->temp_buf);
1436 dd->temp_buf = NULL;
1437 return;
1438 } else {
1439 if (dd->cur_transfer->rx_buf)
1440 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1441 dd->cur_transfer->len,
1442 DMA_FROM_DEVICE);
1443 if (dd->cur_transfer->tx_buf)
1444 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1445 dd->cur_transfer->len,
1446 DMA_TO_DEVICE);
1447 }
1448
1449unmap_end:
1450 /* If we padded the transfer, we copy it from the padding buf */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301451 if (dd->rx_unaligned_len && dd->read_buf) {
1452 offset = dd->cur_transfer->len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001453 dma_coherent_post_ops();
1454 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301455 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001456 }
1457}
1458
Gilad Avidovd0262342012-10-24 16:52:30 -06001459static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
1460{
1461 struct device *dev;
1462
1463 /* mapped by client */
1464 if (dd->cur_msg->is_dma_mapped)
1465 return;
1466
1467 dev = &dd->cur_msg->spi->dev;
1468 if (dd->cur_transfer->rx_buf)
1469 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1470 dd->cur_transfer->len,
1471 DMA_FROM_DEVICE);
1472
1473 if (dd->cur_transfer->tx_buf)
1474 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1475 dd->cur_transfer->len,
1476 DMA_TO_DEVICE);
1477}
1478
1479static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
1480{
1481 if (dd->mode == SPI_DMOV_MODE)
1482 msm_spi_dmov_unmap_buffers(dd);
1483 else if (dd->mode == SPI_BAM_MODE)
1484 msm_spi_bam_unmap_buffers(dd);
1485}
1486
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001487/**
Gilad Avidovd0262342012-10-24 16:52:30 -06001488 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
1489 * the given transfer
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001490 * @dd: device
1491 * @tr: transfer
1492 *
 * Use DMA if:
 * 1. It is supported by the HW
 * 2. It is not disabled by platform data
 * 3. Transfer size is greater than 3 * block size
 * 4. Buffers are aligned to the cache line
 * 5. Bytes-per-word is 8, 16 or 32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001500static inline bool
1501msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001502{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503 if (!dd->use_dma)
Gilad Avidovd0262342012-10-24 16:52:30 -06001504 return false;
1505
1506 /* check constraints from platform data */
1507 if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
1508 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001509
1510 if (dd->cur_msg_len < 3*dd->input_block_size)
Gilad Avidovd0262342012-10-24 16:52:30 -06001511 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001512
1513 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
Gilad Avidovd0262342012-10-24 16:52:30 -06001514 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515
Gilad Avidovd0262342012-10-24 16:52:30 -06001516 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
1517 u32 cache_line = dma_get_cache_alignment();
1518
1519 if (tr->tx_buf) {
1520 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return false;
1522 }
1523 if (tr->rx_buf) {
1524 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1525 return false;
1526 }
1527
1528 if (tr->cs_change &&
Kiran Gunda84286c32013-04-29 18:05:49 +05301529 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
Gilad Avidovd0262342012-10-24 16:52:30 -06001530 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001531 }
1532
Gilad Avidovd0262342012-10-24 16:52:30 -06001533 return true;
1534}
1535
1536/**
1537 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
1538 * prepares to process a transfer.
1539 */
1540static void
1541msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
1542{
1543 if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
1544 if (dd->qup_ver) {
1545 dd->mode = SPI_BAM_MODE;
1546 } else {
1547 dd->mode = SPI_DMOV_MODE;
1548 if (dd->write_len && dd->read_len) {
1549 dd->tx_bytes_remaining = dd->write_len;
1550 dd->rx_bytes_remaining = dd->read_len;
1551 }
1552 }
1553 } else {
1554 dd->mode = SPI_FIFO_MODE;
1555 if (dd->multi_xfr) {
1556 dd->read_len = dd->cur_transfer->len;
1557 dd->write_len = dd->cur_transfer->len;
1558 }
1559 }
1560}
1561
1562/**
1563 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
1564 * transfer
1565 */
1566static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
1567{
1568 u32 spi_iom;
1569 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1570 /* Set input and output transfer mode: FIFO, DMOV, or BAM */
1571 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1572 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1573 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1574 /* Turn on packing for data mover */
1575 if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
1576 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1577 else
1578 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1579
1580 /*if (dd->mode == SPI_BAM_MODE) {
1581 spi_iom |= SPI_IO_C_NO_TRI_STATE;
1582 spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
1583 }*/
1584 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1585}
1586
1587static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
1588{
1589 if (mode & SPI_CPOL)
1590 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1591 else
1592 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1593 return spi_ioc;
1594}
1595
1596/**
1597 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
1598 * next transfer
1599 * @return the new set value of SPI_IO_CONTROL
1600 */
1601static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
1602{
1603 u32 spi_ioc, spi_ioc_orig, chip_select;
1604
1605 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1606 spi_ioc_orig = spi_ioc;
1607 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
1608 , dd->cur_msg->spi->mode);
1609 /* Set chip-select */
1610 chip_select = dd->cur_msg->spi->chip_select << 2;
1611 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1612 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1613 if (!dd->cur_transfer->cs_change)
1614 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1615
1616 if (spi_ioc != spi_ioc_orig)
1617 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1618
1619 return spi_ioc;
1620}
1621
1622/**
1623 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
1624 * the next transfer
1625 */
1626static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
1627{
1628 /* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status
1629 * change in BAM mode */
1630 u32 mask = (dd->mode == SPI_BAM_MODE) ?
1631 QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
1632 : 0;
1633 writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634}
1635
1636static void msm_spi_process_transfer(struct msm_spi *dd)
1637{
1638 u8 bpw;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639 u32 max_speed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640 u32 read_count;
1641 u32 timeout;
Gilad Avidovd0262342012-10-24 16:52:30 -06001642 u32 spi_ioc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001643 u32 int_loopback = 0;
1644
1645 dd->tx_bytes_remaining = dd->cur_msg_len;
1646 dd->rx_bytes_remaining = dd->cur_msg_len;
1647 dd->read_buf = dd->cur_transfer->rx_buf;
1648 dd->write_buf = dd->cur_transfer->tx_buf;
1649 init_completion(&dd->transfer_complete);
1650 if (dd->cur_transfer->bits_per_word)
1651 bpw = dd->cur_transfer->bits_per_word;
1652 else
1653 if (dd->cur_msg->spi->bits_per_word)
1654 bpw = dd->cur_msg->spi->bits_per_word;
1655 else
1656 bpw = 8;
1657 dd->bytes_per_word = (bpw + 7) / 8;
1658
1659 if (dd->cur_transfer->speed_hz)
1660 max_speed = dd->cur_transfer->speed_hz;
1661 else
1662 max_speed = dd->cur_msg->spi->max_speed_hz;
1663 if (!dd->clock_speed || max_speed != dd->clock_speed)
1664 msm_spi_clock_set(dd, max_speed);
1665
Gilad Avidovd0262342012-10-24 16:52:30 -06001666 timeout = 100 * msecs_to_jiffies(
1667 DIV_ROUND_UP(dd->cur_msg_len * 8,
1668 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1669
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1671 if (dd->cur_msg->spi->mode & SPI_LOOP)
1672 int_loopback = 1;
1673 if (int_loopback && dd->multi_xfr &&
1674 (read_count > dd->input_fifo_size)) {
1675 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001676 pr_err(
1677 "%s: Internal Loopback does not support > FIFO size "
1678 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001679 __func__);
1680 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001681 pr_err(
1682 "%s: Internal Loopback does not support > FIFO size "
1683 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001684 __func__);
1685 return;
1686 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001687
Gilad Avidovd0262342012-10-24 16:52:30 -06001688 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1689 dev_err(dd->dev,
1690 "%s: Error setting QUP to reset-state",
1691 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001692
Gilad Avidovd0262342012-10-24 16:52:30 -06001693 msm_spi_set_transfer_mode(dd, bpw, read_count);
1694 msm_spi_set_mx_counts(dd, read_count);
1695 if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
1696 if (msm_spi_dma_map_buffers(dd) < 0) {
1697 pr_err("%s: Error mapping DMA buffers\n", __func__);
1698 return;
1699 }
1700 msm_spi_set_qup_io_modes(dd);
1701 msm_spi_set_spi_config(dd, bpw);
1702 msm_spi_set_qup_config(dd, bpw);
1703 spi_ioc = msm_spi_set_spi_io_control(dd);
1704 msm_spi_set_qup_op_mask(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705
1706 if (dd->mode == SPI_DMOV_MODE) {
1707 msm_spi_setup_dm_transfer(dd);
1708 msm_spi_enqueue_dm_commands(dd);
1709 }
1710 /* The output fifo interrupt handler will handle all writes after
1711 the first. Restricting this to one write avoids contention
1712 issues and race conditions between this thread and the int handler
1713 */
1714 else if (dd->mode == SPI_FIFO_MODE) {
1715 if (msm_spi_prepare_for_write(dd))
1716 goto transfer_end;
1717 msm_spi_start_write(dd, read_count);
Gilad Avidovd0262342012-10-24 16:52:30 -06001718 } else if (dd->mode == SPI_BAM_MODE) {
1719 if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
1720 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
1721 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 }
1723
Gilad Avidovd0262342012-10-24 16:52:30 -06001724 /*
1725 * In BAM mode, the QUP is already in the RUN state at this point.
1726 * Only enter the RUN state after the first word is written into
1727 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1728 * might fire before the first word is written resulting in a
1729 * possible race condition.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001730 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001731 if (dd->mode != SPI_BAM_MODE)
1732 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
1733 dev_warn(dd->dev,
1734 "%s: Failed to set QUP to run-state. Mode:%d",
1735 __func__, dd->mode);
1736 goto transfer_end;
1737 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738
1739 /* Assume success, this might change later upon transaction result */
1740 dd->cur_msg->status = 0;
1741 do {
1742 if (!wait_for_completion_timeout(&dd->transfer_complete,
1743 timeout)) {
Gilad Avidovd0262342012-10-24 16:52:30 -06001744 dev_err(dd->dev,
1745 "%s: SPI transaction timeout\n",
1746 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001747 dd->cur_msg->status = -EIO;
1748 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001749 msm_dmov_flush(dd->tx_dma_chan, 1);
1750 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001751 }
Gilad Avidov799cfeb2013-06-26 17:18:36 -06001752 if (dd->mode == SPI_BAM_MODE)
1753 msm_spi_bam_flush(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001754 break;
1755 }
1756 } while (msm_spi_dm_send_next(dd));
1757
Sagar Dharia525593d2012-11-02 18:26:01 -06001758 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001759transfer_end:
Gilad Avidovd0262342012-10-24 16:52:30 -06001760 msm_spi_dma_unmap_buffers(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001761 dd->mode = SPI_MODE_NONE;
1762
1763 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1764 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1765 dd->base + SPI_IO_CONTROL);
1766}
1767
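/*
 * get_transfer_length: walks the current message to compute the total
 * message length and flag multi-transfer messages. dd->read_len and
 * dd->write_len are only filled in for two-transfer WR-WR or WR-RD
 * messages whose first transfer is write-only.
 */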
1768static void get_transfer_length(struct msm_spi *dd)
1769{
1770 struct spi_transfer *tr;
1771 int num_xfrs = 0;
1772 int readlen = 0;
1773 int writelen = 0;
1774
1775 dd->cur_msg_len = 0;
1776 dd->multi_xfr = 0;
1777 dd->read_len = dd->write_len = 0;
1778
1779 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1780 if (tr->tx_buf)
1781 writelen += tr->len;
1782 if (tr->rx_buf)
1783 readlen += tr->len;
1784 dd->cur_msg_len += tr->len;
1785 num_xfrs++;
1786 }
1787
1788 if (num_xfrs == 2) {
1789 struct spi_transfer *first_xfr = dd->cur_transfer;
1790
1791 dd->multi_xfr = 1;
1792 tr = list_entry(first_xfr->transfer_list.next,
1793 struct spi_transfer,
1794 transfer_list);
1795 /*
1796 * We update dd->read_len and dd->write_len only
1797 * for WR-WR and WR-RD transfers.
1798 */
1799 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1800 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1801 ((!tr->tx_buf) && (tr->rx_buf))) {
1802 dd->read_len = readlen;
1803 dd->write_len = writelen;
1804 }
1805 }
1806 } else if (num_xfrs > 1)
1807 dd->multi_xfr = 1;
1808}
1809
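/*
 * combine_transfers: groups consecutive transfers that share the same
 * cs_change setting into one logical transfer, accumulating their lengths
 * in dd->cur_msg_len, and returns the number of transfers grouped.
 */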
1810static inline int combine_transfers(struct msm_spi *dd)
1811{
1812 struct spi_transfer *t = dd->cur_transfer;
1813 struct spi_transfer *nxt;
1814 int xfrs_grped = 1;
1815
1816 dd->cur_msg_len = dd->cur_transfer->len;
1817 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1818 nxt = list_entry(t->transfer_list.next,
1819 struct spi_transfer,
1820 transfer_list);
1821 if (t->cs_change != nxt->cs_change)
1822 return xfrs_grped;
1823 dd->cur_msg_len += nxt->len;
1824 xfrs_grped++;
1825 t = nxt;
1826 }
1827 return xfrs_grped;
1828}
1829
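/*
 * write_force_cs: sets or clears the FORCE_CS bit, which on QUP v2
 * controllers presumably keeps chip-select asserted across adjacent
 * transfers instead of toggling it between them.
 */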
Harini Jayaraman093938a2012-04-20 15:33:23 -06001830static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1831{
1832 u32 spi_ioc;
1833 u32 spi_ioc_orig;
1834
1835 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1836 spi_ioc_orig = spi_ioc;
1837 if (set_flag)
1838 spi_ioc |= SPI_IO_C_FORCE_CS;
1839 else
1840 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1841
1842 if (spi_ioc != spi_ioc_orig)
1843 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1844}
1845
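/*
 * msm_spi_process_message: processes one queued spi_message. It requests
 * the chip-select GPIO if needed, decides whether transfers may be
 * combined (they are processed one at a time on QUP v2 or when a
 * per-transfer delay is requested), and hands each transfer or combined
 * group to msm_spi_process_transfer().
 */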
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846static void msm_spi_process_message(struct msm_spi *dd)
1847{
1848 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001849 int cs_num;
1850 int rc;
Sagar Dharia525593d2012-11-02 18:26:01 -06001851 bool xfer_delay = false;
1852 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001855 cs_num = dd->cur_msg->spi->chip_select;
1856 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1857 (!(dd->cs_gpios[cs_num].valid)) &&
1858 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1859 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1860 spi_cs_rsrcs[cs_num]);
1861 if (rc) {
1862 dev_err(dd->dev, "gpio_request for pin %d failed with "
1863 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1864 rc);
1865 return;
1866 }
1867 dd->cs_gpios[cs_num].valid = 1;
1868 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869
Sagar Dharia525593d2012-11-02 18:26:01 -06001870 list_for_each_entry(tr,
1871 &dd->cur_msg->transfers,
1872 transfer_list) {
1873 if (tr->delay_usecs) {
1874 dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs",
1875 tr->delay_usecs);
1876 xfer_delay = true;
1877 break;
1878 }
1879 }
1880
1881 /* Don't combine xfers if delay is needed after every xfer */
1882 if (dd->qup_ver || xfer_delay) {
1883 if (dd->qup_ver)
1884 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001885 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001886 &dd->cur_msg->transfers,
1887 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001888 struct spi_transfer *t = dd->cur_transfer;
1889 struct spi_transfer *nxt;
1890
1891 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1892 nxt = list_entry(t->transfer_list.next,
1893 struct spi_transfer,
1894 transfer_list);
1895
Sagar Dharia525593d2012-11-02 18:26:01 -06001896 if (dd->qup_ver &&
1897 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001898 write_force_cs(dd, 1);
Sagar Dharia525593d2012-11-02 18:26:01 -06001899 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001900 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001902
1903 dd->cur_msg_len = dd->cur_transfer->len;
1904 msm_spi_process_transfer(dd);
1905 }
1906 } else {
1907 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1908 struct spi_transfer,
1909 transfer_list);
1910 get_transfer_length(dd);
1911 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1912 /*
1913 * Handling of multi-transfers.
1914 * FIFO mode is used by default
1915 */
1916 list_for_each_entry(dd->cur_transfer,
1917 &dd->cur_msg->transfers,
1918 transfer_list) {
1919 if (!dd->cur_transfer->len)
1920 goto error;
1921 if (xfrs_grped) {
1922 xfrs_grped--;
1923 continue;
1924 } else {
1925 dd->read_len = dd->write_len = 0;
1926 xfrs_grped = combine_transfers(dd);
1927 }
1928
1929 dd->cur_tx_transfer = dd->cur_transfer;
1930 dd->cur_rx_transfer = dd->cur_transfer;
1931 msm_spi_process_transfer(dd);
1932 xfrs_grped--;
1933 }
1934 } else {
1935 /* Handling of a single transfer or
1936 * WR-WR or WR-RD transfers
1937 */
1938 if ((!dd->cur_msg->is_dma_mapped) &&
Gilad Avidovd0262342012-10-24 16:52:30 -06001939 (msm_spi_use_dma(dd, dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001940 dd->cur_transfer->bits_per_word))) {
1941 /* Mapping of DMA buffers */
Gilad Avidovd0262342012-10-24 16:52:30 -06001942 int ret = msm_spi_dma_map_buffers(dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001943 if (ret < 0) {
1944 dd->cur_msg->status = ret;
1945 goto error;
1946 }
1947 }
1948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949 dd->cur_tx_transfer = dd->cur_transfer;
1950 dd->cur_rx_transfer = dd->cur_transfer;
1951 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001952 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001954
1955 return;
1956
1957error:
1958 if (dd->cs_gpios[cs_num].valid) {
1959 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1960 dd->cs_gpios[cs_num].valid = 0;
1961 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001962}
1963
1964/* workqueue - pull messages from queue & process */
1965static void msm_spi_workq(struct work_struct *work)
1966{
1967 struct msm_spi *dd =
1968 container_of(work, struct msm_spi, work_data);
1969 unsigned long flags;
1970 u32 status_error = 0;
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301971
1972 pm_runtime_get_sync(dd->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001973
1974 mutex_lock(&dd->core_lock);
1975
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301976 /*
1977 * Counter-part of system-suspend when runtime-pm is not enabled.
1978 * This way, resume can be left empty and device will be put in
1979 * active mode only if client requests anything on the bus
1980 */
1981 if (!pm_runtime_enabled(dd->dev))
1982 msm_spi_pm_resume_runtime(dd->dev);
1983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 if (dd->use_rlock)
1985 remote_mutex_lock(&dd->r_lock);
1986
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001987 if (!msm_spi_is_valid_state(dd)) {
1988 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1989 __func__);
1990 status_error = 1;
1991 }
1992
1993 spin_lock_irqsave(&dd->queue_lock, flags);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301994 dd->transfer_pending = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001995 while (!list_empty(&dd->queue)) {
1996 dd->cur_msg = list_entry(dd->queue.next,
1997 struct spi_message, queue);
1998 list_del_init(&dd->cur_msg->queue);
1999 spin_unlock_irqrestore(&dd->queue_lock, flags);
2000 if (status_error)
2001 dd->cur_msg->status = -EIO;
2002 else
2003 msm_spi_process_message(dd);
2004 if (dd->cur_msg->complete)
2005 dd->cur_msg->complete(dd->cur_msg->context);
2006 spin_lock_irqsave(&dd->queue_lock, flags);
2007 }
2008 dd->transfer_pending = 0;
2009 spin_unlock_irqrestore(&dd->queue_lock, flags);
2010
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002011 if (dd->use_rlock)
2012 remote_mutex_unlock(&dd->r_lock);
2013
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002014 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302015
2016 pm_runtime_mark_last_busy(dd->dev);
2017 pm_runtime_put_autosuspend(dd->dev);
2018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 /* If needed, this can be done after the current message is complete,
2020 and work can be continued upon resume. No motivation for now. */
2021 if (dd->suspended)
2022 wake_up_interruptible(&dd->continue_suspend);
2023}
2024
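/*
 * msm_spi_transfer: spi_master->transfer callback. Validates the message
 * parameters, queues the message, and schedules the workqueue; the actual
 * transfer is performed later in msm_spi_workq().
 */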
2025static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
2026{
2027 struct msm_spi *dd;
2028 unsigned long flags;
2029 struct spi_transfer *tr;
2030
2031 dd = spi_master_get_devdata(spi->master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002032
2033 if (list_empty(&msg->transfers) || !msg->complete)
2034 return -EINVAL;
2035
2036 list_for_each_entry(tr, &msg->transfers, transfer_list) {
2037 /* Check message parameters */
2038 if (tr->speed_hz > dd->pdata->max_clock_speed ||
2039 (tr->bits_per_word &&
2040 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
2041 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
2042 dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
2043 "tx=%p, rx=%p\n",
2044 tr->speed_hz, tr->bits_per_word,
2045 tr->tx_buf, tr->rx_buf);
2046 return -EINVAL;
2047 }
2048 }
2049
2050 spin_lock_irqsave(&dd->queue_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002051 list_add_tail(&msg->queue, &dd->queue);
2052 spin_unlock_irqrestore(&dd->queue_lock, flags);
2053 queue_work(dd->workqueue, &dd->work_data);
2054 return 0;
2055}
2056
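/*
 * msm_spi_setup: spi_master->setup callback. Validates bits_per_word and
 * chip_select, then programs chip-select polarity, clock polarity, and the
 * loopback/input-first bits for this device while the core is powered.
 */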
2057static int msm_spi_setup(struct spi_device *spi)
2058{
2059 struct msm_spi *dd;
2060 int rc = 0;
2061 u32 spi_ioc;
2062 u32 spi_config;
2063 u32 mask;
2064
2065 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
2066 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
2067 __func__, spi->bits_per_word);
2068 rc = -EINVAL;
2069 }
2070 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
2071 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
2072 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
2073 rc = -EINVAL;
2074 }
2075
2076 if (rc)
2077 goto err_setup_exit;
2078
2079 dd = spi_master_get_devdata(spi->master);
2080
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302081 pm_runtime_get_sync(dd->dev);
2082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002083 mutex_lock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302084
2085 /* Counter-part of system-suspend when runtime-pm is not enabled. */
2086 if (!pm_runtime_enabled(dd->dev))
2087 msm_spi_pm_resume_runtime(dd->dev);
2088
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089 if (dd->suspended) {
2090 mutex_unlock(&dd->core_lock);
2091 return -EBUSY;
2092 }
2093
2094 if (dd->use_rlock)
2095 remote_mutex_lock(&dd->r_lock);
2096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002097 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
2098 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
2099 if (spi->mode & SPI_CS_HIGH)
2100 spi_ioc |= mask;
2101 else
2102 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06002103 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104
2105 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
2106
2107 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06002108 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
2109 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
2111
2112 /* Ensure previous write completed before disabling the clocks */
2113 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002114
2115 if (dd->use_rlock)
2116 remote_mutex_unlock(&dd->r_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302117
2118 /* Counter-part of system-resume when runtime-pm is not enabled. */
2119 if (!pm_runtime_enabled(dd->dev))
2120 msm_spi_pm_suspend_runtime(dd->dev);
2121
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302123
2124 pm_runtime_mark_last_busy(dd->dev);
2125 pm_runtime_put_autosuspend(dd->dev);
2126
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002127err_setup_exit:
2128 return rc;
2129}
2130
2131#ifdef CONFIG_DEBUG_FS
2132static int debugfs_iomem_x32_set(void *data, u64 val)
2133{
2134 writel_relaxed(val, data);
2135 /* Ensure the previous write completed. */
2136 mb();
2137 return 0;
2138}
2139
2140static int debugfs_iomem_x32_get(void *data, u64 *val)
2141{
2142 *val = readl_relaxed(data);
2143 /* Ensure the previous read completed. */
2144 mb();
2145 return 0;
2146}
2147
2148DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
2149 debugfs_iomem_x32_set, "0x%08llx\n");
2150
2151static void spi_debugfs_init(struct msm_spi *dd)
2152{
2153 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
2154 if (dd->dent_spi) {
2155 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002157 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
2158 dd->debugfs_spi_regs[i] =
2159 debugfs_create_file(
2160 debugfs_spi_regs[i].name,
2161 debugfs_spi_regs[i].mode,
2162 dd->dent_spi,
2163 dd->base + debugfs_spi_regs[i].offset,
2164 &fops_iomem_x32);
2165 }
2166 }
2167}
2168
2169static void spi_debugfs_exit(struct msm_spi *dd)
2170{
2171 if (dd->dent_spi) {
2172 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174 debugfs_remove_recursive(dd->dent_spi);
2175 dd->dent_spi = NULL;
2176 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
2177 dd->debugfs_spi_regs[i] = NULL;
2178 }
2179}
2180#else
2181static void spi_debugfs_init(struct msm_spi *dd) {}
2182static void spi_debugfs_exit(struct msm_spi *dd) {}
2183#endif
2184
2185/* ===Device attributes begin=== */
2186static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
2187 char *buf)
2188{
2189 struct spi_master *master = dev_get_drvdata(dev);
2190 struct msm_spi *dd = spi_master_get_devdata(master);
2191
2192 return snprintf(buf, PAGE_SIZE,
2193 "Device %s\n"
2194 "rx fifo_size = %d spi words\n"
2195 "tx fifo_size = %d spi words\n"
2196 "use_dma ? %s\n"
2197 "rx block size = %d bytes\n"
2198 "tx block size = %d bytes\n"
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302199 "input burst size = %d bytes\n"
2200 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002201 "DMA configuration:\n"
2202 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
2203 "--statistics--\n"
2204 "Rx isrs = %d\n"
2205 "Tx isrs = %d\n"
2206 "DMA error = %d\n"
2207 "--debug--\n"
2208 "NA yet\n",
2209 dev_name(dev),
2210 dd->input_fifo_size,
2211 dd->output_fifo_size,
2212 dd->use_dma ? "yes" : "no",
2213 dd->input_block_size,
2214 dd->output_block_size,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302215 dd->input_burst_size,
2216 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217 dd->tx_dma_chan,
2218 dd->rx_dma_chan,
2219 dd->tx_dma_crci,
2220 dd->rx_dma_crci,
2221 dd->stat_rx + dd->stat_dmov_rx,
2222 dd->stat_tx + dd->stat_dmov_tx,
2223 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
2224 );
2225}
2226
2227/* Reset statistics on write */
2228static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
2229 const char *buf, size_t count)
2230{
2231 struct msm_spi *dd = dev_get_drvdata(dev);
2232 dd->stat_rx = 0;
2233 dd->stat_tx = 0;
2234 dd->stat_dmov_rx = 0;
2235 dd->stat_dmov_tx = 0;
2236 dd->stat_dmov_rx_err = 0;
2237 dd->stat_dmov_tx_err = 0;
2238 return count;
2239}
2240
2241static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
2242
2243static struct attribute *dev_attrs[] = {
2244 &dev_attr_stats.attr,
2245 NULL,
2246};
2247
2248static struct attribute_group dev_attr_grp = {
2249 .attrs = dev_attrs,
2250};
2251/* ===Device attributes end=== */
2252
2253/**
2254 * spi_dmov_tx_complete_func - DataMover tx completion callback
2255 *
2256 * Executed in IRQ context (Data Mover's IRQ) DataMover's
2257 * spinlock @msm_dmov_lock held.
2258 */
2259static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
2260 unsigned int result,
2261 struct msm_dmov_errdata *err)
2262{
2263 struct msm_spi *dd;
2264
2265 if (!(result & DMOV_RSLT_VALID)) {
2266 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
2267 return;
2268 }
2269 /* restore original context */
2270 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302271 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002272 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302273 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
2274 return;
2275 complete(&dd->transfer_complete);
2276 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002277 /* Error or flush */
2278 if (result & DMOV_RSLT_ERROR) {
2279 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2280 dd->stat_dmov_tx_err++;
2281 }
2282 if (result & DMOV_RSLT_FLUSH) {
2283 /*
2284 * Flushing normally happens during driver removal,
2285 * when we are waiting for outstanding
2286 * DMA commands to be flushed.
2287 */
2288 dev_info(dd->dev,
2289 "DMA channel flushed (0x%08x)\n", result);
2290 }
2291 if (err)
2292 dev_err(dd->dev,
2293 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2294 err->flush[0], err->flush[1], err->flush[2],
2295 err->flush[3], err->flush[4], err->flush[5]);
2296 dd->cur_msg->status = -EIO;
2297 complete(&dd->transfer_complete);
2298 }
2299}
2300
2301/**
2302 * spi_dmov_rx_complete_func - DataMover rx completion callback
2303 *
2304 * Executed in IRQ context (Data Mover's IRQ)
2305 * DataMover's spinlock @msm_dmov_lock held.
2306 */
2307static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2308 unsigned int result,
2309 struct msm_dmov_errdata *err)
2310{
2311 struct msm_spi *dd;
2312
2313 if (!(result & DMOV_RSLT_VALID)) {
2314 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
2315 result, cmd);
2316 return;
2317 }
2318 /* restore original context */
2319 dd = container_of(cmd, struct msm_spi, rx_hdr);
2320 if (result & DMOV_RSLT_DONE) {
2321 dd->stat_dmov_rx++;
2322 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2323 return;
2324 complete(&dd->transfer_complete);
2325 } else {
2326 /* Error or flush */
2327 if (result & DMOV_RSLT_ERROR) {
2328 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2329 dd->stat_dmov_rx_err++;
2330 }
2331 if (result & DMOV_RSLT_FLUSH) {
2332 dev_info(dd->dev,
2333 "DMA channel flushed(0x%08x)\n", result);
2334 }
2335 if (err)
2336 dev_err(dd->dev,
2337 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2338 err->flush[0], err->flush[1], err->flush[2],
2339 err->flush[3], err->flush[4], err->flush[5]);
2340 dd->cur_msg->status = -EIO;
2341 complete(&dd->transfer_complete);
2342 }
2343}
2344
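/*
 * Size of the single coherent allocation holding both DMOV command
 * structures and the tx/rx padding buffers, rounded up for DM and
 * cache-line alignment.
 */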
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302345static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
2346 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002347{
2348 u32 cache_line = dma_get_cache_alignment();
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302349 int burst_size = (input_burst_size > output_burst_size) ?
2350 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002351
2352 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302353 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002354}
2355
Gilad Avidovd0262342012-10-24 16:52:30 -06002356static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002357{
2358 int limit = 0;
2359
2360 if (!dd->use_dma)
2361 return;
2362
2363 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002364 msm_dmov_flush(dd->tx_dma_chan, 1);
2365 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002366 msleep(10);
2367 }
2368
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302369 dma_free_coherent(NULL,
2370 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
2371 dd->tx_dmov_cmd,
2372 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002373 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2374 dd->tx_padding = dd->rx_padding = NULL;
2375}
2376
Gilad Avidovd0262342012-10-24 16:52:30 -06002377static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2378 enum msm_spi_pipe_direction pipe_dir)
2379{
2380 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2381 (&dd->bam.prod) : (&dd->bam.cons);
2382 if (!pipe->teardown_required)
2383 return;
2384
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002385 msm_spi_bam_pipe_disconnect(dd, pipe);
Gilad Avidovd0262342012-10-24 16:52:30 -06002386 dma_free_coherent(dd->dev, pipe->config.desc.size,
2387 pipe->config.desc.base, pipe->config.desc.phys_base);
2388 sps_free_endpoint(pipe->handle);
2389 pipe->handle = 0;
2390 pipe->teardown_required = false;
2391}
2392
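/*
 * msm_spi_bam_pipe_init: allocates an SPS endpoint, configures it to move
 * data between the SPI BAM and system memory in the requested direction,
 * allocates its descriptor FIFO, and connects the pipe.
 */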
2393static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2394 enum msm_spi_pipe_direction pipe_dir)
2395{
2396 int rc = 0;
2397 struct sps_pipe *pipe_handle;
Gilad Avidovd0262342012-10-24 16:52:30 -06002398 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2399 (&dd->bam.prod) : (&dd->bam.cons);
2400 struct sps_connect *pipe_conf = &pipe->config;
2401
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002402 pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
Gilad Avidovd0262342012-10-24 16:52:30 -06002403 pipe->handle = 0;
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002404 pipe_handle = sps_alloc_endpoint();
Gilad Avidovd0262342012-10-24 16:52:30 -06002405 if (!pipe_handle) {
2406 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2407 , __func__);
2408 return -ENOMEM;
2409 }
2410
2411 memset(pipe_conf, 0, sizeof(*pipe_conf));
2412 rc = sps_get_config(pipe_handle, pipe_conf);
2413 if (rc) {
2414 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2415 , __func__);
2416 goto config_err;
2417 }
2418
2419 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2420 pipe_conf->source = dd->bam.handle;
2421 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2422 pipe_conf->mode = SPS_MODE_SRC;
2423 pipe_conf->src_pipe_index =
2424 dd->pdata->bam_producer_pipe_index;
2425 pipe_conf->dest_pipe_index = 0;
2426 } else {
2427 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2428 pipe_conf->destination = dd->bam.handle;
2429 pipe_conf->mode = SPS_MODE_DEST;
2430 pipe_conf->src_pipe_index = 0;
2431 pipe_conf->dest_pipe_index =
2432 dd->pdata->bam_consumer_pipe_index;
2433 }
2434 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2435 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2436 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2437 pipe_conf->desc.size,
2438 &pipe_conf->desc.phys_base,
2439 GFP_KERNEL);
2440 if (!pipe_conf->desc.base) {
2441 dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
2442 , __func__);
2443 rc = -ENOMEM;
2444 goto config_err;
2445 }
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002446 /* zero descriptor FIFO for convenient debugging of first descs */
Gilad Avidovd0262342012-10-24 16:52:30 -06002447 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2448
Gilad Avidovd0262342012-10-24 16:52:30 -06002449 pipe->handle = pipe_handle;
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002450 rc = msm_spi_bam_pipe_connect(dd, pipe, pipe_conf);
2451 if (rc)
2452 goto connect_err;
2453
Gilad Avidovd0262342012-10-24 16:52:30 -06002454 return 0;
2455
Gilad Avidovd0262342012-10-24 16:52:30 -06002456connect_err:
2457 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2458 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2459config_err:
2460 sps_free_endpoint(pipe_handle);
2461
2462 return rc;
2463}
2464
2465static void msm_spi_bam_teardown(struct msm_spi *dd)
2466{
2467 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2468 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2469
2470 if (dd->bam.deregister_required) {
2471 sps_deregister_bam_device(dd->bam.handle);
2472 dd->bam.deregister_required = false;
2473 }
2474}
2475
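/*
 * msm_spi_bam_init: registers the SPI BAM device with the SPS driver if it
 * is not already known (sps_phy2h fails), then initializes the producer
 * and consumer pipes.
 */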
2476static int msm_spi_bam_init(struct msm_spi *dd)
2477{
2478 struct sps_bam_props bam_props = {0};
2479 u32 bam_handle;
2480 int rc = 0;
2481
2482 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2483 if (rc || !bam_handle) {
2484 bam_props.phys_addr = dd->bam.phys_addr;
2485 bam_props.virt_addr = dd->bam.base;
2486 bam_props.irq = dd->bam.irq;
Gilad Avidovb0968052013-05-03 09:51:37 -06002487 bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Gilad Avidovd0262342012-10-24 16:52:30 -06002488 bam_props.summing_threshold = 0x10;
2489
2490 rc = sps_register_bam_device(&bam_props, &bam_handle);
2491 if (rc) {
2492 dev_err(dd->dev,
2493 "%s: Failed to register BAM device",
2494 __func__);
2495 return rc;
2496 }
2497 dd->bam.deregister_required = true;
2498 }
2499
2500 dd->bam.handle = bam_handle;
2501
2502 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2503 if (rc) {
2504 dev_err(dd->dev,
2505 "%s: Failed to init producer BAM-pipe",
2506 __func__);
2507 goto bam_init_error;
2508 }
2509
2510 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2511 if (rc) {
2512 dev_err(dd->dev,
2513 "%s: Failed to init consumer BAM-pipe",
2514 __func__);
2515 goto bam_init_error;
2516 }
2517
2518 return 0;
2519
2520bam_init_error:
2521 msm_spi_bam_teardown(dd);
2522 return rc;
2523}
2524
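/*
 * msm_spi_dmov_init: allocates one coherent chunk for the tx/rx DMOV
 * command structures and padding buffers, sets up the box and single-pad
 * DM commands pointing at the SPI FIFOs, and flushes any stale activity
 * on both DMA channels.
 */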
2525static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002526{
2527 dmov_box *box;
2528 u32 cache_line = dma_get_cache_alignment();
2529
2530 /* Allocate everything as one chunk, since the total is smaller than a page */
2531
2532 /* We pass a NULL device, since using one would require a
2533 coherent_dma_mask in its definition; we're okay with the system pool */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302534 dd->tx_dmov_cmd
2535 = dma_alloc_coherent(NULL,
2536 get_chunk_size(dd, dd->input_burst_size,
2537 dd->output_burst_size),
2538 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002539 if (dd->tx_dmov_cmd == NULL)
2540 return -ENOMEM;
2541
2542 /* DMA addresses should be 64-bit aligned */
2543 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2544 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2545 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2546 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2547
2548 /* Buffers should be aligned to cache line */
2549 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2550 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2551 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302552 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
2553 dd->output_burst_size), cache_line);
2554 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002555 cache_line);
2556
2557 /* Setup DM commands */
2558 box = &(dd->rx_dmov_cmd->box);
2559 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2560 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2561 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2562 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2563 offsetof(struct spi_dmov_cmd, cmd_ptr));
2564 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002565
2566 box = &(dd->tx_dmov_cmd->box);
2567 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2568 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2569 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2570 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2571 offsetof(struct spi_dmov_cmd, cmd_ptr));
2572 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002573
2574 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2575 CMD_DST_CRCI(dd->tx_dma_crci);
2576 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2577 SPI_OUTPUT_FIFO;
2578 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2579 CMD_SRC_CRCI(dd->rx_dma_crci);
2580 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2581 SPI_INPUT_FIFO;
2582
2583 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002584 msm_dmov_flush(dd->tx_dma_chan, 1);
2585 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002586
2587 return 0;
2588}
2589
Gilad Avidov23350552013-05-21 09:26:46 -06002590enum msm_spi_dt_entry_status {
2591 DT_REQ, /* Required: fail if missing */
2592 DT_SGST, /* Suggested: warn if missing */
2593 DT_OPT, /* Optional: don't warn if missing */
2594};
2595
2596enum msm_spi_dt_entry_type {
2597 DT_U32,
2598 DT_GPIO,
2599 DT_BOOL,
2600};
2601
2602struct msm_spi_dt_to_pdata_map {
2603 const char *dt_name;
2604 void *ptr_data;
2605 enum msm_spi_dt_entry_status status;
2606 enum msm_spi_dt_entry_type type;
2607 int default_val;
2608};
2609
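/*
 * Walks a table of device-tree entries and fills the corresponding pdata
 * (or GPIO) fields. Missing required entries fail the call, suggested
 * entries are logged but do not fail, and optional entries silently fall
 * back to default_val.
 */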
2610static int __init msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
2611 struct msm_spi_platform_data *pdata,
2612 struct msm_spi_dt_to_pdata_map *itr)
2613{
2614 int ret, err = 0;
2615 struct device_node *node = pdev->dev.of_node;
2616
2617 for (; itr->dt_name ; ++itr) {
2618 switch (itr->type) {
2619 case DT_GPIO:
2620 ret = of_get_named_gpio(node, itr->dt_name, 0);
2621 if (ret >= 0) {
2622 *((int *) itr->ptr_data) = ret;
2623 ret = 0;
2624 }
2625 break;
2626 case DT_U32:
2627 ret = of_property_read_u32(node, itr->dt_name,
2628 (u32 *) itr->ptr_data);
2629 break;
2630 case DT_BOOL:
2631 *((bool *) itr->ptr_data) =
2632 of_property_read_bool(node, itr->dt_name);
2633 ret = 0;
2634 break;
2635 default:
2636 dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
2637 itr->type);
2638 ret = -EBADE;
2639 }
2640
2641 dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
2642 ret, itr->dt_name, *((int *)itr->ptr_data));
2643
2644 if (ret) {
2645 *((int *)itr->ptr_data) = itr->default_val;
2646
2647 if (itr->status < DT_OPT) {
2648 dev_err(&pdev->dev, "Missing '%s' DT entry\n",
2649 itr->dt_name);
2650
2651 /* cont on err to dump all missing entries */
2652 if (itr->status == DT_REQ && !err)
2653 err = ret;
2654 }
2655 }
2656 }
2657
2658 return err;
2659}
2660
Gilad Avidovd0262342012-10-24 16:52:30 -06002661/**
Gilad Avidov002dba02013-05-21 18:06:32 -06002662 * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
Gilad Avidovd0262342012-10-24 16:52:30 -06002663 */
Gilad Avidov002dba02013-05-21 18:06:32 -06002664struct msm_spi_platform_data * __init msm_spi_dt_to_pdata(
2665 struct platform_device *pdev, struct msm_spi *dd)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002666{
Gilad Avidov002dba02013-05-21 18:06:32 -06002667 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002668 struct msm_spi_platform_data *pdata;
2669
2670 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2671 if (!pdata) {
2672 pr_err("Unable to allocate platform data\n");
2673 return NULL;
Gilad Avidov23350552013-05-21 09:26:46 -06002674 } else {
2675 struct msm_spi_dt_to_pdata_map map[] = {
2676 {"spi-max-frequency",
Gilad Avidov002dba02013-05-21 18:06:32 -06002677 &pdata->max_clock_speed, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002678 {"qcom,infinite-mode",
Gilad Avidov002dba02013-05-21 18:06:32 -06002679 &pdata->infinite_mode, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002680 {"qcom,active-only",
Gilad Avidov002dba02013-05-21 18:06:32 -06002681 &pdata->active_only, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002682 {"qcom,master-id",
Gilad Avidov002dba02013-05-21 18:06:32 -06002683 &pdata->master_id, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002684 {"qcom,ver-reg-exists",
Gilad Avidov002dba02013-05-21 18:06:32 -06002685 &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002686 {"qcom,use-bam",
Gilad Avidov002dba02013-05-21 18:06:32 -06002687 &pdata->use_bam, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002688 {"qcom,bam-consumer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002689 &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002690 {"qcom,bam-producer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002691 &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0},
2692 {"qcom,gpio-clk",
2693 &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1},
2694 {"qcom,gpio-miso",
2695 &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1},
2696 {"qcom,gpio-mosi",
2697 &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1},
2698 {"qcom,gpio-cs0",
2699 &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1},
2700 {"qcom,gpio-cs1",
2701 &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1},
2702 {"qcom,gpio-cs2",
2703 &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
2704 {"qcom,gpio-cs3",
2705 &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
2706 {NULL, NULL, 0, 0, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002707 };
2708
2709 if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
2710 devm_kfree(&pdev->dev, pdata);
2711 return NULL;
2712 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002713 }
2714
Gilad Avidovd0262342012-10-24 16:52:30 -06002715 if (pdata->use_bam) {
Gilad Avidov23350552013-05-21 09:26:46 -06002716 if (!pdata->bam_consumer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002717 dev_warn(&pdev->dev,
2718 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2719 pdata->use_bam = false;
2720 }
2721
Gilad Avidov23350552013-05-21 09:26:46 -06002722 if (!pdata->bam_producer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002723 dev_warn(&pdev->dev,
2724 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2725 pdata->use_bam = false;
2726 }
2727 }
Gilad Avidov002dba02013-05-21 18:06:32 -06002728
2729 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
2730 dd->cs_gpios[i].valid = (dd->cs_gpios[i].gpio_num >= 0);
2731
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002732 return pdata;
2733}
2734
Gilad Avidovd0262342012-10-24 16:52:30 -06002735static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2736{
2737 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2738 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2739 : SPI_QUP_VERSION_NONE;
2740}
2741
2742static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2743 struct platform_device *pdev, struct spi_master *master)
2744{
2745 struct resource *resource;
2746 size_t bam_mem_size;
2747
2748 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2749 "spi_bam_physical");
2750 if (!resource) {
2751 dev_warn(&pdev->dev,
2752 "%s: Missing spi_bam_physical entry in DT",
2753 __func__);
2754 return -ENXIO;
2755 }
2756
2757 dd->bam.phys_addr = resource->start;
2758 bam_mem_size = resource_size(resource);
2759 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2760 bam_mem_size);
2761 if (!dd->bam.base) {
2762 dev_warn(&pdev->dev,
2763 "%s: Failed to ioremap(spi_bam_physical)",
2764 __func__);
2765 return -ENXIO;
2766 }
2767
2768 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2769 if (dd->bam.irq < 0) {
2770 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2771 __func__);
2772 return -EINVAL;
2773 }
2774
2775 dd->dma_init = msm_spi_bam_init;
2776 dd->dma_teardown = msm_spi_bam_teardown;
2777 return 0;
2778}
2779
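/*
 * msm_spi_probe: allocates the SPI master, builds platform data from the
 * device tree (QUP v2) or board files (QUP v1), claims memory, clock, DMA
 * and IRQ resources, resets the QUP, and registers the master with the SPI
 * core with runtime PM enabled.
 */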
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780static int __init msm_spi_probe(struct platform_device *pdev)
2781{
2782 struct spi_master *master;
2783 struct msm_spi *dd;
2784 struct resource *resource;
2785 int rc = -ENXIO;
2786 int locked = 0;
2787 int i = 0;
2788 int clk_enabled = 0;
2789 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002790 struct msm_spi_platform_data *pdata;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002791
2792 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2793 if (!master) {
2794 rc = -ENOMEM;
2795 dev_err(&pdev->dev, "master allocation failed\n");
2796 goto err_probe_exit;
2797 }
2798
2799 master->bus_num = pdev->id;
2800 master->mode_bits = SPI_SUPPORTED_MODES;
2801 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2802 master->setup = msm_spi_setup;
2803 master->transfer = msm_spi_transfer;
2804 platform_set_drvdata(pdev, master);
2805 dd = spi_master_get_devdata(master);
2806
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002807 if (pdev->dev.of_node) {
2808 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2809 master->dev.of_node = pdev->dev.of_node;
Gilad Avidov002dba02013-05-21 18:06:32 -06002810 pdata = msm_spi_dt_to_pdata(pdev, dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002811 if (!pdata) {
2812 rc = -ENOMEM;
2813 goto err_probe_exit;
2814 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002815
Gilad Avidov0697ea62013-02-11 16:46:38 -07002816 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2817 if (rc < 0)
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002818 dev_warn(&pdev->dev,
2819 "using default bus_num %d\n", pdev->id);
2820 else
Gilad Avidov0697ea62013-02-11 16:46:38 -07002821 master->bus_num = pdev->id = rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002822 } else {
2823 pdata = pdev->dev.platform_data;
2824 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002825
2826 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2827 resource = platform_get_resource(pdev, IORESOURCE_IO,
2828 i);
2829 dd->spi_gpios[i] = resource ? resource->start : -1;
2830 }
2831
2832 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2833 resource = platform_get_resource(pdev, IORESOURCE_IO,
2834 i + ARRAY_SIZE(spi_rsrcs));
2835 dd->cs_gpios[i].gpio_num = resource ?
2836 resource->start : -1;
2837 dd->cs_gpios[i].valid = 0;
2838 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002839 }
2840
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002841 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002842 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002843 if (!resource) {
2844 rc = -ENXIO;
2845 goto err_probe_res;
2846 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002848 dd->mem_phys_addr = resource->start;
2849 dd->mem_size = resource_size(resource);
2850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002851 if (pdata) {
2852 if (pdata->dma_config) {
2853 rc = pdata->dma_config();
2854 if (rc) {
2855 dev_warn(&pdev->dev,
2856 "%s: DM mode not supported\n",
2857 __func__);
2858 dd->use_dma = 0;
2859 goto skip_dma_resources;
2860 }
2861 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002862 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2863 resource = platform_get_resource(pdev,
2864 IORESOURCE_DMA, 0);
2865 if (resource) {
2866 dd->rx_dma_chan = resource->start;
2867 dd->tx_dma_chan = resource->end;
2868 resource = platform_get_resource(pdev,
2869 IORESOURCE_DMA, 1);
2870 if (!resource) {
2871 rc = -ENXIO;
2872 goto err_probe_res;
2873 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002874
Gilad Avidovd0262342012-10-24 16:52:30 -06002875 dd->rx_dma_crci = resource->start;
2876 dd->tx_dma_crci = resource->end;
2877 dd->use_dma = 1;
2878 master->dma_alignment =
2879 dma_get_cache_alignment();
2880 dd->dma_init = msm_spi_dmov_init;
2881 dd->dma_teardown = msm_spi_dmov_teardown;
2882 }
2883 } else {
2884 if (!dd->pdata->use_bam)
2885 goto skip_dma_resources;
2886
2887 rc = msm_spi_bam_get_resources(dd, pdev, master);
2888 if (rc) {
2889 dev_warn(dd->dev,
2890 "%s: Failed to get BAM resources",
2891 __func__);
2892 goto skip_dma_resources;
2893 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002895 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002896 }
2897
Alok Chauhan66554a12012-08-22 19:54:45 +05302898skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002899
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002900 spin_lock_init(&dd->queue_lock);
2901 mutex_init(&dd->core_lock);
2902 INIT_LIST_HEAD(&dd->queue);
2903 INIT_WORK(&dd->work_data, msm_spi_workq);
2904 init_waitqueue_head(&dd->continue_suspend);
2905 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002906 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002907 if (!dd->workqueue)
2908 goto err_probe_workq;
2909
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002910 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2911 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002912 rc = -ENXIO;
2913 goto err_probe_reqmem;
2914 }
2915
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002916 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2917 if (!dd->base) {
2918 rc = -ENOMEM;
2919 goto err_probe_reqmem;
2920 }
2921
Gilad Avidovd0262342012-10-24 16:52:30 -06002922 if (pdata && pdata->ver_reg_exists) {
2923 enum msm_spi_qup_version ver =
2924 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2925 if (dd->qup_ver != ver)
2926 dev_warn(&pdev->dev,
2927 "%s: HW version different than initially assumed by probe",
2928 __func__);
2929 }
2930
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002931 if (pdata && pdata->rsl_id) {
2932 struct remote_mutex_id rmid;
2933 rmid.r_spinlock_id = pdata->rsl_id;
2934 rmid.delay_us = SPI_TRYLOCK_DELAY;
2935
2936 rc = remote_mutex_init(&dd->r_lock, &rmid);
2937 if (rc) {
2938 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2939 "(%s), (rc=%d)\n", __func__,
2940 rmid.r_spinlock_id, rc);
2941 goto err_probe_rlock_init;
2942 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002943
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002944 dd->use_rlock = 1;
2945 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302946 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002947 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002948 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002949
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950 mutex_lock(&dd->core_lock);
2951 if (dd->use_rlock)
2952 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002953
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002954 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002955 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002956 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002957 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002958 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002959 rc = PTR_ERR(dd->clk);
2960 goto err_probe_clk_get;
2961 }
2962
Matt Wagantallac294852011-08-17 15:44:58 -07002963 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002964 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002965 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002966 rc = PTR_ERR(dd->pclk);
2967 goto err_probe_pclk_get;
2968 }
2969
2970 if (pdata && pdata->max_clock_speed)
2971 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2972
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002973 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002974 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002975 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002976 __func__);
2977 goto err_probe_clk_enable;
2978 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002979
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002980 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002981 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002982 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002983 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002984 __func__);
2985 goto err_probe_pclk_enable;
2986 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002988 pclk_enabled = 1;
Gilad Avidovd0262342012-10-24 16:52:30 -06002989 /* GSBI does not exist on B-family MSM chips */
2990 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2991 rc = msm_spi_configure_gsbi(dd, pdev);
2992 if (rc)
2993 goto err_probe_gsbi;
2994 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002995
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002996 msm_spi_calculate_fifo_size(dd);
2997 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002998 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002999 if (rc)
3000 goto err_probe_dma;
3001 }
3002
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003003 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003004 /*
3005 * The SPI core generates a bogus input overrun error on some targets,
3006 * when a transition from run to reset state occurs and if the FIFO has
3007 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
3008 * bit.
3009 */
3010 msm_spi_enable_error_flags(dd);
3011
3012 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
3013 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
3014 if (rc)
3015 goto err_probe_state;
3016
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003017 clk_disable_unprepare(dd->clk);
3018 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003019 clk_enabled = 0;
3020 pclk_enabled = 0;
3021
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303022 dd->suspended = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003023 dd->transfer_pending = 0;
3024 dd->multi_xfr = 0;
3025 dd->mode = SPI_MODE_NONE;
3026
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003027 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003028 if (rc)
3029 goto err_probe_irq;
3030
3031 msm_spi_disable_irqs(dd);
3032 if (dd->use_rlock)
3033 remote_mutex_unlock(&dd->r_lock);
3034
3035 mutex_unlock(&dd->core_lock);
3036 locked = 0;
3037
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303038 pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
3039 pm_runtime_use_autosuspend(&pdev->dev);
3040 pm_runtime_enable(&pdev->dev);
3041
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003042 rc = spi_register_master(master);
3043 if (rc)
3044 goto err_probe_reg_master;
3045
3046 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
3047 if (rc) {
3048 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
3049 goto err_attrs;
3050 }
3051
3052 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05303053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003054 return 0;
3055
3056err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003057 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003058err_probe_reg_master:
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303059 pm_runtime_disable(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003060err_probe_irq:
3061err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08003062 if (dd->dma_teardown)
3063 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003064err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003065err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003066 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003067 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003068err_probe_pclk_enable:
3069 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003070 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003071err_probe_clk_enable:
3072 clk_put(dd->pclk);
3073err_probe_pclk_get:
3074 clk_put(dd->clk);
3075err_probe_clk_get:
3076 if (locked) {
3077 if (dd->use_rlock)
3078 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003080 mutex_unlock(&dd->core_lock);
3081 }
3082err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003083err_probe_reqmem:
3084 destroy_workqueue(dd->workqueue);
3085err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003086err_probe_res:
3087 spi_master_put(master);
3088err_probe_exit:
3089 return rc;
3090}
3091
3092#ifdef CONFIG_PM
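/*
 * Runtime-PM suspend: waits for any pending transfers to finish, disables
 * interrupts and clocks, drops the bus-bandwidth vote (unless active_only),
 * releases the SPI GPIOs, and relaxes the PM QoS request.
 */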
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303093static int msm_spi_pm_suspend_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003094{
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303095 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003096 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303097 struct msm_spi *dd;
3098 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003099
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303100 dev_dbg(device, "pm_runtime: suspending...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003101 if (!master)
3102 goto suspend_exit;
3103 dd = spi_master_get_devdata(master);
3104 if (!dd)
3105 goto suspend_exit;
3106
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303107 if (dd->suspended)
3108 return 0;
3109
3110 /*
3111 * Make sure nothing is added to the queue while we're
3112 * suspending
3113 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003114 spin_lock_irqsave(&dd->queue_lock, flags);
3115 dd->suspended = 1;
3116 spin_unlock_irqrestore(&dd->queue_lock, flags);
3117
3118 /* Wait for transactions to end, or time out */
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303119 wait_event_interruptible(dd->continue_suspend,
3120 !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003121
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303122 msm_spi_disable_irqs(dd);
3123 clk_disable_unprepare(dd->clk);
3124 clk_disable_unprepare(dd->pclk);
Gilad Avidov8d99efa2013-06-27 15:33:02 -06003125 if (dd->pdata && !dd->pdata->active_only)
Gilad Avidov23350552013-05-21 09:26:46 -06003126 msm_spi_clk_path_unvote(dd);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303127
3128 /* Free the spi clk, miso, mosi, cs gpio */
3129 if (dd->pdata && dd->pdata->gpio_release)
3130 dd->pdata->gpio_release();
3131
3132 msm_spi_free_gpios(dd);
3133
3134 if (pm_qos_request_active(&qos_req_list))
3135 pm_qos_update_request(&qos_req_list,
3136 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003137suspend_exit:
3138 return 0;
3139}
3140
static int msm_spi_pm_resume_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

	if (!dd->suspended)
		return 0;

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list,
				dd->pm_lat);

	/* Configure the spi clk, miso, mosi and cs gpio */
	if (dd->pdata->gpio_config) {
		ret = dd->pdata->gpio_config();
		if (ret) {
			dev_err(dd->dev,
				"%s: error configuring GPIOs\n",
				__func__);
			return ret;
		}
	}

	ret = msm_spi_request_gpios(dd);
	if (ret)
		return ret;

	msm_spi_clk_path_init(dd);
	if (!dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);
	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);
	dd->suspended = 0;

resume_exit:
	return 0;
}

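/*
 * System-sleep suspend: if runtime PM has not already powered the
 * controller down, reuse the runtime-suspend handler so the system
 * sleep and runtime paths share one shutdown sequence.
 */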
static int msm_spi_suspend(struct device *device)
{
	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
		struct platform_device *pdev = to_platform_device(device);
		struct spi_master *master = platform_get_drvdata(pdev);
		struct msm_spi *dd;

		dev_dbg(device, "system suspend\n");
		if (!master)
			goto suspend_exit;
		dd = spi_master_get_devdata(master);
		if (!dd)
			goto suspend_exit;
		msm_spi_pm_suspend_runtime(device);
	}
suspend_exit:
	return 0;
}

static int msm_spi_resume(struct device *device)
{
	/*
	 * Rely on runtime-PM to call resume when it is enabled.
	 * Even when it is not, the first client transaction will turn
	 * the clocks on and configure the GPIOs.
	 */
	dev_dbg(device, "system resume\n");
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#define msm_spi_pm_suspend_runtime NULL
#define msm_spi_pm_resume_runtime NULL
#endif /* CONFIG_PM */

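/*
 * Teardown mirrors probe: remove the PM QoS request, debugfs and sysfs
 * entries, tear down DMA if it was set up, disable runtime PM, release
 * the clocks and the clock-path voting, destroy the workqueue and
 * finally unregister and drop the spi_master.
 */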
static int __devexit msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	pm_qos_remove_request(&qos_req_list);
	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	if (dd->dma_teardown)
		dd->dma_teardown(dd);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	clk_put(dd->clk);
	clk_put(dd->pclk);
	msm_spi_clk_path_teardown(dd);
	destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}

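/*
 * Device-tree match table: instances of this controller are described
 * with compatible = "qcom,spi-qup-v2"; board-file platforms bind by the
 * SPI_DRV_NAME platform-device name instead.
 */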
static struct of_device_id msm_spi_dt_match[] = {
	{
		.compatible = "qcom,spi-qup-v2",
	},
	{}
};

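/*
 * One dev_pm_ops table carries both the system-sleep and the runtime-PM
 * callbacks; the SET_*_PM_OPS helpers compile to nothing when the
 * corresponding PM support is configured out.
 */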
static const struct dev_pm_ops msm_spi_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
			   msm_spi_pm_resume_runtime, NULL)
};

static struct platform_driver msm_spi_driver = {
	.driver		= {
		.name		= SPI_DRV_NAME,
		.owner		= THIS_MODULE,
		.pm		= &msm_spi_dev_pm_ops,
		.of_match_table	= msm_spi_dt_match,
	},
	.remove		= __exit_p(msm_spi_remove),
};

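/*
 * platform_driver_probe() registers the driver and binds only devices
 * that already exist at init time; the probe routine is not kept around
 * for devices that might appear later.
 */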
static int __init msm_spi_init(void)
{
	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
	platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);