blob: 45400cb25adce95c85e7b253c49326e13382d2bf [file] [log] [blame]
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13/*
14 * SPI driver for Qualcomm MSM platforms
15 *
16 */
Gilad Avidov002dba02013-05-21 18:06:32 -060017
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070018#include <linux/version.h>
19#include <linux/kernel.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070020#include <linux/module.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include <linux/init.h>
22#include <linux/spinlock.h>
23#include <linux/list.h>
24#include <linux/irq.h>
25#include <linux/platform_device.h>
26#include <linux/spi/spi.h>
27#include <linux/interrupt.h>
28#include <linux/err.h>
29#include <linux/clk.h>
30#include <linux/delay.h>
31#include <linux/workqueue.h>
32#include <linux/io.h>
33#include <linux/debugfs.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034#include <linux/gpio.h>
35#include <linux/remote_spinlock.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070036#include <linux/pm_qos.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070037#include <linux/of.h>
Sathish Ambleycd06bf32012-04-09 11:59:43 -070038#include <linux/of_gpio.h>
Gilad Avidovd0262342012-10-24 16:52:30 -060039#include <linux/dma-mapping.h>
40#include <linux/sched.h>
41#include <linux/mutex.h>
42#include <linux/atomic.h>
Alok Chauhan7fd3add2013-03-12 18:34:43 +053043#include <linux/pm_runtime.h>
Gilad Avidovd0262342012-10-24 16:52:30 -060044#include <mach/msm_spi.h>
45#include <mach/sps.h>
46#include <mach/dma.h>
Gilad Avidov23350552013-05-21 09:26:46 -060047#include <mach/msm_bus.h>
48#include <mach/msm_bus_board.h>
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070049#include "spi_qsd.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050
Alok Chauhan7fd3add2013-03-12 18:34:43 +053051static int msm_spi_pm_resume_runtime(struct device *device);
52static int msm_spi_pm_suspend_runtime(struct device *device);
53
54
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070055static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
56 struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070057{
58 struct resource *resource;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070059 unsigned long gsbi_mem_phys_addr;
60 size_t gsbi_mem_size;
61 void __iomem *gsbi_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070063 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064 if (!resource)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070065 return 0;
66
67 gsbi_mem_phys_addr = resource->start;
68 gsbi_mem_size = resource_size(resource);
69 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
70 gsbi_mem_size, SPI_DRV_NAME))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070071 return -ENXIO;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070072
73 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
74 gsbi_mem_size);
75 if (!gsbi_base)
76 return -ENXIO;
77
78 /* Set GSBI to SPI mode */
79 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080
81 return 0;
82}
83
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070084static inline void msm_spi_register_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070085{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -070086 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
87 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
88 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
89 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
90 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
91 if (dd->qup_ver)
92 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093}
94
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static inline int msm_spi_request_gpios(struct msm_spi *dd)
96{
97 int i;
98 int result = 0;
99
100 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
101 if (dd->spi_gpios[i] >= 0) {
102 result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
103 if (result) {
Harini Jayaramane4c06192011-09-28 16:26:39 -0600104 dev_err(dd->dev, "%s: gpio_request for pin %d "
105 "failed with error %d\n", __func__,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700106 dd->spi_gpios[i], result);
107 goto error;
108 }
109 }
110 }
111 return 0;
112
113error:
114 for (; --i >= 0;) {
115 if (dd->spi_gpios[i] >= 0)
116 gpio_free(dd->spi_gpios[i]);
117 }
118 return result;
119}
120
121static inline void msm_spi_free_gpios(struct msm_spi *dd)
122{
123 int i;
124
125 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
126 if (dd->spi_gpios[i] >= 0)
127 gpio_free(dd->spi_gpios[i]);
128 }
Harini Jayaramane4c06192011-09-28 16:26:39 -0600129
130 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
131 if (dd->cs_gpios[i].valid) {
132 gpio_free(dd->cs_gpios[i].gpio_num);
133 dd->cs_gpios[i].valid = 0;
134 }
135 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136}
137
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600138/**
139 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
140 * @clk the clock for which to find nearest lower rate
141 * @rate clock frequency in Hz
142 * @return nearest lower rate or negative error value
143 *
144 * Public clock API extends clk_round_rate which is a ceiling function. This
145 * function is a floor function implemented as a binary search using the
146 * ceiling function.
147 */
148static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
149{
150 long lowest_available, nearest_low, step_size, cur;
151 long step_direction = -1;
152 long guess = rate;
153 int max_steps = 10;
154
155 cur = clk_round_rate(clk, rate);
156 if (cur == rate)
157 return rate;
158
159 /* if we got here then: cur > rate */
160 lowest_available = clk_round_rate(clk, 0);
161 if (lowest_available > rate)
162 return -EINVAL;
163
164 step_size = (rate - lowest_available) >> 1;
165 nearest_low = lowest_available;
166
167 while (max_steps-- && step_size) {
168 guess += step_size * step_direction;
169
170 cur = clk_round_rate(clk, guess);
171
172 if ((cur < rate) && (cur > nearest_low))
173 nearest_low = cur;
174
175 /*
176 * if we stepped too far, then start stepping in the other
177 * direction with half the step size
178 */
179 if (((cur > rate) && (step_direction > 0))
180 || ((cur < rate) && (step_direction < 0))) {
181 step_direction = -step_direction;
182 step_size >>= 1;
183 }
184 }
185 return nearest_low;
186}
187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188static void msm_spi_clock_set(struct msm_spi *dd, int speed)
189{
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600190 long rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191 int rc;
192
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600193 rate = msm_spi_clk_max_rate(dd->clk, speed);
194 if (rate < 0) {
195 dev_err(dd->dev,
196 "%s: no match found for requested clock frequency:%d",
197 __func__, speed);
198 return;
199 }
200
201 rc = clk_set_rate(dd->clk, rate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 if (!rc)
Gilad Avidovd2a8b562012-10-18 09:34:35 -0600203 dd->clock_speed = rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204}
205
Gilad Avidov23350552013-05-21 09:26:46 -0600206static void msm_spi_clk_path_vote(struct msm_spi *dd)
207{
208 if (dd->clk_path_vote.client_hdl)
209 msm_bus_scale_client_update_request(
210 dd->clk_path_vote.client_hdl,
211 MSM_SPI_CLK_PATH_RESUME_VEC);
212}
213
214static void msm_spi_clk_path_unvote(struct msm_spi *dd)
215{
216 if (dd->clk_path_vote.client_hdl)
217 msm_bus_scale_client_update_request(
218 dd->clk_path_vote.client_hdl,
219 MSM_SPI_CLK_PATH_SUSPEND_VEC);
220}
221
222static void msm_spi_clk_path_teardown(struct msm_spi *dd)
223{
224 if (dd->pdata->active_only)
225 msm_spi_clk_path_unvote(dd);
226
227 if (dd->clk_path_vote.client_hdl) {
228 msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl);
229 dd->clk_path_vote.client_hdl = 0;
230 }
231}
232
233/**
234 * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init
235 *
236 * allocates and initilizes the bus scaling vectors.
237 */
238static int msm_spi_clk_path_init_structs(struct msm_spi *dd)
239{
240 struct msm_bus_vectors *paths = NULL;
241 struct msm_bus_paths *usecases = NULL;
242
243 dev_dbg(dd->dev, "initialises path clock voting structs");
244
245 paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL);
246 if (!paths) {
247 dev_err(dd->dev,
248 "msm_bus_paths.paths memory allocation failed");
249 return -ENOMEM;
250 }
251
252 usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL);
253 if (!usecases) {
254 dev_err(dd->dev,
255 "msm_bus_scale_pdata.usecases memory allocation failed");
256 goto path_init_err;
257 }
258
259 dd->clk_path_vote.pdata = devm_kzalloc(dd->dev,
260 sizeof(*dd->clk_path_vote.pdata),
261 GFP_KERNEL);
262 if (!dd->clk_path_vote.pdata) {
263 dev_err(dd->dev,
264 "msm_bus_scale_pdata memory allocation failed");
265 goto path_init_err;
266 }
267
268 paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
269 .src = dd->pdata->master_id,
270 .dst = MSM_BUS_SLAVE_EBI_CH0,
271 .ab = 0,
272 .ib = 0,
273 };
274
275 paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
276 .src = dd->pdata->master_id,
277 .dst = MSM_BUS_SLAVE_EBI_CH0,
278 .ab = MSM_SPI_CLK_PATH_AVRG_BW(dd),
279 .ib = MSM_SPI_CLK_PATH_BRST_BW(dd),
280 };
281
282 usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
283 .num_paths = 1,
284 .vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC],
285 };
286
287 usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
288 .num_paths = 1,
289 .vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC],
290 };
291
292 *dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
293 .active_only = dd->pdata->active_only,
294 .name = dev_name(dd->dev),
295 .num_usecases = 2,
296 .usecase = usecases,
297 };
298
299 return 0;
300
301path_init_err:
302 devm_kfree(dd->dev, paths);
303 devm_kfree(dd->dev, usecases);
304 devm_kfree(dd->dev, dd->clk_path_vote.pdata);
305 dd->clk_path_vote.pdata = NULL;
306 return -ENOMEM;
307}
308
309/**
310 * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed
311 *
312 * @return zero on success
313 *
314 * Workaround: SPI driver may be probed before the bus scaling driver. Calling
315 * msm_bus_scale_register_client() will fail if the bus scaling driver is not
316 * ready yet. Thus, this function should be called not from probe but from a
317 * later context. Also, this function may be called more then once before
318 * register succeed. At this case only one error message will be logged. At boot
319 * time all clocks are on, so earlier SPI transactions should succeed.
320 */
321static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
322{
323 dd->clk_path_vote.client_hdl = msm_bus_scale_register_client(
324 dd->clk_path_vote.pdata);
325
326 if (dd->clk_path_vote.client_hdl) {
327 if (dd->clk_path_vote.reg_err) {
328 /* log a success message if an error msg was logged */
329 dd->clk_path_vote.reg_err = false;
330 dev_info(dd->dev,
331 "msm_bus_scale_register_client(mstr-id:%d "
332 "actv-only:%d):0x%x",
333 dd->pdata->master_id, dd->pdata->active_only,
334 dd->clk_path_vote.client_hdl);
335 }
336
337 if (dd->pdata->active_only)
338 msm_spi_clk_path_vote(dd);
339 } else {
340 /* guard to log only one error on multiple failure */
341 if (!dd->clk_path_vote.reg_err) {
342 dd->clk_path_vote.reg_err = true;
343
344 dev_info(dd->dev,
345 "msm_bus_scale_register_client(mstr-id:%d "
346 "actv-only:%d):0",
347 dd->pdata->master_id, dd->pdata->active_only);
348 }
349 }
350
351 return dd->clk_path_vote.client_hdl ? 0 : -EAGAIN;
352}
353
354static void msm_spi_clk_path_init(struct msm_spi *dd)
355{
356 /*
357 * bail out if path voting is diabled (master_id == 0) or if it is
358 * already registered (client_hdl != 0)
359 */
360 if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl)
361 return;
362
363 /* if fail once then try no more */
364 if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) {
365 dd->pdata->master_id = 0;
366 return;
367 };
368
369 /* on failure try again later */
370 if (msm_spi_clk_path_postponed_register(dd))
371 return;
372
373 if (dd->pdata->active_only)
374 msm_spi_clk_path_vote(dd);
375}
376
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377static int msm_spi_calculate_size(int *fifo_size,
378 int *block_size,
379 int block,
380 int mult)
381{
382 int words;
383
384 switch (block) {
385 case 0:
386 words = 1; /* 4 bytes */
387 break;
388 case 1:
389 words = 4; /* 16 bytes */
390 break;
391 case 2:
392 words = 8; /* 32 bytes */
393 break;
394 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700395 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700396 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398 switch (mult) {
399 case 0:
400 *fifo_size = words * 2;
401 break;
402 case 1:
403 *fifo_size = words * 4;
404 break;
405 case 2:
406 *fifo_size = words * 8;
407 break;
408 case 3:
409 *fifo_size = words * 16;
410 break;
411 default:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700412 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415 *block_size = words * sizeof(u32); /* in bytes */
416 return 0;
417}
418
419static void get_next_transfer(struct msm_spi *dd)
420{
421 struct spi_transfer *t = dd->cur_transfer;
422
423 if (t->transfer_list.next != &dd->cur_msg->transfers) {
424 dd->cur_transfer = list_entry(t->transfer_list.next,
425 struct spi_transfer,
426 transfer_list);
427 dd->write_buf = dd->cur_transfer->tx_buf;
428 dd->read_buf = dd->cur_transfer->rx_buf;
429 }
430}
431
432static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
433{
434 u32 spi_iom;
435 int block;
436 int mult;
437
438 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
439
440 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
441 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
442 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
443 block, mult)) {
444 goto fifo_size_err;
445 }
446
447 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
448 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
449 if (msm_spi_calculate_size(&dd->output_fifo_size,
450 &dd->output_block_size, block, mult)) {
451 goto fifo_size_err;
452 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600453 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
454 /* DM mode is not available for this block size */
455 if (dd->input_block_size == 4 || dd->output_block_size == 4)
456 dd->use_dma = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530458 if (dd->use_dma) {
459 dd->input_burst_size = max(dd->input_block_size,
460 DM_BURST_SIZE);
461 dd->output_burst_size = max(dd->output_block_size,
462 DM_BURST_SIZE);
463 }
Gilad Avidovd0262342012-10-24 16:52:30 -0600464 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465
466 return;
467
468fifo_size_err:
469 dd->use_dma = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700470 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 return;
472}
473
474static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
475{
476 u32 data_in;
477 int i;
478 int shift;
479
480 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
481 if (dd->read_buf) {
482 for (i = 0; (i < dd->bytes_per_word) &&
483 dd->rx_bytes_remaining; i++) {
484 /* The data format depends on bytes_per_word:
485 4 bytes: 0x12345678
486 3 bytes: 0x00123456
487 2 bytes: 0x00001234
488 1 byte : 0x00000012
489 */
490 shift = 8 * (dd->bytes_per_word - i - 1);
491 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
492 dd->rx_bytes_remaining--;
493 }
494 } else {
495 if (dd->rx_bytes_remaining >= dd->bytes_per_word)
496 dd->rx_bytes_remaining -= dd->bytes_per_word;
497 else
498 dd->rx_bytes_remaining = 0;
499 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700501 dd->read_xfr_cnt++;
502 if (dd->multi_xfr) {
503 if (!dd->rx_bytes_remaining)
504 dd->read_xfr_cnt = 0;
505 else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
506 dd->read_len) {
507 struct spi_transfer *t = dd->cur_rx_transfer;
508 if (t->transfer_list.next != &dd->cur_msg->transfers) {
509 t = list_entry(t->transfer_list.next,
510 struct spi_transfer,
511 transfer_list);
512 dd->read_buf = t->rx_buf;
513 dd->read_len = t->len;
514 dd->read_xfr_cnt = 0;
515 dd->cur_rx_transfer = t;
516 }
517 }
518 }
519}
520
521static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
522{
523 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
524
525 return spi_op & SPI_OP_STATE_VALID;
526}
527
Sagar Dharia525593d2012-11-02 18:26:01 -0600528static inline void msm_spi_udelay(unsigned long delay_usecs)
529{
530 /*
531 * For smaller values of delay, context switch time
532 * would negate the usage of usleep
533 */
534 if (delay_usecs > 20)
535 usleep_range(delay_usecs, delay_usecs);
536 else if (delay_usecs)
537 udelay(delay_usecs);
538}
539
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700540static inline int msm_spi_wait_valid(struct msm_spi *dd)
541{
542 unsigned long delay = 0;
543 unsigned long timeout = 0;
544
545 if (dd->clock_speed == 0)
546 return -EINVAL;
547 /*
548 * Based on the SPI clock speed, sufficient time
549 * should be given for the SPI state transition
550 * to occur
551 */
552 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
553 /*
554 * For small delay values, the default timeout would
555 * be one jiffy
556 */
557 if (delay < SPI_DELAY_THRESHOLD)
558 delay = SPI_DELAY_THRESHOLD;
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600559
560 /* Adding one to round off to the nearest jiffy */
561 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562 while (!msm_spi_is_valid_state(dd)) {
563 if (time_after(jiffies, timeout)) {
Harini Jayaramanbcef7732011-09-01 12:12:58 -0600564 if (!msm_spi_is_valid_state(dd)) {
565 if (dd->cur_msg)
566 dd->cur_msg->status = -EIO;
567 dev_err(dd->dev, "%s: SPI operational state"
568 "not valid\n", __func__);
569 return -ETIMEDOUT;
570 } else
571 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 }
Sagar Dharia525593d2012-11-02 18:26:01 -0600573 msm_spi_udelay(delay);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574 }
575 return 0;
576}
577
578static inline int msm_spi_set_state(struct msm_spi *dd,
579 enum msm_spi_state state)
580{
581 enum msm_spi_state cur_state;
582 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700583 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700584 cur_state = readl_relaxed(dd->base + SPI_STATE);
585 /* Per spec:
586 For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
587 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
588 (state == SPI_OP_STATE_RESET)) {
589 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
590 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
591 } else {
592 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
593 dd->base + SPI_STATE);
594 }
595 if (msm_spi_wait_valid(dd))
Harini Jayaramanc710a5e2011-11-22 12:02:43 -0700596 return -EIO;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700597
598 return 0;
599}
600
Gilad Avidovd0262342012-10-24 16:52:30 -0600601/**
602 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
603 */
604static inline void
605msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606{
607 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
608
609 if (n != (*config & SPI_CFG_N))
610 *config = (*config & ~SPI_CFG_N) | n;
611
Gilad Avidovd0262342012-10-24 16:52:30 -0600612 if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
613 || (dd->mode == SPI_BAM_MODE)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700614 if (dd->read_buf == NULL)
615 *config |= SPI_NO_INPUT;
616 if (dd->write_buf == NULL)
617 *config |= SPI_NO_OUTPUT;
618 }
619}
620
Gilad Avidovd0262342012-10-24 16:52:30 -0600621/**
622 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
623 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
624 * @return calculatd value for SPI_CONFIG
625 */
626static u32
627msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700628{
Gilad Avidovd0262342012-10-24 16:52:30 -0600629 if (mode & SPI_LOOP)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630 spi_config |= SPI_CFG_LOOPBACK;
631 else
632 spi_config &= ~SPI_CFG_LOOPBACK;
Gilad Avidovd0262342012-10-24 16:52:30 -0600633
634 if (mode & SPI_CPHA)
635 spi_config &= ~SPI_CFG_INPUT_FIRST;
636 else
637 spi_config |= SPI_CFG_INPUT_FIRST;
638
639 return spi_config;
640}
641
642/**
643 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
644 * next transfer
645 */
646static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
647{
648 u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
649 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
650 spi_config, dd->cur_msg->spi->mode);
651
652 if (dd->qup_ver == SPI_QUP_VERSION_NONE)
653 /* flags removed from SPI_CONFIG in QUP version-2 */
654 msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
Gilad Avidovd0262342012-10-24 16:52:30 -0600655
Gilad Avidov91c2ab4c2013-03-12 11:01:22 -0600656 /*
657 * HS_MODE improves signal stability for spi-clk high rates
658 * but is invalid in LOOPBACK mode.
659 */
660 if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
661 !(dd->cur_msg->spi->mode & SPI_LOOP))
662 spi_config |= SPI_CFG_HS_MODE;
663 else
664 spi_config &= ~SPI_CFG_HS_MODE;
665
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700666 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -0600667}
668
669/**
670 * msm_spi_set_mx_counts: set SPI_MX_INPUT_COUNT and SPI_MX_INPUT_COUNT
671 * for FIFO-mode. set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
672 * BAM and DMOV modes.
673 * @n_words The number of reads/writes of size N.
674 */
675static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
676{
677 /*
678 * n_words cannot exceed fifo_size, and only one READ COUNT
679 * interrupt is generated per transaction, so for transactions
680 * larger than fifo size READ COUNT must be disabled.
681 * For those transactions we usually move to Data Mover mode.
682 */
683 if (dd->mode == SPI_FIFO_MODE) {
684 if (n_words <= dd->input_fifo_size) {
685 writel_relaxed(n_words,
686 dd->base + SPI_MX_READ_COUNT);
687 msm_spi_set_write_count(dd, n_words);
688 } else {
689 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
690 msm_spi_set_write_count(dd, 0);
691 }
692 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
693 /* must be zero for FIFO */
694 writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
695 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
696 }
697 } else {
698 /* must be zero for BAM and DMOV */
699 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
700 msm_spi_set_write_count(dd, 0);
701
702 /*
703 * for DMA transfers, both QUP_MX_INPUT_COUNT and
704 * QUP_MX_OUTPUT_COUNT must be zero to all cases but one.
705 * That case is a non-balanced transfer when there is
706 * only a read_buf.
707 */
708 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
709 if (dd->write_buf)
710 writel_relaxed(0,
711 dd->base + SPI_MX_INPUT_COUNT);
712 else
713 writel_relaxed(n_words,
714 dd->base + SPI_MX_INPUT_COUNT);
715
716 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
717 }
718 }
719}
720
Gilad Avidov799cfeb2013-06-26 17:18:36 -0600721static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
722 struct msm_spi_bam_pipe *pipe)
723{
724 int ret = sps_disconnect(pipe->handle);
725 if (ret) {
726 dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
727 __func__, pipe->name);
728 return ret;
729 }
730 return 0;
731}
732
733static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
734 struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
735{
736 int ret;
737 struct sps_register_event event = {
738 .mode = SPS_TRIGGER_WAIT,
739 .options = SPS_O_EOT,
740 .xfer_done = &dd->transfer_complete,
741 };
742
743 ret = sps_connect(pipe->handle, config);
744 if (ret) {
745 dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d",
746 __func__, pipe->name, pipe->handle, ret);
747 return ret;
748 }
749
750 ret = sps_register_event(pipe->handle, &event);
751 if (ret) {
752 dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d",
753 __func__, pipe->handle, pipe->name, ret);
754 msm_spi_bam_pipe_disconnect(dd, pipe);
755 return ret;
756 }
757
758 pipe->teardown_required = true;
759 return 0;
760}
761
762
763static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
764 enum msm_spi_pipe_direction pipe_dir)
765{
766 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
767 (&dd->bam.prod) : (&dd->bam.cons);
768 struct sps_connect config = pipe->config;
769 int ret;
770
771 ret = msm_spi_bam_pipe_disconnect(dd, pipe);
772 if (ret)
773 return;
774
775 ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
776 if (ret)
777 return;
778}
779
780static void msm_spi_bam_flush(struct msm_spi *dd)
781{
782 dev_dbg(dd->dev, "%s flushing bam for recovery\n" , __func__);
783
784 msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
785 msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
786}
787
Gilad Avidovd0262342012-10-24 16:52:30 -0600788/**
789 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
790 * using BAM.
791 * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single
792 * transfer. Between transfer QUP must change to reset state. A loop is
793 * issuing a single BAM transfer at a time. If another tsranfer is
794 * required, it waits for the trasfer to finish, then moving to reset
795 * state, and back to run state to issue the next transfer.
796 * The function dose not wait for the last transfer to end, or if only
797 * a single transfer is required, the function dose not wait for it to
798 * end.
799 * @timeout max time in jiffies to wait for a transfer to finish.
800 * @return zero on success
801 */
802static int
803msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
804{
805 u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
806 int ret;
807 /*
808 * QUP must move to reset mode every 64K-1 bytes of transfer
809 * (counter is 16 bit)
810 */
811 if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
812 /* assert chip select unconditionally */
813 u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
814 if (!(spi_ioc & SPI_IO_C_FORCE_CS))
815 writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
816 dd->base + SPI_IO_CONTROL);
817 }
818
819 /* Following flags are required since we are waiting on all transfers */
820 cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
821 /*
822 * on a balanced transaction, BAM will set the flags on the producer
823 * pipe based on the flags set on the consumer pipe
824 */
825 prod_flags = (dd->write_buf) ? 0 : cons_flags;
826
827 while (dd->tx_bytes_remaining > 0) {
828 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
829 bytes_to_send = min_t(u32, dd->tx_bytes_remaining
830 , SPI_MAX_TRFR_BTWN_RESETS);
831 n_words_xfr = DIV_ROUND_UP(bytes_to_send
832 , dd->bytes_per_word);
833
834 msm_spi_set_mx_counts(dd, n_words_xfr);
835
836 ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
837 if (ret < 0) {
838 dev_err(dd->dev,
839 "%s: Failed to set QUP state to run",
840 __func__);
841 goto xfr_err;
842 }
843
844 /* enqueue read buffer in BAM */
845 if (dd->read_buf) {
846 ret = sps_transfer_one(dd->bam.prod.handle,
847 dd->cur_transfer->rx_dma + bytes_sent,
848 bytes_to_send, dd, prod_flags);
849 if (ret < 0) {
850 dev_err(dd->dev,
851 "%s: Failed to queue producer BAM transfer",
852 __func__);
853 goto xfr_err;
854 }
855 }
856
857 /* enqueue write buffer in BAM */
858 if (dd->write_buf) {
859 ret = sps_transfer_one(dd->bam.cons.handle,
860 dd->cur_transfer->tx_dma + bytes_sent,
861 bytes_to_send, dd, cons_flags);
862 if (ret < 0) {
863 dev_err(dd->dev,
864 "%s: Failed to queue consumer BAM transfer",
865 __func__);
866 goto xfr_err;
867 }
868 }
869
870 dd->tx_bytes_remaining -= bytes_to_send;
871
872 /* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
873 if (dd->tx_bytes_remaining > 0) {
874 if (!wait_for_completion_timeout(
875 &dd->transfer_complete, timeout)) {
876 dev_err(dd->dev,
877 "%s: SPI transaction timeout",
878 __func__);
879 dd->cur_msg->status = -EIO;
880 ret = -EIO;
881 goto xfr_err;
882 }
883 ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
884 if (ret < 0) {
885 dev_err(dd->dev,
886 "%s: Failed to set QUP state to reset",
887 __func__);
888 goto xfr_err;
889 }
890 init_completion(&dd->transfer_complete);
891 }
892 }
893 return 0;
894
895xfr_err:
896 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700897}
898
899static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
900{
901 dmov_box *box;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530902 int bytes_to_send, bytes_sent;
903 int tx_num_rows, rx_num_rows;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700904 u32 num_transfers;
905
906 atomic_set(&dd->rx_irq_called, 0);
Kiran Gunda54eb06e2012-05-18 15:17:06 +0530907 atomic_set(&dd->tx_irq_called, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700908 if (dd->write_len && !dd->read_len) {
909 /* WR-WR transfer */
910 bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
911 dd->write_buf = dd->temp_buf;
912 } else {
913 bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
914 /* For WR-RD transfer, bytes_sent can be negative */
915 if (bytes_sent < 0)
916 bytes_sent = 0;
917 }
Kiran Gundae8f16742012-06-27 10:06:32 +0530918 /* We'll send in chunks of SPI_MAX_LEN if larger than
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530919 * 4K bytes for targets that have only 12 bits in
920 * QUP_MAX_OUTPUT_CNT register. If the target supports
921 * more than 12bits then we send the data in chunks of
922 * the infinite_mode value that is defined in the
923 * corresponding board file.
Kiran Gundae8f16742012-06-27 10:06:32 +0530924 */
925 if (!dd->pdata->infinite_mode)
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530926 dd->max_trfr_len = SPI_MAX_LEN;
Kiran Gundae8f16742012-06-27 10:06:32 +0530927 else
Kiran Gundac5fbd7f2012-07-30 13:22:39 +0530928 dd->max_trfr_len = (dd->pdata->infinite_mode) *
929 (dd->bytes_per_word);
930
931 bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
932 dd->max_trfr_len);
Kiran Gundae8f16742012-06-27 10:06:32 +0530933
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700934 num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530935 dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
936 dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
937 tx_num_rows = bytes_to_send / dd->output_burst_size;
938 rx_num_rows = bytes_to_send / dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939
940 dd->mode = SPI_DMOV_MODE;
941
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530942 if (tx_num_rows) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700943 /* src in 16 MSB, dst in 16 LSB */
944 box = &dd->tx_dmov_cmd->box;
945 box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530946 box->src_dst_len
947 = (dd->output_burst_size << 16) | dd->output_burst_size;
948 box->num_rows = (tx_num_rows << 16) | tx_num_rows;
949 box->row_offset = (dd->output_burst_size << 16) | 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700950
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530951 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
952 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
953 offsetof(struct spi_dmov_cmd, box));
954 } else {
955 dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
956 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
957 offsetof(struct spi_dmov_cmd, single_pad));
958 }
959
960 if (rx_num_rows) {
961 /* src in 16 MSB, dst in 16 LSB */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962 box = &dd->rx_dmov_cmd->box;
963 box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530964 box->src_dst_len
965 = (dd->input_burst_size << 16) | dd->input_burst_size;
966 box->num_rows = (rx_num_rows << 16) | rx_num_rows;
967 box->row_offset = (0 << 16) | dd->input_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700968
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700969 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
970 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
971 offsetof(struct spi_dmov_cmd, box));
972 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973 dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
974 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
975 offsetof(struct spi_dmov_cmd, single_pad));
976 }
977
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530978 if (!dd->tx_unaligned_len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700979 dd->tx_dmov_cmd->box.cmd |= CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700980 } else {
981 dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530982 u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700983
984 if ((dd->multi_xfr) && (dd->read_len <= 0))
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530985 tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986
987 dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530989 memset(dd->tx_padding, 0, dd->output_burst_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700990 if (dd->write_buf)
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530991 memcpy(dd->tx_padding, dd->write_buf + tx_offset,
992 dd->tx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700993
994 tx_cmd->src = dd->tx_padding_dma;
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530995 tx_cmd->len = dd->output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700996 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +0530997
998 if (!dd->rx_unaligned_len) {
999 dd->rx_dmov_cmd->box.cmd |= CMD_LC;
1000 } else {
1001 dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
1002 dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;
1003
1004 memset(dd->rx_padding, 0, dd->input_burst_size);
1005 rx_cmd->dst = dd->rx_padding_dma;
1006 rx_cmd->len = dd->input_burst_size;
1007 }
1008
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001009 /* This also takes care of the padding dummy buf
1010 Since this is set to the correct length, the
1011 dummy bytes won't be actually sent */
1012 if (dd->multi_xfr) {
1013 u32 write_transfers = 0;
1014 u32 read_transfers = 0;
1015
1016 if (dd->write_len > 0) {
1017 write_transfers = DIV_ROUND_UP(dd->write_len,
1018 dd->bytes_per_word);
1019 writel_relaxed(write_transfers,
1020 dd->base + SPI_MX_OUTPUT_COUNT);
1021 }
1022 if (dd->read_len > 0) {
1023 /*
1024 * The read following a write transfer must take
1025 * into account, that the bytes pertaining to
1026 * the write transfer needs to be discarded,
1027 * before the actual read begins.
1028 */
1029 read_transfers = DIV_ROUND_UP(dd->read_len +
1030 dd->write_len,
1031 dd->bytes_per_word);
1032 writel_relaxed(read_transfers,
1033 dd->base + SPI_MX_INPUT_COUNT);
1034 }
1035 } else {
1036 if (dd->write_buf)
1037 writel_relaxed(num_transfers,
1038 dd->base + SPI_MX_OUTPUT_COUNT);
1039 if (dd->read_buf)
1040 writel_relaxed(num_transfers,
1041 dd->base + SPI_MX_INPUT_COUNT);
1042 }
1043}
1044
1045static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
1046{
1047 dma_coherent_pre_ops();
1048 if (dd->write_buf)
1049 msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
1050 if (dd->read_buf)
1051 msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
1052}
1053
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05301054/* SPI core on targets that does not support infinite mode can send
1055 maximum of 4K transfers or 64K transfers depending up on size of
1056 MAX_OUTPUT_COUNT register, Therefore, we are sending in several
1057 chunks. Upon completion we send the next chunk, or complete the
1058 transfer if everything is finished. On targets that support
Kiran Gundae8f16742012-06-27 10:06:32 +05301059 infinite mode, we send all the bytes in as single chunk.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060*/
1061static int msm_spi_dm_send_next(struct msm_spi *dd)
1062{
1063 /* By now we should have sent all the bytes in FIFO mode,
1064 * However to make things right, we'll check anyway.
1065 */
1066 if (dd->mode != SPI_DMOV_MODE)
1067 return 0;
1068
Kiran Gundae8f16742012-06-27 10:06:32 +05301069 /* On targets which does not support infinite mode,
1070 We need to send more chunks, if we sent max last time */
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05301071 if (dd->tx_bytes_remaining > dd->max_trfr_len) {
1072 dd->tx_bytes_remaining -= dd->max_trfr_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1074 return 0;
1075 dd->read_len = dd->write_len = 0;
1076 msm_spi_setup_dm_transfer(dd);
1077 msm_spi_enqueue_dm_commands(dd);
1078 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1079 return 0;
1080 return 1;
1081 } else if (dd->read_len && dd->write_len) {
1082 dd->tx_bytes_remaining -= dd->cur_transfer->len;
1083 if (list_is_last(&dd->cur_transfer->transfer_list,
1084 &dd->cur_msg->transfers))
1085 return 0;
1086 get_next_transfer(dd);
1087 if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
1088 return 0;
1089 dd->tx_bytes_remaining = dd->read_len + dd->write_len;
1090 dd->read_buf = dd->temp_buf;
1091 dd->read_len = dd->write_len = -1;
1092 msm_spi_setup_dm_transfer(dd);
1093 msm_spi_enqueue_dm_commands(dd);
1094 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
1095 return 0;
1096 return 1;
1097 }
1098 return 0;
1099}
1100
1101static inline void msm_spi_ack_transfer(struct msm_spi *dd)
1102{
1103 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
1104 SPI_OP_MAX_OUTPUT_DONE_FLAG,
1105 dd->base + SPI_OPERATIONAL);
1106 /* Ensure done flag was cleared before proceeding further */
1107 mb();
1108}
1109
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001110/* Figure which irq occured and call the relevant functions */
1111static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
1112{
1113 u32 op, ret = IRQ_NONE;
1114 struct msm_spi *dd = dev_id;
1115
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301116 if (pm_runtime_suspended(dd->dev)) {
1117 dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
1118 return ret;
1119 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001120 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
1121 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
1122 struct spi_master *master = dev_get_drvdata(dd->dev);
1123 ret |= msm_spi_error_irq(irq, master);
1124 }
1125
1126 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1127 if (op & SPI_OP_INPUT_SERVICE_FLAG) {
1128 writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
1129 dd->base + SPI_OPERATIONAL);
1130 /*
1131 * Ensure service flag was cleared before further
1132 * processing of interrupt.
1133 */
1134 mb();
1135 ret |= msm_spi_input_irq(irq, dev_id);
1136 }
1137
1138 if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
1139 writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
1140 dd->base + SPI_OPERATIONAL);
1141 /*
1142 * Ensure service flag was cleared before further
1143 * processing of interrupt.
1144 */
1145 mb();
1146 ret |= msm_spi_output_irq(irq, dev_id);
1147 }
1148
1149 if (dd->done) {
1150 complete(&dd->transfer_complete);
1151 dd->done = 0;
1152 }
1153 return ret;
1154}
1155
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001156static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
1157{
1158 struct msm_spi *dd = dev_id;
1159
1160 dd->stat_rx++;
1161
1162 if (dd->mode == SPI_MODE_NONE)
1163 return IRQ_HANDLED;
1164
1165 if (dd->mode == SPI_DMOV_MODE) {
1166 u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
1167 if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
1168 (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
1169 msm_spi_ack_transfer(dd);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301170 if (dd->rx_unaligned_len == 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001171 if (atomic_inc_return(&dd->rx_irq_called) == 1)
1172 return IRQ_HANDLED;
1173 }
1174 msm_spi_complete(dd);
1175 return IRQ_HANDLED;
1176 }
1177 return IRQ_NONE;
1178 }
1179
1180 if (dd->mode == SPI_FIFO_MODE) {
1181 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
1182 SPI_OP_IP_FIFO_NOT_EMPTY) &&
1183 (dd->rx_bytes_remaining > 0)) {
1184 msm_spi_read_word_from_fifo(dd);
1185 }
1186 if (dd->rx_bytes_remaining == 0)
1187 msm_spi_complete(dd);
1188 }
1189
1190 return IRQ_HANDLED;
1191}
1192
1193static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1194{
1195 u32 word;
1196 u8 byte;
1197 int i;
1198
1199 word = 0;
1200 if (dd->write_buf) {
1201 for (i = 0; (i < dd->bytes_per_word) &&
1202 dd->tx_bytes_remaining; i++) {
1203 dd->tx_bytes_remaining--;
1204 byte = *dd->write_buf++;
1205 word |= (byte << (BITS_PER_BYTE * (3 - i)));
1206 }
1207 } else
1208 if (dd->tx_bytes_remaining > dd->bytes_per_word)
1209 dd->tx_bytes_remaining -= dd->bytes_per_word;
1210 else
1211 dd->tx_bytes_remaining = 0;
1212 dd->write_xfr_cnt++;
1213 if (dd->multi_xfr) {
1214 if (!dd->tx_bytes_remaining)
1215 dd->write_xfr_cnt = 0;
1216 else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
1217 dd->write_len) {
1218 struct spi_transfer *t = dd->cur_tx_transfer;
1219 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1220 t = list_entry(t->transfer_list.next,
1221 struct spi_transfer,
1222 transfer_list);
1223 dd->write_buf = t->tx_buf;
1224 dd->write_len = t->len;
1225 dd->write_xfr_cnt = 0;
1226 dd->cur_tx_transfer = t;
1227 }
1228 }
1229 }
1230 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1231}
1232
1233static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1234{
1235 int count = 0;
1236
1237 while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
1238 !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
1239 SPI_OP_OUTPUT_FIFO_FULL)) {
1240 msm_spi_write_word_to_fifo(dd);
1241 count++;
1242 }
1243}
1244
1245static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1246{
1247 struct msm_spi *dd = dev_id;
1248
1249 dd->stat_tx++;
1250
1251 if (dd->mode == SPI_MODE_NONE)
1252 return IRQ_HANDLED;
1253
1254 if (dd->mode == SPI_DMOV_MODE) {
1255 /* TX_ONLY transaction is handled here
1256 This is the only place we send complete at tx and not rx */
1257 if (dd->read_buf == NULL &&
1258 readl_relaxed(dd->base + SPI_OPERATIONAL) &
1259 SPI_OP_MAX_OUTPUT_DONE_FLAG) {
1260 msm_spi_ack_transfer(dd);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05301261 if (atomic_inc_return(&dd->tx_irq_called) == 1)
1262 return IRQ_HANDLED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001263 msm_spi_complete(dd);
1264 return IRQ_HANDLED;
1265 }
1266 return IRQ_NONE;
1267 }
1268
1269 /* Output FIFO is empty. Transmit any outstanding write data. */
1270 if (dd->mode == SPI_FIFO_MODE)
1271 msm_spi_write_rmn_to_fifo(dd);
1272
1273 return IRQ_HANDLED;
1274}
1275
1276static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1277{
1278 struct spi_master *master = dev_id;
1279 struct msm_spi *dd = spi_master_get_devdata(master);
1280 u32 spi_err;
1281
1282 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1283 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1284 dev_warn(master->dev.parent, "SPI output overrun error\n");
1285 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1286 dev_warn(master->dev.parent, "SPI input underrun error\n");
1287 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1288 dev_warn(master->dev.parent, "SPI output underrun error\n");
1289 msm_spi_get_clk_err(dd, &spi_err);
1290 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1291 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1292 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1293 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1294 msm_spi_clear_error_flags(dd);
1295 msm_spi_ack_clk_err(dd);
1296 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1297 mb();
1298 return IRQ_HANDLED;
1299}
1300
Gilad Avidovd0262342012-10-24 16:52:30 -06001301/**
1302 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
1303 * @return zero on success or negative error code
1304 *
1305 * calls dma_map_single() on the read/write buffers, effectively invalidating
1306 * their cash entries. for For WR-WR and WR-RD transfers, allocates temporary
1307 * buffer and copy the data to/from the client buffers
1308 */
1309static int msm_spi_dma_map_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310{
1311 struct device *dev;
1312 struct spi_transfer *first_xfr;
Jordan Crouse47b3f832011-09-19 11:21:16 -06001313 struct spi_transfer *nxt_xfr = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001314 void *tx_buf, *rx_buf;
1315 unsigned tx_len, rx_len;
1316 int ret = -EINVAL;
1317
1318 dev = &dd->cur_msg->spi->dev;
1319 first_xfr = dd->cur_transfer;
1320 tx_buf = (void *)first_xfr->tx_buf;
1321 rx_buf = first_xfr->rx_buf;
1322 tx_len = rx_len = first_xfr->len;
1323
1324 /*
1325 * For WR-WR and WR-RD transfers, we allocate our own temporary
1326 * buffer and copy the data to/from the client buffers.
1327 */
1328 if (dd->multi_xfr) {
1329 dd->temp_buf = kzalloc(dd->cur_msg_len,
1330 GFP_KERNEL | __GFP_DMA);
1331 if (!dd->temp_buf)
1332 return -ENOMEM;
1333 nxt_xfr = list_entry(first_xfr->transfer_list.next,
1334 struct spi_transfer, transfer_list);
1335
1336 if (dd->write_len && !dd->read_len) {
1337 if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
1338 goto error;
1339
1340 memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
1341 memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
1342 nxt_xfr->len);
1343 tx_buf = dd->temp_buf;
1344 tx_len = dd->cur_msg_len;
1345 } else {
1346 if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
1347 goto error;
1348
1349 rx_buf = dd->temp_buf;
1350 rx_len = dd->cur_msg_len;
1351 }
1352 }
1353 if (tx_buf != NULL) {
1354 first_xfr->tx_dma = dma_map_single(dev, tx_buf,
1355 tx_len, DMA_TO_DEVICE);
1356 if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
1357 dev_err(dev, "dma %cX %d bytes error\n",
1358 'T', tx_len);
1359 ret = -ENOMEM;
1360 goto error;
1361 }
1362 }
1363 if (rx_buf != NULL) {
1364 dma_addr_t dma_handle;
1365 dma_handle = dma_map_single(dev, rx_buf,
1366 rx_len, DMA_FROM_DEVICE);
1367 if (dma_mapping_error(NULL, dma_handle)) {
1368 dev_err(dev, "dma %cX %d bytes error\n",
1369 'R', rx_len);
1370 if (tx_buf != NULL)
1371 dma_unmap_single(NULL, first_xfr->tx_dma,
1372 tx_len, DMA_TO_DEVICE);
1373 ret = -ENOMEM;
1374 goto error;
1375 }
1376 if (dd->multi_xfr)
1377 nxt_xfr->rx_dma = dma_handle;
1378 else
1379 first_xfr->rx_dma = dma_handle;
1380 }
1381 return 0;
1382
1383error:
1384 kfree(dd->temp_buf);
1385 dd->temp_buf = NULL;
1386 return ret;
1387}
1388
Gilad Avidovd0262342012-10-24 16:52:30 -06001389static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390{
1391 struct device *dev;
1392 u32 offset;
1393
1394 dev = &dd->cur_msg->spi->dev;
1395 if (dd->cur_msg->is_dma_mapped)
1396 goto unmap_end;
1397
1398 if (dd->multi_xfr) {
1399 if (dd->write_len && !dd->read_len) {
1400 dma_unmap_single(dev,
1401 dd->cur_transfer->tx_dma,
1402 dd->cur_msg_len,
1403 DMA_TO_DEVICE);
1404 } else {
1405 struct spi_transfer *prev_xfr;
1406 prev_xfr = list_entry(
1407 dd->cur_transfer->transfer_list.prev,
1408 struct spi_transfer,
1409 transfer_list);
1410 if (dd->cur_transfer->rx_buf) {
1411 dma_unmap_single(dev,
1412 dd->cur_transfer->rx_dma,
1413 dd->cur_msg_len,
1414 DMA_FROM_DEVICE);
1415 }
1416 if (prev_xfr->tx_buf) {
1417 dma_unmap_single(dev,
1418 prev_xfr->tx_dma,
1419 prev_xfr->len,
1420 DMA_TO_DEVICE);
1421 }
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301422 if (dd->rx_unaligned_len && dd->read_buf) {
1423 offset = dd->cur_msg_len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001424 dma_coherent_post_ops();
1425 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301426 dd->rx_unaligned_len);
Gilad Avidov8d99efa2013-06-27 15:33:02 -06001427 if (dd->cur_transfer->rx_buf)
1428 memcpy(dd->cur_transfer->rx_buf,
1429 dd->read_buf + prev_xfr->len,
1430 dd->cur_transfer->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 }
1432 }
1433 kfree(dd->temp_buf);
1434 dd->temp_buf = NULL;
1435 return;
1436 } else {
1437 if (dd->cur_transfer->rx_buf)
1438 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1439 dd->cur_transfer->len,
1440 DMA_FROM_DEVICE);
1441 if (dd->cur_transfer->tx_buf)
1442 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1443 dd->cur_transfer->len,
1444 DMA_TO_DEVICE);
1445 }
1446
1447unmap_end:
1448 /* If we padded the transfer, we copy it from the padding buf */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301449 if (dd->rx_unaligned_len && dd->read_buf) {
1450 offset = dd->cur_transfer->len - dd->rx_unaligned_len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001451 dma_coherent_post_ops();
1452 memcpy(dd->read_buf + offset, dd->rx_padding,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05301453 dd->rx_unaligned_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454 }
1455}
1456
Gilad Avidovd0262342012-10-24 16:52:30 -06001457static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
1458{
1459 struct device *dev;
1460
1461 /* mapped by client */
1462 if (dd->cur_msg->is_dma_mapped)
1463 return;
1464
1465 dev = &dd->cur_msg->spi->dev;
1466 if (dd->cur_transfer->rx_buf)
1467 dma_unmap_single(dev, dd->cur_transfer->rx_dma,
1468 dd->cur_transfer->len,
1469 DMA_FROM_DEVICE);
1470
1471 if (dd->cur_transfer->tx_buf)
1472 dma_unmap_single(dev, dd->cur_transfer->tx_dma,
1473 dd->cur_transfer->len,
1474 DMA_TO_DEVICE);
1475}
1476
1477static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
1478{
1479 if (dd->mode == SPI_DMOV_MODE)
1480 msm_spi_dmov_unmap_buffers(dd);
1481 else if (dd->mode == SPI_BAM_MODE)
1482 msm_spi_bam_unmap_buffers(dd);
1483}
1484
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001485/**
Gilad Avidovd0262342012-10-24 16:52:30 -06001486 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
1487 * the given transfer
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001488 * @dd: device
1489 * @tr: transfer
1490 *
Gilad Avidovd0262342012-10-24 16:52:30 -06001491 * Start using DMA if:
1492 * 1. Is supported by HW
1493 * 2. Is not diabled by platfrom data
1494 * 3. Transfer size is greater than 3*block size.
1495 * 4. Buffers are aligned to cache line.
1496 * 5. Bytes-per-word is 8,16 or 32.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001497 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001498static inline bool
1499msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001500{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 if (!dd->use_dma)
Gilad Avidovd0262342012-10-24 16:52:30 -06001502 return false;
1503
1504 /* check constraints from platform data */
1505 if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
1506 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001507
1508 if (dd->cur_msg_len < 3*dd->input_block_size)
Gilad Avidovd0262342012-10-24 16:52:30 -06001509 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001510
1511 if (dd->multi_xfr && !dd->read_len && !dd->write_len)
Gilad Avidovd0262342012-10-24 16:52:30 -06001512 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001513
Gilad Avidovd0262342012-10-24 16:52:30 -06001514 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
1515 u32 cache_line = dma_get_cache_alignment();
1516
1517 if (tr->tx_buf) {
1518 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
1519 return 0;
1520 }
1521 if (tr->rx_buf) {
1522 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1523 return false;
1524 }
1525
1526 if (tr->cs_change &&
Kiran Gunda84286c32013-04-29 18:05:49 +05301527 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
Gilad Avidovd0262342012-10-24 16:52:30 -06001528 return false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001529 }
1530
Gilad Avidovd0262342012-10-24 16:52:30 -06001531 return true;
1532}
1533
1534/**
1535 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
1536 * prepares to process a transfer.
1537 */
1538static void
1539msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
1540{
1541 if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
1542 if (dd->qup_ver) {
1543 dd->mode = SPI_BAM_MODE;
1544 } else {
1545 dd->mode = SPI_DMOV_MODE;
1546 if (dd->write_len && dd->read_len) {
1547 dd->tx_bytes_remaining = dd->write_len;
1548 dd->rx_bytes_remaining = dd->read_len;
1549 }
1550 }
1551 } else {
1552 dd->mode = SPI_FIFO_MODE;
1553 if (dd->multi_xfr) {
1554 dd->read_len = dd->cur_transfer->len;
1555 dd->write_len = dd->cur_transfer->len;
1556 }
1557 }
1558}
1559
1560/**
1561 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
1562 * transfer
1563 */
1564static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
1565{
1566 u32 spi_iom;
1567 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1568 /* Set input and output transfer mode: FIFO, DMOV, or BAM */
1569 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1570 spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
1571 spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
1572 /* Turn on packing for data mover */
1573 if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
1574 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1575 else
1576 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1577
1578 /*if (dd->mode == SPI_BAM_MODE) {
1579 spi_iom |= SPI_IO_C_NO_TRI_STATE;
1580 spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
1581 }*/
1582 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1583}
1584
1585static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
1586{
1587 if (mode & SPI_CPOL)
1588 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1589 else
1590 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1591 return spi_ioc;
1592}
1593
1594/**
1595 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
1596 * next transfer
1597 * @return the new set value of SPI_IO_CONTROL
1598 */
1599static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
1600{
1601 u32 spi_ioc, spi_ioc_orig, chip_select;
1602
1603 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1604 spi_ioc_orig = spi_ioc;
1605 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
1606 , dd->cur_msg->spi->mode);
1607 /* Set chip-select */
1608 chip_select = dd->cur_msg->spi->chip_select << 2;
1609 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1610 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1611 if (!dd->cur_transfer->cs_change)
1612 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1613
1614 if (spi_ioc != spi_ioc_orig)
1615 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1616
1617 return spi_ioc;
1618}
1619
1620/**
1621 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
1622 * the next transfer
1623 */
1624static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
1625{
1626 /* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status
1627 * change in BAM mode */
1628 u32 mask = (dd->mode == SPI_BAM_MODE) ?
1629 QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
1630 : 0;
1631 writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001632}
1633
1634static void msm_spi_process_transfer(struct msm_spi *dd)
1635{
1636 u8 bpw;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637 u32 max_speed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 u32 read_count;
1639 u32 timeout;
Gilad Avidovd0262342012-10-24 16:52:30 -06001640 u32 spi_ioc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641 u32 int_loopback = 0;
1642
1643 dd->tx_bytes_remaining = dd->cur_msg_len;
1644 dd->rx_bytes_remaining = dd->cur_msg_len;
1645 dd->read_buf = dd->cur_transfer->rx_buf;
1646 dd->write_buf = dd->cur_transfer->tx_buf;
1647 init_completion(&dd->transfer_complete);
1648 if (dd->cur_transfer->bits_per_word)
1649 bpw = dd->cur_transfer->bits_per_word;
1650 else
1651 if (dd->cur_msg->spi->bits_per_word)
1652 bpw = dd->cur_msg->spi->bits_per_word;
1653 else
1654 bpw = 8;
1655 dd->bytes_per_word = (bpw + 7) / 8;
1656
1657 if (dd->cur_transfer->speed_hz)
1658 max_speed = dd->cur_transfer->speed_hz;
1659 else
1660 max_speed = dd->cur_msg->spi->max_speed_hz;
1661 if (!dd->clock_speed || max_speed != dd->clock_speed)
1662 msm_spi_clock_set(dd, max_speed);
1663
Gilad Avidovd0262342012-10-24 16:52:30 -06001664 timeout = 100 * msecs_to_jiffies(
1665 DIV_ROUND_UP(dd->cur_msg_len * 8,
1666 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1669 if (dd->cur_msg->spi->mode & SPI_LOOP)
1670 int_loopback = 1;
1671 if (int_loopback && dd->multi_xfr &&
1672 (read_count > dd->input_fifo_size)) {
1673 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001674 pr_err(
1675			"%s: Internal Loopback does not support > fifo size "
1676			"for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001677 __func__);
1678 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001679 pr_err(
1680			"%s: Internal Loopback does not support > fifo size "
1681			"for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001682 __func__);
1683 return;
1684 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001685
Gilad Avidovd0262342012-10-24 16:52:30 -06001686 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1687 dev_err(dd->dev,
1688 "%s: Error setting QUP to reset-state",
1689 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001690
Gilad Avidovd0262342012-10-24 16:52:30 -06001691 msm_spi_set_transfer_mode(dd, bpw, read_count);
1692 msm_spi_set_mx_counts(dd, read_count);
1693 if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
1694 if (msm_spi_dma_map_buffers(dd) < 0) {
1695			pr_err("%s: failed to map DMA buffers\n", __func__);
1696 return;
1697 }
1698 msm_spi_set_qup_io_modes(dd);
1699 msm_spi_set_spi_config(dd, bpw);
1700 msm_spi_set_qup_config(dd, bpw);
1701 spi_ioc = msm_spi_set_spi_io_control(dd);
1702 msm_spi_set_qup_op_mask(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001703
1704 if (dd->mode == SPI_DMOV_MODE) {
1705 msm_spi_setup_dm_transfer(dd);
1706 msm_spi_enqueue_dm_commands(dd);
1707 }
1708 /* The output fifo interrupt handler will handle all writes after
1709 the first. Restricting this to one write avoids contention
1710 issues and race conditions between this thread and the int handler
1711 */
1712 else if (dd->mode == SPI_FIFO_MODE) {
1713 if (msm_spi_prepare_for_write(dd))
1714 goto transfer_end;
1715 msm_spi_start_write(dd, read_count);
Gilad Avidovd0262342012-10-24 16:52:30 -06001716 } else if (dd->mode == SPI_BAM_MODE) {
1717 if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
1718 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
1719 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001720 }
1721
Gilad Avidovd0262342012-10-24 16:52:30 -06001722 /*
1723	 * In BAM mode, the core is already in the RUN state at this point.
1724 * Only enter the RUN state after the first word is written into
1725 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
1726 * might fire before the first word is written resulting in a
1727 * possible race condition.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001728 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001729 if (dd->mode != SPI_BAM_MODE)
1730 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
1731 dev_warn(dd->dev,
1732 "%s: Failed to set QUP to run-state. Mode:%d",
1733 __func__, dd->mode);
1734 goto transfer_end;
1735 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001736
1737 /* Assume success, this might change later upon transaction result */
1738 dd->cur_msg->status = 0;
1739 do {
1740 if (!wait_for_completion_timeout(&dd->transfer_complete,
1741 timeout)) {
Gilad Avidovd0262342012-10-24 16:52:30 -06001742 dev_err(dd->dev,
1743 "%s: SPI transaction timeout\n",
1744 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001745 dd->cur_msg->status = -EIO;
1746 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001747 msm_dmov_flush(dd->tx_dma_chan, 1);
1748 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 }
Gilad Avidov799cfeb2013-06-26 17:18:36 -06001750 if (dd->mode == SPI_BAM_MODE)
1751 msm_spi_bam_flush(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001752 break;
1753 }
1754 } while (msm_spi_dm_send_next(dd));
1755
Sagar Dharia525593d2012-11-02 18:26:01 -06001756 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001757transfer_end:
Gilad Avidovd0262342012-10-24 16:52:30 -06001758 msm_spi_dma_unmap_buffers(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001759 dd->mode = SPI_MODE_NONE;
1760
1761 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1762 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1763 dd->base + SPI_IO_CONTROL);
1764}
1765
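/**
 * get_transfer_length: compute the total message length and, for two-transfer
 * write-then-write or write-then-read messages, record the per-direction
 * read/write lengths; flags the message as multi-transfer when needed
 */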
1766static void get_transfer_length(struct msm_spi *dd)
1767{
1768 struct spi_transfer *tr;
1769 int num_xfrs = 0;
1770 int readlen = 0;
1771 int writelen = 0;
1772
1773 dd->cur_msg_len = 0;
1774 dd->multi_xfr = 0;
1775 dd->read_len = dd->write_len = 0;
1776
1777 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1778 if (tr->tx_buf)
1779 writelen += tr->len;
1780 if (tr->rx_buf)
1781 readlen += tr->len;
1782 dd->cur_msg_len += tr->len;
1783 num_xfrs++;
1784 }
1785
1786 if (num_xfrs == 2) {
1787 struct spi_transfer *first_xfr = dd->cur_transfer;
1788
1789 dd->multi_xfr = 1;
1790 tr = list_entry(first_xfr->transfer_list.next,
1791 struct spi_transfer,
1792 transfer_list);
1793 /*
1794 * We update dd->read_len and dd->write_len only
1795 * for WR-WR and WR-RD transfers.
1796 */
1797 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1798 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1799 ((!tr->tx_buf) && (tr->rx_buf))) {
1800 dd->read_len = readlen;
1801 dd->write_len = writelen;
1802 }
1803 }
1804 } else if (num_xfrs > 1)
1805 dd->multi_xfr = 1;
1806}
1807
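/**
 * combine_transfers: group the current transfer with the following transfers
 * that share the same cs_change setting into one logical transfer, updating
 * dd->cur_msg_len; returns the number of transfers grouped
 */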
1808static inline int combine_transfers(struct msm_spi *dd)
1809{
1810 struct spi_transfer *t = dd->cur_transfer;
1811 struct spi_transfer *nxt;
1812 int xfrs_grped = 1;
1813
1814 dd->cur_msg_len = dd->cur_transfer->len;
1815 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1816 nxt = list_entry(t->transfer_list.next,
1817 struct spi_transfer,
1818 transfer_list);
1819 if (t->cs_change != nxt->cs_change)
1820 return xfrs_grped;
1821 dd->cur_msg_len += nxt->len;
1822 xfrs_grped++;
1823 t = nxt;
1824 }
1825 return xfrs_grped;
1826}
1827
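/**
 * write_force_cs: set or clear the FORCE_CS bit in SPI_IO_CONTROL so that
 * chip-select stays asserted (or is released) across adjacent transfers
 */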
Harini Jayaraman093938a2012-04-20 15:33:23 -06001828static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1829{
1830 u32 spi_ioc;
1831 u32 spi_ioc_orig;
1832
1833 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1834 spi_ioc_orig = spi_ioc;
1835 if (set_flag)
1836 spi_ioc |= SPI_IO_C_FORCE_CS;
1837 else
1838 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1839
1840 if (spi_ioc != spi_ioc_orig)
1841 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1842}
1843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001844static void msm_spi_process_message(struct msm_spi *dd)
1845{
1846 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001847 int cs_num;
1848 int rc;
Sagar Dharia525593d2012-11-02 18:26:01 -06001849 bool xfer_delay = false;
1850 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001853 cs_num = dd->cur_msg->spi->chip_select;
1854 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1855 (!(dd->cs_gpios[cs_num].valid)) &&
1856 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1857 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1858 spi_cs_rsrcs[cs_num]);
1859 if (rc) {
1860 dev_err(dd->dev, "gpio_request for pin %d failed with "
1861 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1862 rc);
1863 return;
1864 }
1865 dd->cs_gpios[cs_num].valid = 1;
1866 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001867
Sagar Dharia525593d2012-11-02 18:26:01 -06001868 list_for_each_entry(tr,
1869 &dd->cur_msg->transfers,
1870 transfer_list) {
1871 if (tr->delay_usecs) {
1872			dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs",
1873 tr->delay_usecs);
1874 xfer_delay = true;
1875 break;
1876 }
1877 }
1878
1879 /* Don't combine xfers if delay is needed after every xfer */
1880 if (dd->qup_ver || xfer_delay) {
1881 if (dd->qup_ver)
1882 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001883 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001884 &dd->cur_msg->transfers,
1885 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001886 struct spi_transfer *t = dd->cur_transfer;
1887 struct spi_transfer *nxt;
1888
1889 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1890 nxt = list_entry(t->transfer_list.next,
1891 struct spi_transfer,
1892 transfer_list);
1893
Sagar Dharia525593d2012-11-02 18:26:01 -06001894 if (dd->qup_ver &&
1895 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001896 write_force_cs(dd, 1);
Sagar Dharia525593d2012-11-02 18:26:01 -06001897 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001898 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001899 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001900
1901 dd->cur_msg_len = dd->cur_transfer->len;
1902 msm_spi_process_transfer(dd);
1903 }
1904 } else {
1905 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1906 struct spi_transfer,
1907 transfer_list);
1908 get_transfer_length(dd);
1909 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1910 /*
1911 * Handling of multi-transfers.
1912 * FIFO mode is used by default
1913 */
1914 list_for_each_entry(dd->cur_transfer,
1915 &dd->cur_msg->transfers,
1916 transfer_list) {
1917 if (!dd->cur_transfer->len)
1918 goto error;
1919 if (xfrs_grped) {
1920 xfrs_grped--;
1921 continue;
1922 } else {
1923 dd->read_len = dd->write_len = 0;
1924 xfrs_grped = combine_transfers(dd);
1925 }
1926
1927 dd->cur_tx_transfer = dd->cur_transfer;
1928 dd->cur_rx_transfer = dd->cur_transfer;
1929 msm_spi_process_transfer(dd);
1930 xfrs_grped--;
1931 }
1932 } else {
1933 /* Handling of a single transfer or
1934 * WR-WR or WR-RD transfers
1935 */
1936 if ((!dd->cur_msg->is_dma_mapped) &&
Gilad Avidovd0262342012-10-24 16:52:30 -06001937 (msm_spi_use_dma(dd, dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001938 dd->cur_transfer->bits_per_word))) {
1939 /* Mapping of DMA buffers */
Gilad Avidovd0262342012-10-24 16:52:30 -06001940 int ret = msm_spi_dma_map_buffers(dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001941 if (ret < 0) {
1942 dd->cur_msg->status = ret;
1943 goto error;
1944 }
1945 }
1946
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001947 dd->cur_tx_transfer = dd->cur_transfer;
1948 dd->cur_rx_transfer = dd->cur_transfer;
1949 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001950 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001951 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001952
1953 return;
1954
1955error:
1956 if (dd->cs_gpios[cs_num].valid) {
1957 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1958 dd->cs_gpios[cs_num].valid = 0;
1959 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001960}
1961
1962/* workqueue - pull messages from queue & process */
1963static void msm_spi_workq(struct work_struct *work)
1964{
1965 struct msm_spi *dd =
1966 container_of(work, struct msm_spi, work_data);
1967 unsigned long flags;
1968 u32 status_error = 0;
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301969
1970 pm_runtime_get_sync(dd->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001971
1972 mutex_lock(&dd->core_lock);
1973
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301974 /*
1975 * Counter-part of system-suspend when runtime-pm is not enabled.
1976 * This way, resume can be left empty and device will be put in
1977 * active mode only if client requests anything on the bus
1978 */
1979 if (!pm_runtime_enabled(dd->dev))
1980 msm_spi_pm_resume_runtime(dd->dev);
1981
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001982 if (dd->use_rlock)
1983 remote_mutex_lock(&dd->r_lock);
1984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985 if (!msm_spi_is_valid_state(dd)) {
1986 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1987 __func__);
1988 status_error = 1;
1989 }
1990
1991 spin_lock_irqsave(&dd->queue_lock, flags);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301992 dd->transfer_pending = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001993 while (!list_empty(&dd->queue)) {
1994 dd->cur_msg = list_entry(dd->queue.next,
1995 struct spi_message, queue);
1996 list_del_init(&dd->cur_msg->queue);
1997 spin_unlock_irqrestore(&dd->queue_lock, flags);
1998 if (status_error)
1999 dd->cur_msg->status = -EIO;
2000 else
2001 msm_spi_process_message(dd);
2002 if (dd->cur_msg->complete)
2003 dd->cur_msg->complete(dd->cur_msg->context);
2004 spin_lock_irqsave(&dd->queue_lock, flags);
2005 }
2006 dd->transfer_pending = 0;
2007 spin_unlock_irqrestore(&dd->queue_lock, flags);
2008
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 if (dd->use_rlock)
2010 remote_mutex_unlock(&dd->r_lock);
2011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002012 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302013
2014 pm_runtime_mark_last_busy(dd->dev);
2015 pm_runtime_put_autosuspend(dd->dev);
2016
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002017 /* If needed, this can be done after the current message is complete,
2018 and work can be continued upon resume. No motivation for now. */
2019 if (dd->suspended)
2020 wake_up_interruptible(&dd->continue_suspend);
2021}
2022
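/**
 * msm_spi_transfer: spi_master transfer callback; validates the transfers in
 * the message, queues the message and kicks the driver workqueue
 */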
2023static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
2024{
2025 struct msm_spi *dd;
2026 unsigned long flags;
2027 struct spi_transfer *tr;
2028
2029 dd = spi_master_get_devdata(spi->master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030
2031 if (list_empty(&msg->transfers) || !msg->complete)
2032 return -EINVAL;
2033
2034 list_for_each_entry(tr, &msg->transfers, transfer_list) {
2035 /* Check message parameters */
2036 if (tr->speed_hz > dd->pdata->max_clock_speed ||
2037 (tr->bits_per_word &&
2038 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
2039 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
2040			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
2041					   "tx=%p, rx=%p\n",
2042 tr->speed_hz, tr->bits_per_word,
2043 tr->tx_buf, tr->rx_buf);
2044 return -EINVAL;
2045 }
2046 }
2047
2048 spin_lock_irqsave(&dd->queue_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002049 list_add_tail(&msg->queue, &dd->queue);
2050 spin_unlock_irqrestore(&dd->queue_lock, flags);
2051 queue_work(dd->workqueue, &dd->work_data);
2052 return 0;
2053}
2054
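/**
 * msm_spi_setup: spi_master setup callback; validates bits_per_word and
 * chip-select, then programs chip-select polarity, clock polarity and
 * loopback/input-first bits for the given slave device
 */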
2055static int msm_spi_setup(struct spi_device *spi)
2056{
2057 struct msm_spi *dd;
2058 int rc = 0;
2059 u32 spi_ioc;
2060 u32 spi_config;
2061 u32 mask;
2062
2063 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
2064 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
2065 __func__, spi->bits_per_word);
2066 rc = -EINVAL;
2067 }
2068 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
2069 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
2070 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
2071 rc = -EINVAL;
2072 }
2073
2074 if (rc)
2075 goto err_setup_exit;
2076
2077 dd = spi_master_get_devdata(spi->master);
2078
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302079 pm_runtime_get_sync(dd->dev);
2080
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 mutex_lock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302082
2083 /* Counter-part of system-suspend when runtime-pm is not enabled. */
2084 if (!pm_runtime_enabled(dd->dev))
2085 msm_spi_pm_resume_runtime(dd->dev);
2086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 if (dd->suspended) {
2088 mutex_unlock(&dd->core_lock);
2089 return -EBUSY;
2090 }
2091
2092 if (dd->use_rlock)
2093 remote_mutex_lock(&dd->r_lock);
2094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002095 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
2096 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
2097 if (spi->mode & SPI_CS_HIGH)
2098 spi_ioc |= mask;
2099 else
2100 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06002101 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002102
2103 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
2104
2105 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06002106 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
2107 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002108 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
2109
2110 /* Ensure previous write completed before disabling the clocks */
2111 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002112
2113 if (dd->use_rlock)
2114 remote_mutex_unlock(&dd->r_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302115
2116 /* Counter-part of system-resume when runtime-pm is not enabled. */
2117 if (!pm_runtime_enabled(dd->dev))
2118 msm_spi_pm_suspend_runtime(dd->dev);
2119
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002120 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302121
2122 pm_runtime_mark_last_busy(dd->dev);
2123 pm_runtime_put_autosuspend(dd->dev);
2124
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002125err_setup_exit:
2126 return rc;
2127}
2128
2129#ifdef CONFIG_DEBUG_FS
2130static int debugfs_iomem_x32_set(void *data, u64 val)
2131{
2132 writel_relaxed(val, data);
2133 /* Ensure the previous write completed. */
2134 mb();
2135 return 0;
2136}
2137
2138static int debugfs_iomem_x32_get(void *data, u64 *val)
2139{
2140 *val = readl_relaxed(data);
2141 /* Ensure the previous read completed. */
2142 mb();
2143 return 0;
2144}
2145
2146DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
2147 debugfs_iomem_x32_set, "0x%08llx\n");
2148
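/* Create a debugfs directory for the controller and expose each register in
 * debugfs_spi_regs[] as a 32-bit read/write file */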
2149static void spi_debugfs_init(struct msm_spi *dd)
2150{
2151 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
2152 if (dd->dent_spi) {
2153 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002154
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002155 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
2156 dd->debugfs_spi_regs[i] =
2157 debugfs_create_file(
2158 debugfs_spi_regs[i].name,
2159 debugfs_spi_regs[i].mode,
2160 dd->dent_spi,
2161 dd->base + debugfs_spi_regs[i].offset,
2162 &fops_iomem_x32);
2163 }
2164 }
2165}
2166
2167static void spi_debugfs_exit(struct msm_spi *dd)
2168{
2169 if (dd->dent_spi) {
2170 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002171
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172 debugfs_remove_recursive(dd->dent_spi);
2173 dd->dent_spi = NULL;
2174 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
2175 dd->debugfs_spi_regs[i] = NULL;
2176 }
2177}
2178#else
2179static void spi_debugfs_init(struct msm_spi *dd) {}
2180static void spi_debugfs_exit(struct msm_spi *dd) {}
2181#endif
2182
2183/* ===Device attributes begin=== */
2184static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
2185 char *buf)
2186{
2187 struct spi_master *master = dev_get_drvdata(dev);
2188 struct msm_spi *dd = spi_master_get_devdata(master);
2189
2190 return snprintf(buf, PAGE_SIZE,
2191 "Device %s\n"
2192 "rx fifo_size = %d spi words\n"
2193 "tx fifo_size = %d spi words\n"
2194 "use_dma ? %s\n"
2195 "rx block size = %d bytes\n"
2196 "tx block size = %d bytes\n"
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302197 "input burst size = %d bytes\n"
2198 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002199 "DMA configuration:\n"
2200 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
2201 "--statistics--\n"
2202 "Rx isrs = %d\n"
2203 "Tx isrs = %d\n"
2204 "DMA error = %d\n"
2205 "--debug--\n"
2206 "NA yet\n",
2207 dev_name(dev),
2208 dd->input_fifo_size,
2209 dd->output_fifo_size,
2210 dd->use_dma ? "yes" : "no",
2211 dd->input_block_size,
2212 dd->output_block_size,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302213 dd->input_burst_size,
2214 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002215 dd->tx_dma_chan,
2216 dd->rx_dma_chan,
2217 dd->tx_dma_crci,
2218 dd->rx_dma_crci,
2219 dd->stat_rx + dd->stat_dmov_rx,
2220 dd->stat_tx + dd->stat_dmov_tx,
2221 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
2222 );
2223}
2224
2225/* Reset statistics on write */
2226static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
2227 const char *buf, size_t count)
2228{
2229 struct msm_spi *dd = dev_get_drvdata(dev);
2230 dd->stat_rx = 0;
2231 dd->stat_tx = 0;
2232 dd->stat_dmov_rx = 0;
2233 dd->stat_dmov_tx = 0;
2234 dd->stat_dmov_rx_err = 0;
2235 dd->stat_dmov_tx_err = 0;
2236 return count;
2237}
2238
2239static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
2240
2241static struct attribute *dev_attrs[] = {
2242 &dev_attr_stats.attr,
2243 NULL,
2244};
2245
2246static struct attribute_group dev_attr_grp = {
2247 .attrs = dev_attrs,
2248};
2249/* ===Device attributes end=== */
2250
2251/**
2252 * spi_dmov_tx_complete_func - DataMover tx completion callback
2253 *
2254 * Executed in IRQ context (Data Mover's IRQ) DataMover's
2255 * spinlock @msm_dmov_lock held.
2256 */
2257static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
2258 unsigned int result,
2259 struct msm_dmov_errdata *err)
2260{
2261 struct msm_spi *dd;
2262
2263 if (!(result & DMOV_RSLT_VALID)) {
2264 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
2265 return;
2266 }
2267 /* restore original context */
2268 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302269 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002270 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302271 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
2272 return;
2273 complete(&dd->transfer_complete);
2274 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 /* Error or flush */
2276 if (result & DMOV_RSLT_ERROR) {
2277 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2278 dd->stat_dmov_tx_err++;
2279 }
2280 if (result & DMOV_RSLT_FLUSH) {
2281 /*
2282 * Flushing normally happens in process of
2283 * removing, when we are waiting for outstanding
2284 * DMA commands to be flushed.
2285 */
2286 dev_info(dd->dev,
2287 "DMA channel flushed (0x%08x)\n", result);
2288 }
2289 if (err)
2290 dev_err(dd->dev,
2291 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2292 err->flush[0], err->flush[1], err->flush[2],
2293 err->flush[3], err->flush[4], err->flush[5]);
2294 dd->cur_msg->status = -EIO;
2295 complete(&dd->transfer_complete);
2296 }
2297}
2298
2299/**
2300 * spi_dmov_rx_complete_func - DataMover rx completion callback
2301 *
2302 * Executed in IRQ context (Data Mover's IRQ)
2303 * DataMover's spinlock @msm_dmov_lock held.
2304 */
2305static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2306 unsigned int result,
2307 struct msm_dmov_errdata *err)
2308{
2309 struct msm_spi *dd;
2310
2311 if (!(result & DMOV_RSLT_VALID)) {
2312 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
2313 result, cmd);
2314 return;
2315 }
2316 /* restore original context */
2317 dd = container_of(cmd, struct msm_spi, rx_hdr);
2318 if (result & DMOV_RSLT_DONE) {
2319 dd->stat_dmov_rx++;
2320 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2321 return;
2322 complete(&dd->transfer_complete);
2323 } else {
2324		/* Error or flush */
2325 if (result & DMOV_RSLT_ERROR) {
2326 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2327 dd->stat_dmov_rx_err++;
2328 }
2329 if (result & DMOV_RSLT_FLUSH) {
2330 dev_info(dd->dev,
2331 "DMA channel flushed(0x%08x)\n", result);
2332 }
2333 if (err)
2334 dev_err(dd->dev,
2335 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2336 err->flush[0], err->flush[1], err->flush[2],
2337 err->flush[3], err->flush[4], err->flush[5]);
2338 dd->cur_msg->status = -EIO;
2339 complete(&dd->transfer_complete);
2340 }
2341}
2342
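/*
 * get_chunk_size: size of the single coherent allocation that holds both
 * DMOV command structures and the cache-line aligned tx/rx padding buffers
 */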
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302343static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
2344 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002345{
2346 u32 cache_line = dma_get_cache_alignment();
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302347 int burst_size = (input_burst_size > output_burst_size) ?
2348 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002349
2350 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302351 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002352}
2353
Gilad Avidovd0262342012-10-24 16:52:30 -06002354static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355{
2356 int limit = 0;
2357
2358 if (!dd->use_dma)
2359 return;
2360
2361 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002362 msm_dmov_flush(dd->tx_dma_chan, 1);
2363 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002364 msleep(10);
2365 }
2366
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302367 dma_free_coherent(NULL,
2368 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
2369 dd->tx_dmov_cmd,
2370 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002371 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2372 dd->tx_padding = dd->rx_padding = NULL;
2373}
2374
Gilad Avidovd0262342012-10-24 16:52:30 -06002375static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2376 enum msm_spi_pipe_direction pipe_dir)
2377{
2378 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2379 (&dd->bam.prod) : (&dd->bam.cons);
2380 if (!pipe->teardown_required)
2381 return;
2382
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002383 msm_spi_bam_pipe_disconnect(dd, pipe);
Gilad Avidovd0262342012-10-24 16:52:30 -06002384 dma_free_coherent(dd->dev, pipe->config.desc.size,
2385 pipe->config.desc.base, pipe->config.desc.phys_base);
2386 sps_free_endpoint(pipe->handle);
2387 pipe->handle = 0;
2388 pipe->teardown_required = false;
2389}
2390
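/**
 * msm_spi_bam_pipe_init: allocate an SPS endpoint, configure it as the BAM
 * producer or consumer pipe and allocate its coherent descriptor FIFO
 */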
2391static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2392 enum msm_spi_pipe_direction pipe_dir)
2393{
2394 int rc = 0;
2395 struct sps_pipe *pipe_handle;
Gilad Avidovd0262342012-10-24 16:52:30 -06002396 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2397 (&dd->bam.prod) : (&dd->bam.cons);
2398 struct sps_connect *pipe_conf = &pipe->config;
2399
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002400 pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
Gilad Avidovd0262342012-10-24 16:52:30 -06002401 pipe->handle = 0;
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002402 pipe_handle = sps_alloc_endpoint();
Gilad Avidovd0262342012-10-24 16:52:30 -06002403 if (!pipe_handle) {
2404 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2405 , __func__);
2406 return -ENOMEM;
2407 }
2408
2409 memset(pipe_conf, 0, sizeof(*pipe_conf));
2410 rc = sps_get_config(pipe_handle, pipe_conf);
2411 if (rc) {
2412 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2413 , __func__);
2414 goto config_err;
2415 }
2416
2417 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2418 pipe_conf->source = dd->bam.handle;
2419 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2420 pipe_conf->mode = SPS_MODE_SRC;
2421 pipe_conf->src_pipe_index =
2422 dd->pdata->bam_producer_pipe_index;
2423 pipe_conf->dest_pipe_index = 0;
2424 } else {
2425 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2426 pipe_conf->destination = dd->bam.handle;
2427 pipe_conf->mode = SPS_MODE_DEST;
2428 pipe_conf->src_pipe_index = 0;
2429 pipe_conf->dest_pipe_index =
2430 dd->pdata->bam_consumer_pipe_index;
2431 }
2432 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2433 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2434 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2435 pipe_conf->desc.size,
2436 &pipe_conf->desc.phys_base,
2437 GFP_KERNEL);
2438 if (!pipe_conf->desc.base) {
2439		dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
2440 , __func__);
2441 rc = -ENOMEM;
2442 goto config_err;
2443 }
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002444 /* zero descriptor FIFO for convenient debugging of first descs */
Gilad Avidovd0262342012-10-24 16:52:30 -06002445 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2446
Gilad Avidovd0262342012-10-24 16:52:30 -06002447 pipe->handle = pipe_handle;
Gilad Avidov799cfeb2013-06-26 17:18:36 -06002448 rc = msm_spi_bam_pipe_connect(dd, pipe, pipe_conf);
2449 if (rc)
2450 goto connect_err;
2451
Gilad Avidovd0262342012-10-24 16:52:30 -06002452 return 0;
2453
Gilad Avidovd0262342012-10-24 16:52:30 -06002454connect_err:
2455 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2456 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2457config_err:
2458 sps_free_endpoint(pipe_handle);
2459
2460 return rc;
2461}
2462
2463static void msm_spi_bam_teardown(struct msm_spi *dd)
2464{
2465 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2466 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2467
2468 if (dd->bam.deregister_required) {
2469 sps_deregister_bam_device(dd->bam.handle);
2470 dd->bam.deregister_required = false;
2471 }
2472}
2473
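/**
 * msm_spi_bam_init: register the BAM device with the SPS driver (if it is not
 * already registered) and initialize the producer and consumer pipes
 */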
2474static int msm_spi_bam_init(struct msm_spi *dd)
2475{
2476 struct sps_bam_props bam_props = {0};
2477 u32 bam_handle;
2478 int rc = 0;
2479
2480 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2481 if (rc || !bam_handle) {
2482 bam_props.phys_addr = dd->bam.phys_addr;
2483 bam_props.virt_addr = dd->bam.base;
2484 bam_props.irq = dd->bam.irq;
Gilad Avidovb0968052013-05-03 09:51:37 -06002485 bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Gilad Avidovd0262342012-10-24 16:52:30 -06002486 bam_props.summing_threshold = 0x10;
2487
2488 rc = sps_register_bam_device(&bam_props, &bam_handle);
2489 if (rc) {
2490 dev_err(dd->dev,
2491 "%s: Failed to register BAM device",
2492 __func__);
2493 return rc;
2494 }
2495 dd->bam.deregister_required = true;
2496 }
2497
2498 dd->bam.handle = bam_handle;
2499
2500 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2501 if (rc) {
2502 dev_err(dd->dev,
2503 "%s: Failed to init producer BAM-pipe",
2504 __func__);
2505 goto bam_init_error;
2506 }
2507
2508 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2509 if (rc) {
2510 dev_err(dd->dev,
2511 "%s: Failed to init consumer BAM-pipe",
2512 __func__);
2513 goto bam_init_error;
2514 }
2515
2516 return 0;
2517
2518bam_init_error:
2519 msm_spi_bam_teardown(dd);
2520 return rc;
2521}
2522
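/**
 * msm_spi_dmov_init: allocate the shared DMOV command/padding chunk and
 * pre-build the Data Mover box and single-pad commands for rx and tx
 */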
2523static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002524{
2525 dmov_box *box;
2526 u32 cache_line = dma_get_cache_alignment();
2527
2528 /* Allocate all as one chunk, since all is smaller than page size */
2529
2530	/* We pass a NULL device, since passing the device would require a
2531	   coherent_dma_mask in its definition; we're okay with using the system pool */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302532 dd->tx_dmov_cmd
2533 = dma_alloc_coherent(NULL,
2534 get_chunk_size(dd, dd->input_burst_size,
2535 dd->output_burst_size),
2536 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002537 if (dd->tx_dmov_cmd == NULL)
2538 return -ENOMEM;
2539
2540	/* DMA addresses should be 64-bit aligned */
2541 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2542 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2543 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2544 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2545
2546 /* Buffers should be aligned to cache line */
2547 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2548 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2549 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302550 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
2551 dd->output_burst_size), cache_line);
2552 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002553 cache_line);
2554
2555 /* Setup DM commands */
2556 box = &(dd->rx_dmov_cmd->box);
2557 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2558 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2559 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2560 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2561 offsetof(struct spi_dmov_cmd, cmd_ptr));
2562 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002563
2564 box = &(dd->tx_dmov_cmd->box);
2565 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2566 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2567 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2568 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2569 offsetof(struct spi_dmov_cmd, cmd_ptr));
2570 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002571
2572 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2573 CMD_DST_CRCI(dd->tx_dma_crci);
2574 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2575 SPI_OUTPUT_FIFO;
2576 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2577 CMD_SRC_CRCI(dd->rx_dma_crci);
2578 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2579 SPI_INPUT_FIFO;
2580
2581 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002582 msm_dmov_flush(dd->tx_dma_chan, 1);
2583 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002584
2585 return 0;
2586}
2587
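/*
 * Device-tree parsing is table driven: each msm_spi_dt_to_pdata_map entry
 * names a DT property, where to store its value, how strictly it is required,
 * its type, and the default used when the property is absent.
 */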
Gilad Avidov23350552013-05-21 09:26:46 -06002588enum msm_spi_dt_entry_status {
2589 DT_REQ, /* Required: fail if missing */
2590 DT_SGST, /* Suggested: warn if missing */
2591 DT_OPT, /* Optional: don't warn if missing */
2592};
2593
2594enum msm_spi_dt_entry_type {
2595 DT_U32,
2596 DT_GPIO,
2597 DT_BOOL,
2598};
2599
2600struct msm_spi_dt_to_pdata_map {
2601 const char *dt_name;
2602 void *ptr_data;
2603 enum msm_spi_dt_entry_status status;
2604 enum msm_spi_dt_entry_type type;
2605 int default_val;
2606};
2607
2608static int __init msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
2609 struct msm_spi_platform_data *pdata,
2610 struct msm_spi_dt_to_pdata_map *itr)
2611{
2612 int ret, err = 0;
2613 struct device_node *node = pdev->dev.of_node;
2614
2615 for (; itr->dt_name ; ++itr) {
2616 switch (itr->type) {
2617 case DT_GPIO:
2618 ret = of_get_named_gpio(node, itr->dt_name, 0);
2619 if (ret >= 0) {
2620 *((int *) itr->ptr_data) = ret;
2621 ret = 0;
2622 }
2623 break;
2624 case DT_U32:
2625 ret = of_property_read_u32(node, itr->dt_name,
2626 (u32 *) itr->ptr_data);
2627 break;
2628 case DT_BOOL:
2629 *((bool *) itr->ptr_data) =
2630 of_property_read_bool(node, itr->dt_name);
2631 ret = 0;
2632 break;
2633 default:
2634 dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
2635 itr->type);
2636 ret = -EBADE;
2637 }
2638
2639 dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
2640 ret, itr->dt_name, *((int *)itr->ptr_data));
2641
2642 if (ret) {
2643 *((int *)itr->ptr_data) = itr->default_val;
2644
2645 if (itr->status < DT_OPT) {
2646 dev_err(&pdev->dev, "Missing '%s' DT entry\n",
2647 itr->dt_name);
2648
2649 /* cont on err to dump all missing entries */
2650 if (itr->status == DT_REQ && !err)
2651 err = ret;
2652 }
2653 }
2654 }
2655
2656 return err;
2657}
2658
Gilad Avidovd0262342012-10-24 16:52:30 -06002659/**
Gilad Avidov002dba02013-05-21 18:06:32 -06002660 * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
Gilad Avidovd0262342012-10-24 16:52:30 -06002661 */
Gilad Avidov002dba02013-05-21 18:06:32 -06002662struct msm_spi_platform_data * __init msm_spi_dt_to_pdata(
2663 struct platform_device *pdev, struct msm_spi *dd)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002664{
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002665 struct msm_spi_platform_data *pdata;
2666
2667 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2668 if (!pdata) {
2669 pr_err("Unable to allocate platform data\n");
2670 return NULL;
Gilad Avidov23350552013-05-21 09:26:46 -06002671 } else {
2672 struct msm_spi_dt_to_pdata_map map[] = {
2673 {"spi-max-frequency",
Gilad Avidov002dba02013-05-21 18:06:32 -06002674 &pdata->max_clock_speed, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002675 {"qcom,infinite-mode",
Gilad Avidov002dba02013-05-21 18:06:32 -06002676 &pdata->infinite_mode, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002677 {"qcom,active-only",
Gilad Avidov002dba02013-05-21 18:06:32 -06002678 &pdata->active_only, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002679 {"qcom,master-id",
Gilad Avidov002dba02013-05-21 18:06:32 -06002680 &pdata->master_id, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002681 {"qcom,ver-reg-exists",
Gilad Avidov002dba02013-05-21 18:06:32 -06002682 &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002683 {"qcom,use-bam",
Gilad Avidov002dba02013-05-21 18:06:32 -06002684 &pdata->use_bam, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002685 {"qcom,bam-consumer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002686 &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002687 {"qcom,bam-producer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002688 &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0},
2689 {"qcom,gpio-clk",
2690 &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1},
2691 {"qcom,gpio-miso",
2692 &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1},
2693 {"qcom,gpio-mosi",
2694 &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1},
2695 {"qcom,gpio-cs0",
2696 &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1},
2697 {"qcom,gpio-cs1",
2698 &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1},
2699 {"qcom,gpio-cs2",
2700 &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
2701 {"qcom,gpio-cs3",
2702 &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
2703 {NULL, NULL, 0, 0, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002704 };
2705
2706 if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
2707 devm_kfree(&pdev->dev, pdata);
2708 return NULL;
2709 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002710 }
2711
Gilad Avidovd0262342012-10-24 16:52:30 -06002712 if (pdata->use_bam) {
Gilad Avidov23350552013-05-21 09:26:46 -06002713 if (!pdata->bam_consumer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002714 dev_warn(&pdev->dev,
2715 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2716 pdata->use_bam = false;
2717 }
2718
Gilad Avidovc0465dc2013-07-11 15:59:45 -06002719 if (!pdata->bam_producer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002720 dev_warn(&pdev->dev,
2721 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2722 pdata->use_bam = false;
2723 }
2724 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002725 return pdata;
2726}
2727
Gilad Avidovd0262342012-10-24 16:52:30 -06002728static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2729{
2730 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2731 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2732 : SPI_QUP_VERSION_NONE;
2733}
2734
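/**
 * msm_spi_bam_get_resources: read the BAM memory, IRQ and pipe resources from
 * the platform device and select the BAM init/teardown callbacks
 */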
2735static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2736 struct platform_device *pdev, struct spi_master *master)
2737{
2738 struct resource *resource;
2739 size_t bam_mem_size;
2740
2741 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2742 "spi_bam_physical");
2743 if (!resource) {
2744 dev_warn(&pdev->dev,
2745 "%s: Missing spi_bam_physical entry in DT",
2746 __func__);
2747 return -ENXIO;
2748 }
2749
2750 dd->bam.phys_addr = resource->start;
2751 bam_mem_size = resource_size(resource);
2752 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2753 bam_mem_size);
2754 if (!dd->bam.base) {
2755 dev_warn(&pdev->dev,
2756 "%s: Failed to ioremap(spi_bam_physical)",
2757 __func__);
2758 return -ENXIO;
2759 }
2760
2761 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2762 if (dd->bam.irq < 0) {
2763 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2764 __func__);
2765 return -EINVAL;
2766 }
2767
2768 dd->dma_init = msm_spi_bam_init;
2769 dd->dma_teardown = msm_spi_bam_teardown;
2770 return 0;
2771}
2772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002773static int __init msm_spi_probe(struct platform_device *pdev)
2774{
2775 struct spi_master *master;
2776 struct msm_spi *dd;
2777 struct resource *resource;
2778 int rc = -ENXIO;
2779 int locked = 0;
2780 int i = 0;
2781 int clk_enabled = 0;
2782 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002783 struct msm_spi_platform_data *pdata;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002784
2785 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2786 if (!master) {
2787 rc = -ENOMEM;
2788 dev_err(&pdev->dev, "master allocation failed\n");
2789 goto err_probe_exit;
2790 }
2791
2792 master->bus_num = pdev->id;
2793 master->mode_bits = SPI_SUPPORTED_MODES;
2794 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2795 master->setup = msm_spi_setup;
2796 master->transfer = msm_spi_transfer;
2797 platform_set_drvdata(pdev, master);
2798 dd = spi_master_get_devdata(master);
2799
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002800 if (pdev->dev.of_node) {
2801 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2802 master->dev.of_node = pdev->dev.of_node;
Gilad Avidov002dba02013-05-21 18:06:32 -06002803 pdata = msm_spi_dt_to_pdata(pdev, dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002804 if (!pdata) {
2805 rc = -ENOMEM;
2806 goto err_probe_exit;
2807 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002808
Gilad Avidov0697ea62013-02-11 16:46:38 -07002809 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2810 if (rc < 0)
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002811 dev_warn(&pdev->dev,
2812 "using default bus_num %d\n", pdev->id);
2813 else
Gilad Avidov0697ea62013-02-11 16:46:38 -07002814 master->bus_num = pdev->id = rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002815 } else {
2816 pdata = pdev->dev.platform_data;
2817 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002818
2819 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2820 resource = platform_get_resource(pdev, IORESOURCE_IO,
2821 i);
2822 dd->spi_gpios[i] = resource ? resource->start : -1;
2823 }
2824
2825 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2826 resource = platform_get_resource(pdev, IORESOURCE_IO,
2827 i + ARRAY_SIZE(spi_rsrcs));
2828 dd->cs_gpios[i].gpio_num = resource ?
2829 resource->start : -1;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002830 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002831 }
2832
Alok Chauhan0ba44ae2013-08-20 15:15:28 +05302833 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
2834 dd->cs_gpios[i].valid = 0;
2835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002836 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002837 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002838 if (!resource) {
2839 rc = -ENXIO;
2840 goto err_probe_res;
2841 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002842
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002843 dd->mem_phys_addr = resource->start;
2844 dd->mem_size = resource_size(resource);
2845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846 if (pdata) {
2847 if (pdata->dma_config) {
2848 rc = pdata->dma_config();
2849 if (rc) {
2850 dev_warn(&pdev->dev,
2851 "%s: DM mode not supported\n",
2852 __func__);
2853 dd->use_dma = 0;
2854 goto skip_dma_resources;
2855 }
2856 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002857 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2858 resource = platform_get_resource(pdev,
2859 IORESOURCE_DMA, 0);
2860 if (resource) {
2861 dd->rx_dma_chan = resource->start;
2862 dd->tx_dma_chan = resource->end;
2863 resource = platform_get_resource(pdev,
2864 IORESOURCE_DMA, 1);
2865 if (!resource) {
2866 rc = -ENXIO;
2867 goto err_probe_res;
2868 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002869
Gilad Avidovd0262342012-10-24 16:52:30 -06002870 dd->rx_dma_crci = resource->start;
2871 dd->tx_dma_crci = resource->end;
2872 dd->use_dma = 1;
2873 master->dma_alignment =
2874 dma_get_cache_alignment();
2875				dd->dma_init = msm_spi_dmov_init;
2876 dd->dma_teardown = msm_spi_dmov_teardown;
2877 }
2878 } else {
2879 if (!dd->pdata->use_bam)
2880 goto skip_dma_resources;
2881
2882 rc = msm_spi_bam_get_resources(dd, pdev, master);
2883 if (rc) {
2884 dev_warn(dd->dev,
2885				 "%s: Failed to get BAM resources",
2886 __func__);
2887 goto skip_dma_resources;
2888 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002890 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002891 }
2892
Alok Chauhan66554a12012-08-22 19:54:45 +05302893skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002894
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002895 spin_lock_init(&dd->queue_lock);
2896 mutex_init(&dd->core_lock);
2897 INIT_LIST_HEAD(&dd->queue);
2898 INIT_WORK(&dd->work_data, msm_spi_workq);
2899 init_waitqueue_head(&dd->continue_suspend);
2900 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002901 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002902 if (!dd->workqueue)
2903 goto err_probe_workq;
2904
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002905 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2906 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002907 rc = -ENXIO;
2908 goto err_probe_reqmem;
2909 }
2910
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002911 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2912 if (!dd->base) {
2913 rc = -ENOMEM;
2914 goto err_probe_reqmem;
2915 }
2916
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002917 if (pdata && pdata->rsl_id) {
2918 struct remote_mutex_id rmid;
2919 rmid.r_spinlock_id = pdata->rsl_id;
2920 rmid.delay_us = SPI_TRYLOCK_DELAY;
2921
2922 rc = remote_mutex_init(&dd->r_lock, &rmid);
2923 if (rc) {
2924 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2925				"(%s), (rc=%d)\n", __func__,
2926				rmid.r_spinlock_id, rc);
2927 goto err_probe_rlock_init;
2928 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002929
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002930 dd->use_rlock = 1;
2931 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302932 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002933 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002934 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002935
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002936 mutex_lock(&dd->core_lock);
2937 if (dd->use_rlock)
2938 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002939
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002940 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002941 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002942 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002943 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002944 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002945 rc = PTR_ERR(dd->clk);
2946 goto err_probe_clk_get;
2947 }
2948
Matt Wagantallac294852011-08-17 15:44:58 -07002949 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002951 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002952 rc = PTR_ERR(dd->pclk);
2953 goto err_probe_pclk_get;
2954 }
2955
2956 if (pdata && pdata->max_clock_speed)
2957 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2958
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002959 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002960 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002961 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002962 __func__);
2963 goto err_probe_clk_enable;
2964 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002965
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002966 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002967 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002968 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002969 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002970 __func__);
2971 goto err_probe_pclk_enable;
2972 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002973
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002974 pclk_enabled = 1;
Alok Chauhan40376582013-07-17 21:08:00 +05302975
2976 if (pdata && pdata->ver_reg_exists) {
2977 enum msm_spi_qup_version ver =
2978 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2979 if (dd->qup_ver != ver)
2980 dev_warn(&pdev->dev,
2981				"%s: HW version different than initially assumed by probe",
2982 __func__);
2983 }
2984
Gilad Avidovd0262342012-10-24 16:52:30 -06002985	/* GSBI does not exist on B-family MSM chips */
2986 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2987 rc = msm_spi_configure_gsbi(dd, pdev);
2988 if (rc)
2989 goto err_probe_gsbi;
2990 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002992 msm_spi_calculate_fifo_size(dd);
2993 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002994 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002995 if (rc)
2996 goto err_probe_dma;
2997 }
2998
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002999 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003000 /*
3001 * The SPI core generates a bogus input overrun error on some targets,
3002 * when a transition from run to reset state occurs and if the FIFO has
3003 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
3004 * bit.
3005 */
3006 msm_spi_enable_error_flags(dd);
3007
3008 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
3009 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
3010 if (rc)
3011 goto err_probe_state;
3012
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003013 clk_disable_unprepare(dd->clk);
3014 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003015 clk_enabled = 0;
3016 pclk_enabled = 0;
3017
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303018 dd->suspended = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003019 dd->transfer_pending = 0;
3020 dd->multi_xfr = 0;
3021 dd->mode = SPI_MODE_NONE;
3022
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003023 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003024 if (rc)
3025 goto err_probe_irq;
3026
3027 msm_spi_disable_irqs(dd);
3028 if (dd->use_rlock)
3029 remote_mutex_unlock(&dd->r_lock);
3030
3031 mutex_unlock(&dd->core_lock);
3032 locked = 0;
3033
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303034 pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
3035 pm_runtime_use_autosuspend(&pdev->dev);
3036 pm_runtime_enable(&pdev->dev);
3037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003038 rc = spi_register_master(master);
3039 if (rc)
3040 goto err_probe_reg_master;
3041
3042 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
3043 if (rc) {
3044 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
3045 goto err_attrs;
3046 }
3047
3048 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05303049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003050 return 0;
3051
3052err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003053 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003054err_probe_reg_master:
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303055 pm_runtime_disable(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003056err_probe_irq:
3057err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08003058 if (dd->dma_teardown)
3059 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003060err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003061err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003062 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003063 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003064err_probe_pclk_enable:
3065 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003066 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003067err_probe_clk_enable:
3068 clk_put(dd->pclk);
3069err_probe_pclk_get:
3070 clk_put(dd->clk);
3071err_probe_clk_get:
3072 if (locked) {
3073 if (dd->use_rlock)
3074 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003076 mutex_unlock(&dd->core_lock);
3077 }
3078err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003079err_probe_reqmem:
3080 destroy_workqueue(dd->workqueue);
3081err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003082err_probe_res:
3083 spi_master_put(master);
3084err_probe_exit:
3085 return rc;
3086}
3087
3088#ifdef CONFIG_PM
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303089static int msm_spi_pm_suspend_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090{
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303091 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003092 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303093 struct msm_spi *dd;
3094 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003095
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303096 dev_dbg(device, "pm_runtime: suspending...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003097 if (!master)
3098 goto suspend_exit;
3099 dd = spi_master_get_devdata(master);
3100 if (!dd)
3101 goto suspend_exit;
3102
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303103 if (dd->suspended)
3104 return 0;
3105
3106 /*
3107 * Make sure nothing is added to the queue while we're
3108 * suspending
3109 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003110 spin_lock_irqsave(&dd->queue_lock, flags);
3111 dd->suspended = 1;
3112 spin_unlock_irqrestore(&dd->queue_lock, flags);
3113
3114 /* Wait for transactions to end, or time out */
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303115 wait_event_interruptible(dd->continue_suspend,
3116 !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003117
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303118 msm_spi_disable_irqs(dd);
3119 clk_disable_unprepare(dd->clk);
3120 clk_disable_unprepare(dd->pclk);
Gilad Avidov8d99efa2013-06-27 15:33:02 -06003121 if (dd->pdata && !dd->pdata->active_only)
Gilad Avidov23350552013-05-21 09:26:46 -06003122 msm_spi_clk_path_unvote(dd);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303123
3124 /* Free the spi clk, miso, mosi, cs gpio */
3125 if (dd->pdata && dd->pdata->gpio_release)
3126 dd->pdata->gpio_release();
3127
3128 msm_spi_free_gpios(dd);
3129
3130 if (pm_qos_request_active(&qos_req_list))
3131 pm_qos_update_request(&qos_req_list,
3132 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003133suspend_exit:
3134 return 0;
3135}
3136
static int msm_spi_pm_resume_runtime(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd;
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	if (!master)
		goto resume_exit;
	dd = spi_master_get_devdata(master);
	if (!dd)
		goto resume_exit;

	if (!dd->suspended)
		return 0;

	if (pm_qos_request_active(&qos_req_list))
		pm_qos_update_request(&qos_req_list, dd->pm_lat);

	/* Configure the SPI CLK, MISO, MOSI and CS GPIOs */
	if (dd->pdata->gpio_config) {
		ret = dd->pdata->gpio_config();
		if (ret) {
			dev_err(dd->dev, "%s: error configuring GPIOs\n",
				__func__);
			return ret;
		}
	}

	ret = msm_spi_request_gpios(dd);
	if (ret)
		return ret;

	msm_spi_clk_path_init(dd);
	if (!dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);
	clk_prepare_enable(dd->clk);
	clk_prepare_enable(dd->pclk);
	msm_spi_enable_irqs(dd);
	dd->suspended = 0;

resume_exit:
	return 0;
}

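/*
 * System-sleep suspend callback: if runtime PM has not already
 * suspended the controller, force the runtime-suspend path now and
 * record the device as runtime suspended so both frameworks agree on
 * the hardware state.
 */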
static int msm_spi_suspend(struct device *device)
{
	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
		struct platform_device *pdev = to_platform_device(device);
		struct spi_master *master = platform_get_drvdata(pdev);
		struct msm_spi *dd;

		dev_dbg(device, "system suspend");
		if (!master)
			goto suspend_exit;
		dd = spi_master_get_devdata(master);
		if (!dd)
			goto suspend_exit;
		msm_spi_pm_suspend_runtime(device);

		/* Set the device's runtime PM status to 'suspended' */
		pm_runtime_disable(device);
		pm_runtime_set_suspended(device);
		pm_runtime_enable(device);
	}
suspend_exit:
	return 0;
}

static int msm_spi_resume(struct device *device)
{
	/*
	 * Rely on runtime PM to call resume when it is enabled.
	 * Even if it is not enabled, the first client transaction will
	 * turn the clocks on and configure the GPIOs.
	 */
	dev_dbg(device, "system resume");
	return 0;
}
#else
#define msm_spi_suspend NULL
#define msm_spi_resume NULL
#define msm_spi_pm_suspend_runtime NULL
#define msm_spi_pm_resume_runtime NULL
#endif /* CONFIG_PM */

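/*
 * Driver removal: undo everything probe set up, including the PM QoS
 * request, debugfs and sysfs entries, DMA resources, runtime-PM state,
 * clocks, bus-bandwidth voting and the workqueue, then unregister and
 * release the SPI master.
 */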
static int __devexit msm_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct msm_spi *dd = spi_master_get_devdata(master);

	pm_qos_remove_request(&qos_req_list);
	spi_debugfs_exit(dd);
	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);

	if (dd->dma_teardown)
		dd->dma_teardown(dd);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	clk_put(dd->clk);
	clk_put(dd->pclk);
	msm_spi_clk_path_teardown(dd);
	destroy_workqueue(dd->workqueue);
	platform_set_drvdata(pdev, NULL);
	spi_unregister_master(master);
	spi_master_put(master);

	return 0;
}

static struct of_device_id msm_spi_dt_match[] = {
	{
		.compatible = "qcom,spi-qup-v2",
	},
	{}
};

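/*
 * Illustrative sketch of a matching device-tree node (the node name and
 * unit address are placeholders; only the compatible string is taken
 * from msm_spi_dt_match above, the remaining properties are platform
 * specific):
 *
 *	spi@<base-address> {
 *		compatible = "qcom,spi-qup-v2";
 *		...
 *	};
 */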
static const struct dev_pm_ops msm_spi_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
			   msm_spi_pm_resume_runtime, NULL)
};

static struct platform_driver msm_spi_driver = {
	.driver = {
		.name = SPI_DRV_NAME,
		.owner = THIS_MODULE,
		.pm = &msm_spi_dev_pm_ops,
		.of_match_table = msm_spi_dt_match,
	},
	.remove = __devexit_p(msm_spi_remove),
};

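/*
 * Module init/exit: platform_driver_probe() registers the driver and
 * binds it to any matching device that is already registered; unlike
 * platform_driver_register(), it does not probe devices that show up
 * later, which is why it suits a non-hotpluggable controller like this
 * one.
 */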
static int __init msm_spi_init(void)
{
	return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
}
module_init(msm_spi_init);

static void __exit msm_spi_exit(void)
{
	platform_driver_unregister(&msm_spi_driver);
}
module_exit(msm_spi_exit);

MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.4");
MODULE_ALIAS("platform:"SPI_DRV_NAME);